Actual source code: dainterp.c
2: /*
3: Code for interpolating between grids represented by DMDAs
4: */
6: /*
7: For linear elements there are two branches of code to compute the interpolation. They should compute the same results but may not. The "new version" does
8: not work for periodic domains, while the old one does. Change NEWVERSION to 1 to compile in the new version. Eventually, once we are sure the two produce identical results,
9: we will remove/merge the new version. Based on current tests, both produce the same results. We are leaving NEWVERSION in the code for now since some
10: consider it cleaner, but the old version is turned on since it handles the periodic case.
11: */
12: #define NEWVERSION 0
14: #include <petsc/private/dmdaimpl.h>
16: /*
17: Since the interpolation uses MATMAIJ for dof > 0 we convert requests for non-MATAIJ-based matrices to MATAIJ.
18: This is a bit of a hack; the reason for it is partly that -dm_mat_type defines the
19: matrix type for both the operator matrices and the interpolation matrices, so that users
20: can select matrix types based on MATAIJ for accelerators.
21: */
22: static PetscErrorCode ConvertToAIJ(MatType intype,MatType *outtype)
23: {
24: PetscInt i;
25: char const *types[3] = {MATAIJ,MATSEQAIJ,MATMPIAIJ};
26: PetscBool flg;
28: *outtype = MATAIJ;
29: for (i=0; i<3; i++) {
30: PetscStrbeginswith(intype,types[i],&flg);
31: if (flg) {
32: *outtype = intype;
33: return 0;
34: }
35: }
36: return 0;
37: }
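/*
   Illustrative note (not part of the original file): because the test above is
   PetscStrbeginswith(), any requested type whose name begins with "aij", "seqaij",
   or "mpiaij" is passed through unchanged, while every other type falls back to
   plain MATAIJ. For example (MATAIJCUSPARSE assumed available in the build):

     MatType out;
     ConvertToAIJ(MATAIJCUSPARSE,&out);   // name begins with "aij"  -> kept as is
     ConvertToAIJ(MATBAIJ,&out);          // no matching prefix      -> becomes MATAIJ
*/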
39: PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac,DM daf,Mat *A)
40: {
41: PetscInt i,i_start,m_f,Mx;
42: const PetscInt *idx_f,*idx_c;
43: PetscInt m_ghost,m_ghost_c;
44: PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio;
45: PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof;
46: PetscScalar v[2],x;
47: Mat mat;
48: DMBoundaryType bx;
49: ISLocalToGlobalMapping ltog_f,ltog_c;
50: MatType mattype;
52: DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL);
53: DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
54: if (bx == DM_BOUNDARY_PERIODIC) {
55: ratio = mx/Mx;
57: } else {
58: ratio = (mx-1)/(Mx-1);
60: }
62: DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL);
63: DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL);
64: DMGetLocalToGlobalMapping(daf,&ltog_f);
65: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
67: DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL);
68: DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL);
69: DMGetLocalToGlobalMapping(dac,&ltog_c);
70: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
72: /* create interpolation matrix */
73: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
74: #if defined(PETSC_HAVE_CUDA)
75: /*
76: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
77: we don't want the original unconverted matrix copied to the GPU
78: */
79: if (dof > 1) {
80: MatBindToCPU(mat,PETSC_TRUE);
81: }
82: #endif
83: MatSetSizes(mat,m_f,m_c,mx,Mx);
84: ConvertToAIJ(dac->mattype,&mattype);
85: MatSetType(mat,mattype);
86: MatSeqAIJSetPreallocation(mat,2,NULL);
87: MatMPIAIJSetPreallocation(mat,2,NULL,1,NULL);
89: /* loop over local fine grid nodes setting interpolation for those*/
90: if (!NEWVERSION) {
92: for (i=i_start; i<i_start+m_f; i++) {
93: /* convert to local "natural" numbering and then to PETSc global numbering */
94: row = idx_f[i-i_start_ghost];
96: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
98: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
100: /*
101: Only include those interpolation points that are truly
102: nonzero. Note this is very important for final grid lines
103: in x direction; since they have no right neighbor
104: */
105: x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
106: nc = 0;
107: /* one left and below; or we are right on it */
108: col = (i_c-i_start_ghost_c);
109: cols[nc] = idx_c[col];
110: v[nc++] = -x + 1.0;
111: /* one right? */
112: if (i_c*ratio != i) {
113: cols[nc] = idx_c[col+1];
114: v[nc++] = x;
115: }
116: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
117: }
119: } else {
120: PetscScalar *xi;
121: PetscInt li,nxi,n;
122: PetscScalar Ni[2];
124: /* compute local coordinate arrays */
125: nxi = ratio + 1;
126: PetscMalloc1(nxi,&xi);
127: for (li=0; li<nxi; li++) {
128: xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
129: }
131: for (i=i_start; i<i_start+m_f; i++) {
132: /* convert to local "natural" numbering and then to PETSc global numbering */
133: row = idx_f[(i-i_start_ghost)];
135: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
137: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
139: /* remainders */
140: li = i - ratio * (i/ratio);
141: if (i==mx-1) li = nxi-1;
143: /* corners */
144: col = (i_c-i_start_ghost_c);
145: cols[0] = idx_c[col];
146: Ni[0] = 1.0;
147: if ((li==0) || (li==nxi-1)) {
148: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
149: continue;
150: }
152: /* edges + interior */
153: /* remainders */
154: if (i==mx-1) i_c--;
156: col = (i_c-i_start_ghost_c);
157: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
158: cols[1] = idx_c[col+1];
160: Ni[0] = 0.5*(1.0-xi[li]);
161: Ni[1] = 0.5*(1.0+xi[li]);
162: for (n=0; n<2; n++) {
163: if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
164: }
165: MatSetValues(mat,1,&row,2,cols,Ni,INSERT_VALUES);
166: }
167: PetscFree(xi);
168: }
169: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
170: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
171: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
172: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
173: MatCreateMAIJ(mat,dof,A);
174: MatDestroy(&mat);
175: return 0;
176: }
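/*
   Minimal usage sketch (illustrative, not part of the original file): the routine above
   is normally reached through the public DMCreateInterpolation() interface for a pair of
   1D DMDAs where the fine grid comes from DMRefine(). The grid size 5 below is arbitrary.

     DM  dac,daf;
     Mat A;
     Vec vc,vf;
     DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,5,1,1,NULL,&dac);
     DMSetUp(dac);
     DMRefine(dac,PETSC_COMM_WORLD,&daf);       // mx = 2*Mx-1, so ratio = 2
     DMCreateInterpolation(dac,daf,&A,NULL);    // dispatches to the Q1 routine above (Q1 is the default)
     DMCreateGlobalVector(dac,&vc);
     DMCreateGlobalVector(daf,&vf);
     MatInterpolate(A,vc,vf);                   // vf = A*vc
*/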
178: PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac,DM daf,Mat *A)
179: {
180: PetscInt i,i_start,m_f,Mx;
181: const PetscInt *idx_f,*idx_c;
182: ISLocalToGlobalMapping ltog_f,ltog_c;
183: PetscInt m_ghost,m_ghost_c;
184: PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio;
185: PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof;
186: PetscScalar v[2],x;
187: Mat mat;
188: DMBoundaryType bx;
189: MatType mattype;
191: DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL);
192: DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
193: if (bx == DM_BOUNDARY_PERIODIC) {
195: ratio = mx/Mx;
197: } else {
199: ratio = (mx-1)/(Mx-1);
201: }
203: DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL);
204: DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL);
205: DMGetLocalToGlobalMapping(daf,&ltog_f);
206: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
208: DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL);
209: DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL);
210: DMGetLocalToGlobalMapping(dac,&ltog_c);
211: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
213: /* create interpolation matrix */
214: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
215: #if defined(PETSC_HAVE_CUDA)
216: /*
217: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
218: we don't want the original unconverted matrix copied to the GPU
219: */
220: if (dof > 1) {
221: MatBindToCPU(mat,PETSC_TRUE);
222: }
223: #endif
224: MatSetSizes(mat,m_f,m_c,mx,Mx);
225: ConvertToAIJ(dac->mattype,&mattype);
226: MatSetType(mat,mattype);
227: MatSeqAIJSetPreallocation(mat,2,NULL);
228: MatMPIAIJSetPreallocation(mat,2,NULL,0,NULL);
230: /* loop over local fine grid nodes setting interpolation for those*/
231: for (i=i_start; i<i_start+m_f; i++) {
232: /* convert to local "natural" numbering and then to PETSc global numbering */
233: row = idx_f[(i-i_start_ghost)];
235: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
237: /*
238: Only include those interpolation points that are truly
239: nonzero. Note this is very important for final grid lines
240: in x direction; since they have no right neighbor
241: */
242: x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
243: nc = 0;
244: /* one left and below; or we are right on it */
245: col = (i_c-i_start_ghost_c);
246: cols[nc] = idx_c[col];
247: v[nc++] = -x + 1.0;
248: /* one right? */
249: if (i_c*ratio != i) {
250: cols[nc] = idx_c[col+1];
251: v[nc++] = x;
252: }
253: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
254: }
255: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
256: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
257: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
258: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
259: MatCreateMAIJ(mat,dof,A);
260: MatDestroy(&mat);
261: PetscLogFlops(5.0*m_f);
262: return 0;
263: }
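/*
   Usage note (illustrative): the Q0 variant above is selected by setting the interpolation
   type on the coarse DMDA before asking for the interpolation; Q1 is the default.

     DMDASetInterpolationType(dac,DMDA_Q0);
     DMCreateInterpolation(dac,daf,&A,NULL);
*/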
265: PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac,DM daf,Mat *A)
266: {
267: PetscErrorCode ierr;
268: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
269: const PetscInt *idx_c,*idx_f;
270: ISLocalToGlobalMapping ltog_f,ltog_c;
271: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
272: PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
273: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
274: PetscMPIInt size_c,size_f,rank_f;
275: PetscScalar v[4],x,y;
276: Mat mat;
277: DMBoundaryType bx,by;
278: MatType mattype;
280: DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL);
281: DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
282: if (bx == DM_BOUNDARY_PERIODIC) {
284: ratioi = mx/Mx;
286: } else {
288: ratioi = (mx-1)/(Mx-1);
290: }
291: if (by == DM_BOUNDARY_PERIODIC) {
293: ratioj = my/My;
295: } else {
297: ratioj = (my-1)/(My-1);
299: }
301: DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL);
302: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL);
303: DMGetLocalToGlobalMapping(daf,&ltog_f);
304: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
306: DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL);
307: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL);
308: DMGetLocalToGlobalMapping(dac,&ltog_c);
309: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
311: /*
312: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
313: The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
314: processors). Its effective length is hence 4 times its normal length; this is
315: why col_scale multiplies the interpolation matrix column sizes.
316: col_shift allows each set of 1/4 processors to do its own interpolation using ITS
317: copy of the coarse vector. A bit of a hack, but it is hard to do better.
319: In the standard case when size_f == size_c col_scale == 1 and col_shift == 0
320: */
321: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
322: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
323: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
324: col_scale = size_f/size_c;
325: col_shift = Mx*My*(rank_f/size_c);
327: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);
328: for (j=j_start; j<j_start+n_f; j++) {
329: for (i=i_start; i<i_start+m_f; i++) {
330: /* convert to local "natural" numbering and then to PETSc global numbering */
331: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
333: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
334: j_c = (j/ratioj); /* coarse grid node below fine grid node */
337: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
339: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
341: /*
342: Only include those interpolation points that are truly
343: nonzero. Note this is very important for final grid lines
344: in x and y directions; since they have no right/top neighbors
345: */
346: nc = 0;
347: /* one left and below; or we are right on it */
348: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
349: cols[nc++] = col_shift + idx_c[col];
350: /* one right and below */
351: if (i_c*ratioi != i) cols[nc++] = col_shift + idx_c[col+1];
352: /* one left and above */
353: if (j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+m_ghost_c];
354: /* one right and above */
355: if (i_c*ratioi != i && j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+(m_ghost_c+1)];
356: MatPreallocateSet(row,nc,cols,dnz,onz);
357: }
358: }
359: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
360: #if defined(PETSC_HAVE_CUDA)
361: /*
362: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
363: we don't want the original unconverted matrix copied to the GPU
364: */
365: if (dof > 1) {
366: MatBindToCPU(mat,PETSC_TRUE);
367: }
368: #endif
369: MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);
370: ConvertToAIJ(dac->mattype,&mattype);
371: MatSetType(mat,mattype);
372: MatSeqAIJSetPreallocation(mat,0,dnz);
373: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
374: MatPreallocateFinalize(dnz,onz);
376: /* loop over local fine grid nodes setting interpolation for those*/
377: if (!NEWVERSION) {
379: for (j=j_start; j<j_start+n_f; j++) {
380: for (i=i_start; i<i_start+m_f; i++) {
381: /* convert to local "natural" numbering and then to PETSc global numbering */
382: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
384: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
385: j_c = (j/ratioj); /* coarse grid node below fine grid node */
387: /*
388: Only include those interpolation points that are truly
389: nonzero. Note this is very important for final grid lines
390: in x and y directions; since they have no right/top neighbors
391: */
392: x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
393: y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
395: nc = 0;
396: /* one left and below; or we are right on it */
397: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
398: cols[nc] = col_shift + idx_c[col];
399: v[nc++] = x*y - x - y + 1.0;
400: /* one right and below */
401: if (i_c*ratioi != i) {
402: cols[nc] = col_shift + idx_c[col+1];
403: v[nc++] = -x*y + x;
404: }
405: /* one left and above */
406: if (j_c*ratioj != j) {
407: cols[nc] = col_shift + idx_c[col+m_ghost_c];
408: v[nc++] = -x*y + y;
409: }
410: /* one right and above */
411: if (j_c*ratioj != j && i_c*ratioi != i) {
412: cols[nc] = col_shift + idx_c[col+(m_ghost_c+1)];
413: v[nc++] = x*y;
414: }
415: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
416: }
417: }
419: } else {
420: PetscScalar Ni[4];
421: PetscScalar *xi,*eta;
422: PetscInt li,nxi,lj,neta;
424: /* compute local coordinate arrays */
425: nxi = ratioi + 1;
426: neta = ratioj + 1;
427: PetscMalloc1(nxi,&xi);
428: PetscMalloc1(neta,&eta);
429: for (li=0; li<nxi; li++) {
430: xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
431: }
432: for (lj=0; lj<neta; lj++) {
433: eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
434: }
436: /* loop over local fine grid nodes setting interpolation for those*/
437: for (j=j_start; j<j_start+n_f; j++) {
438: for (i=i_start; i<i_start+m_f; i++) {
439: /* convert to local "natural" numbering and then to PETSc global numbering */
440: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
442: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
443: j_c = (j/ratioj); /* coarse grid node below fine grid node */
445: /* remainders */
446: li = i - ratioi * (i/ratioi);
447: if (i==mx-1) li = nxi-1;
448: lj = j - ratioj * (j/ratioj);
449: if (j==my-1) lj = neta-1;
451: /* corners */
452: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
453: cols[0] = col_shift + idx_c[col]; /* left, below */
454: Ni[0] = 1.0;
455: if ((li==0) || (li==nxi-1)) {
456: if ((lj==0) || (lj==neta-1)) {
457: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
458: continue;
459: }
460: }
462: /* edges + interior */
463: /* remainders */
464: if (i==mx-1) i_c--;
465: if (j==my-1) j_c--;
467: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
468: cols[0] = col_shift + idx_c[col]; /* left, below */
469: cols[1] = col_shift + idx_c[col+1]; /* right, below */
470: cols[2] = col_shift + idx_c[col+m_ghost_c]; /* left, above */
471: cols[3] = col_shift + idx_c[col+(m_ghost_c+1)]; /* right, above */
473: Ni[0] = 0.25*(1.0-xi[li])*(1.0-eta[lj]);
474: Ni[1] = 0.25*(1.0+xi[li])*(1.0-eta[lj]);
475: Ni[2] = 0.25*(1.0-xi[li])*(1.0+eta[lj]);
476: Ni[3] = 0.25*(1.0+xi[li])*(1.0+eta[lj]);
478: nc = 0;
479: if (PetscAbsScalar(Ni[0])<1.0e-32) cols[0]=-1;
480: if (PetscAbsScalar(Ni[1])<1.0e-32) cols[1]=-1;
481: if (PetscAbsScalar(Ni[2])<1.0e-32) cols[2]=-1;
482: if (PetscAbsScalar(Ni[3])<1.0e-32) cols[3]=-1;
484: MatSetValues(mat,1,&row,4,cols,Ni,INSERT_VALUES);
485: }
486: }
487: PetscFree(xi);
488: PetscFree(eta);
489: }
490: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
491: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
492: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
493: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
494: MatCreateMAIJ(mat,dof,A);
495: MatDestroy(&mat);
496: return 0;
497: }
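/*
   Note on the weights used in the old-version branch above (a worked check, not new
   functionality): with x,y in [0,1] the four coefficients are the standard bilinear
   shape functions,

     x*y - x - y + 1 = (1-x)*(1-y)    left,  below
     -x*y + x        = x*(1-y)        right, below
     -x*y + y        = (1-x)*y        left,  above
     x*y                              right, above

   They sum to 1, so constants are interpolated exactly; the NEWVERSION branch builds the
   same values from the (xi,eta) element coordinates in [-1,1].
*/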
499: /*
500: Contributed by Andrei Draganescu <aidraga@sandia.gov>
501: */
502: PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac,DM daf,Mat *A)
503: {
504: PetscErrorCode ierr;
505: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
506: const PetscInt *idx_c,*idx_f;
507: ISLocalToGlobalMapping ltog_f,ltog_c;
508: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
509: PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
510: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
511: PetscMPIInt size_c,size_f,rank_f;
512: PetscScalar v[4];
513: Mat mat;
514: DMBoundaryType bx,by;
515: MatType mattype;
517: DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL);
518: DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
521: ratioi = mx/Mx;
522: ratioj = my/My;
528: DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL);
529: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL);
530: DMGetLocalToGlobalMapping(daf,&ltog_f);
531: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
533: DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL);
534: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL);
535: DMGetLocalToGlobalMapping(dac,&ltog_c);
536: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
538: /*
539: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
540: The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
541: processors). Its effective length is hence 4 times its normal length; this is
542: why col_scale multiplies the interpolation matrix column sizes.
543: col_shift allows each set of 1/4 processors to do its own interpolation using ITS
544: copy of the coarse vector. A bit of a hack, but it is hard to do better.
546: In the standard case when size_f == size_c col_scale == 1 and col_shift == 0
547: */
548: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
549: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
550: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
551: col_scale = size_f/size_c;
552: col_shift = Mx*My*(rank_f/size_c);
554: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);
555: for (j=j_start; j<j_start+n_f; j++) {
556: for (i=i_start; i<i_start+m_f; i++) {
557: /* convert to local "natural" numbering and then to PETSc global numbering */
558: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
560: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
561: j_c = (j/ratioj); /* coarse grid node below fine grid node */
564: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
566: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
568: /*
569: Only include those interpolation points that are truly
570: nonzero. Note this is very important for final grid lines
571: in x and y directions; since they have no right/top neighbors
572: */
573: nc = 0;
574: /* one left and below; or we are right on it */
575: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
576: cols[nc++] = col_shift + idx_c[col];
577: MatPreallocateSet(row,nc,cols,dnz,onz);
578: }
579: }
580: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
581: #if defined(PETSC_HAVE_CUDA)
582: /*
583: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
584: we don't want the original unconverted matrix copied to the GPU
585: */
586: if (dof > 1) {
587: MatBindToCPU(mat,PETSC_TRUE);
588: }
589: #endif
590: MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);
591: ConvertToAIJ(dac->mattype,&mattype);
592: MatSetType(mat,mattype);
593: MatSeqAIJSetPreallocation(mat,0,dnz);
594: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
595: MatPreallocateFinalize(dnz,onz);
597: /* loop over local fine grid nodes setting interpolation for those*/
598: for (j=j_start; j<j_start+n_f; j++) {
599: for (i=i_start; i<i_start+m_f; i++) {
600: /* convert to local "natural" numbering and then to PETSc global numbering */
601: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
603: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
604: j_c = (j/ratioj); /* coarse grid node below fine grid node */
605: nc = 0;
606: /* one left and below; or we are right on it */
607: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
608: cols[nc] = col_shift + idx_c[col];
609: v[nc++] = 1.0;
611: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
612: }
613: }
614: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
615: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
616: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
617: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
618: MatCreateMAIJ(mat,dof,A);
619: MatDestroy(&mat);
620: PetscLogFlops(13.0*m_f*n_f);
621: return 0;
622: }
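/*
   Behavior note (illustrative): each fine-grid row of the Q0 matrix above has a single
   entry of 1.0 in the column of the coarse cell containing it, so MatInterpolate() copies
   every coarse value onto the fine cells it covers, while the transpose action (as applied
   by MatRestrict()) sums the fine values over each coarse cell.
*/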
624: /*
625: Contributed by Jianming Yang <jianming-yang@uiowa.edu>
626: */
627: PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac,DM daf,Mat *A)
628: {
629: PetscErrorCode ierr;
630: PetscInt i,j,l,i_start,j_start,l_start,m_f,n_f,p_f,Mx,My,Mz,dof;
631: const PetscInt *idx_c,*idx_f;
632: ISLocalToGlobalMapping ltog_f,ltog_c;
633: PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c,nc,*dnz,*onz;
634: PetscInt row,col,i_start_ghost,j_start_ghost,l_start_ghost,cols[8],mx,m_c,my,n_c,mz,p_c,ratioi,ratioj,ratiol;
635: PetscInt i_c,j_c,l_c,i_start_c,j_start_c,l_start_c,i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,col_shift,col_scale;
636: PetscMPIInt size_c,size_f,rank_f;
637: PetscScalar v[8];
638: Mat mat;
639: DMBoundaryType bx,by,bz;
640: MatType mattype;
642: DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL);
646: DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
647: ratioi = mx/Mx;
648: ratioj = my/My;
649: ratiol = mz/Mz;
657: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
658: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
659: DMGetLocalToGlobalMapping(daf,&ltog_f);
660: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
662: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
663: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
664: DMGetLocalToGlobalMapping(dac,&ltog_c);
665: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
667: /*
668: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
669: The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
670: processors). Its effective length is hence 4 times its normal length; this is
671: why col_scale multiplies the interpolation matrix column sizes.
672: col_shift allows each set of 1/4 processors to do its own interpolation using ITS
673: copy of the coarse vector. A bit of a hack, but it is hard to do better.
675: In the standard case when size_f == size_c col_scale == 1 and col_shift == 0
676: */
677: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
678: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
679: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
680: col_scale = size_f/size_c;
681: col_shift = Mx*My*Mz*(rank_f/size_c);
683: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f*p_f,col_scale*m_c*n_c*p_c,dnz,onz);
684: for (l=l_start; l<l_start+p_f; l++) {
685: for (j=j_start; j<j_start+n_f; j++) {
686: for (i=i_start; i<i_start+m_f; i++) {
687: /* convert to local "natural" numbering and then to PETSc global numbering */
688: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
690: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
691: j_c = (j/ratioj); /* coarse grid node below fine grid node */
692: l_c = (l/ratiol);
695: if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
697: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
699: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
701: /*
702: Only include those interpolation points that are truly
703: nonzero. Note this is very important for final grid lines
704: in x and y directions; since they have no right/top neighbors
705: */
706: nc = 0;
707: /* one left and below; or we are right on it */
708: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
709: cols[nc++] = col_shift + idx_c[col];
710: MatPreallocateSet(row,nc,cols,dnz,onz);
711: }
712: }
713: }
714: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
715: #if defined(PETSC_HAVE_CUDA)
716: /*
717: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
718: we don't want the original unconverted matrix copied to the GPU
719: */
720: if (dof > 1) {
721: MatBindToCPU(mat,PETSC_TRUE);
722: }
723: #endif
724: MatSetSizes(mat,m_f*n_f*p_f,col_scale*m_c*n_c*p_c,mx*my*mz,col_scale*Mx*My*Mz);
725: ConvertToAIJ(dac->mattype,&mattype);
726: MatSetType(mat,mattype);
727: MatSeqAIJSetPreallocation(mat,0,dnz);
728: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
729: MatPreallocateFinalize(dnz,onz);
731: /* loop over local fine grid nodes setting interpolation for those*/
732: for (l=l_start; l<l_start+p_f; l++) {
733: for (j=j_start; j<j_start+n_f; j++) {
734: for (i=i_start; i<i_start+m_f; i++) {
735: /* convert to local "natural" numbering and then to PETSc global numbering */
736: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
738: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
739: j_c = (j/ratioj); /* coarse grid node below fine grid node */
740: l_c = (l/ratiol);
741: nc = 0;
742: /* one left and below; or we are right on it */
743: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
744: cols[nc] = col_shift + idx_c[col];
745: v[nc++] = 1.0;
747: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
748: }
749: }
750: }
751: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
752: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
753: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
754: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
755: MatCreateMAIJ(mat,dof,A);
756: MatDestroy(&mat);
757: PetscLogFlops(13.0*m_f*n_f*p_f);
758: return 0;
759: }
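/*
   Note on the indexing used throughout this file (explanatory only): a fine-grid point
   (i,j,l) is first converted to a local lexicographic offset inside the processor's
   ghosted patch,

     offset = m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost)

   and idx_f[offset] then gives the PETSc global block index used as the matrix row;
   idx_c plays the same role for the coarse-grid columns. This is why the ghost corners,
   rather than the owned corners, appear in every offset computation.
*/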
761: PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac,DM daf,Mat *A)
762: {
763: PetscErrorCode ierr;
764: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof,l;
765: const PetscInt *idx_c,*idx_f;
766: ISLocalToGlobalMapping ltog_f,ltog_c;
767: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,Mz,mz;
768: PetscInt row,col,i_start_ghost,j_start_ghost,cols[8],mx,m_c,my,nc,ratioi,ratioj,ratiok;
769: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
770: PetscInt l_start,p_f,l_start_ghost,p_ghost,l_start_c,p_c;
771: PetscInt l_start_ghost_c,p_ghost_c,l_c,*dnz,*onz;
772: PetscScalar v[8],x,y,z;
773: Mat mat;
774: DMBoundaryType bx,by,bz;
775: MatType mattype;
777: DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL);
778: DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
779: if (mx == Mx) {
780: ratioi = 1;
781: } else if (bx == DM_BOUNDARY_PERIODIC) {
783: ratioi = mx/Mx;
785: } else {
787: ratioi = (mx-1)/(Mx-1);
789: }
790: if (my == My) {
791: ratioj = 1;
792: } else if (by == DM_BOUNDARY_PERIODIC) {
794: ratioj = my/My;
796: } else {
798: ratioj = (my-1)/(My-1);
800: }
801: if (mz == Mz) {
802: ratiok = 1;
803: } else if (bz == DM_BOUNDARY_PERIODIC) {
805: ratiok = mz/Mz;
807: } else {
809: ratiok = (mz-1)/(Mz-1);
811: }
813: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
814: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
815: DMGetLocalToGlobalMapping(daf,&ltog_f);
816: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
818: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
819: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
820: DMGetLocalToGlobalMapping(dac,&ltog_c);
821: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
823: /* create interpolation matrix, determining exact preallocation */
824: MatPreallocateInitialize(PetscObjectComm((PetscObject)dac),m_f*n_f*p_f,m_c*n_c*p_c,dnz,onz);
825: /* loop over local fine grid nodes counting interpolating points */
826: for (l=l_start; l<l_start+p_f; l++) {
827: for (j=j_start; j<j_start+n_f; j++) {
828: for (i=i_start; i<i_start+m_f; i++) {
829: /* convert to local "natural" numbering and then to PETSc global numbering */
830: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
831: i_c = (i/ratioi);
832: j_c = (j/ratioj);
833: l_c = (l/ratiok);
835: if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
837: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
839: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
841: /*
842: Only include those interpolation points that are truly
843: nonzero. Note this is very important for final grid lines
844: in x and y directions; since they have no right/top neighbors
845: */
846: nc = 0;
847: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
848: cols[nc++] = idx_c[col];
849: if (i_c*ratioi != i) {
850: cols[nc++] = idx_c[col+1];
851: }
852: if (j_c*ratioj != j) {
853: cols[nc++] = idx_c[col+m_ghost_c];
854: }
855: if (l_c*ratiok != l) {
856: cols[nc++] = idx_c[col+m_ghost_c*n_ghost_c];
857: }
858: if (j_c*ratioj != j && i_c*ratioi != i) {
859: cols[nc++] = idx_c[col+(m_ghost_c+1)];
860: }
861: if (j_c*ratioj != j && l_c*ratiok != l) {
862: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
863: }
864: if (i_c*ratioi != i && l_c*ratiok != l) {
865: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
866: }
867: if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
868: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
869: }
870: MatPreallocateSet(row,nc,cols,dnz,onz);
871: }
872: }
873: }
874: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
875: #if defined(PETSC_HAVE_CUDA)
876: /*
877: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
878: we don't want the original unconverted matrix copied to the GPU
879: */
880: if (dof > 1) {
881: MatBindToCPU(mat,PETSC_TRUE);
882: }
883: #endif
884: MatSetSizes(mat,m_f*n_f*p_f,m_c*n_c*p_c,mx*my*mz,Mx*My*Mz);
885: ConvertToAIJ(dac->mattype,&mattype);
886: MatSetType(mat,mattype);
887: MatSeqAIJSetPreallocation(mat,0,dnz);
888: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
889: MatPreallocateFinalize(dnz,onz);
891: /* loop over local fine grid nodes setting interpolation for those*/
892: if (!NEWVERSION) {
894: for (l=l_start; l<l_start+p_f; l++) {
895: for (j=j_start; j<j_start+n_f; j++) {
896: for (i=i_start; i<i_start+m_f; i++) {
897: /* convert to local "natural" numbering and then to PETSc global numbering */
898: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
900: i_c = (i/ratioi);
901: j_c = (j/ratioj);
902: l_c = (l/ratiok);
904: /*
905: Only include those interpolation points that are truly
906: nonzero. Note this is very important for final grid lines
907: in x and y directions; since they have no right/top neighbors
908: */
909: x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
910: y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
911: z = ((PetscReal)(l - l_c*ratiok))/((PetscReal)ratiok);
913: nc = 0;
914: /* one left and below; or we are right on it */
915: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
917: cols[nc] = idx_c[col];
918: v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
920: if (i_c*ratioi != i) {
921: cols[nc] = idx_c[col+1];
922: v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
923: }
925: if (j_c*ratioj != j) {
926: cols[nc] = idx_c[col+m_ghost_c];
927: v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
928: }
930: if (l_c*ratiok != l) {
931: cols[nc] = idx_c[col+m_ghost_c*n_ghost_c];
932: v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
933: }
935: if (j_c*ratioj != j && i_c*ratioi != i) {
936: cols[nc] = idx_c[col+(m_ghost_c+1)];
937: v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
938: }
940: if (j_c*ratioj != j && l_c*ratiok != l) {
941: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
942: v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
943: }
945: if (i_c*ratioi != i && l_c*ratiok != l) {
946: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
947: v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
948: }
950: if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
951: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
952: v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
953: }
954: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
955: }
956: }
957: }
959: } else {
960: PetscScalar *xi,*eta,*zeta;
961: PetscInt li,nxi,lj,neta,lk,nzeta,n;
962: PetscScalar Ni[8];
964: /* compute local coordinate arrays */
965: nxi = ratioi + 1;
966: neta = ratioj + 1;
967: nzeta = ratiok + 1;
968: PetscMalloc1(nxi,&xi);
969: PetscMalloc1(neta,&eta);
970: PetscMalloc1(nzeta,&zeta);
971: for (li=0; li<nxi; li++) xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
972: for (lj=0; lj<neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
973: for (lk=0; lk<nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk*(2.0/(PetscScalar)(nzeta-1));
975: for (l=l_start; l<l_start+p_f; l++) {
976: for (j=j_start; j<j_start+n_f; j++) {
977: for (i=i_start; i<i_start+m_f; i++) {
978: /* convert to local "natural" numbering and then to PETSc global numbering */
979: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
981: i_c = (i/ratioi);
982: j_c = (j/ratioj);
983: l_c = (l/ratiok);
985: /* remainders */
986: li = i - ratioi * (i/ratioi);
987: if (i==mx-1) li = nxi-1;
988: lj = j - ratioj * (j/ratioj);
989: if (j==my-1) lj = neta-1;
990: lk = l - ratiok * (l/ratiok);
991: if (l==mz-1) lk = nzeta-1;
993: /* corners */
994: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
995: cols[0] = idx_c[col];
996: Ni[0] = 1.0;
997: if ((li==0) || (li==nxi-1)) {
998: if ((lj==0) || (lj==neta-1)) {
999: if ((lk==0) || (lk==nzeta-1)) {
1000: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
1001: continue;
1002: }
1003: }
1004: }
1006: /* edges + interior */
1007: /* remainders */
1008: if (i==mx-1) i_c--;
1009: if (j==my-1) j_c--;
1010: if (l==mz-1) l_c--;
1012: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
1013: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
1014: cols[1] = idx_c[col+1]; /* one right and below */
1015: cols[2] = idx_c[col+m_ghost_c]; /* one left and above */
1016: cols[3] = idx_c[col+(m_ghost_c+1)]; /* one right and above */
1018: cols[4] = idx_c[col+m_ghost_c*n_ghost_c]; /* one left and below and front; or we are right on it */
1019: cols[5] = idx_c[col+(m_ghost_c*n_ghost_c+1)]; /* one right and below, and front */
1020: cols[6] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)]; /* one left and above and front*/
1021: cols[7] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; /* one right and above and front */
1023: Ni[0] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
1024: Ni[1] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
1025: Ni[2] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
1026: Ni[3] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
1028: Ni[4] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
1029: Ni[5] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
1030: Ni[6] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
1031: Ni[7] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
1033: for (n=0; n<8; n++) {
1034: if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
1035: }
1036: MatSetValues(mat,1,&row,8,cols,Ni,INSERT_VALUES);
1038: }
1039: }
1040: }
1041: PetscFree(xi);
1042: PetscFree(eta);
1043: PetscFree(zeta);
1044: }
1045: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1046: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1047: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
1048: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
1050: MatCreateMAIJ(mat,dof,A);
1051: MatDestroy(&mat);
1052: return 0;
1053: }
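/*
   Note on the weights above (a worked check): writing X = 2x-1, Y = 2y-1, Z = 2z-1 with
   x,y,z in [0,1], each coefficient .125*(1 +/- X)*(1 +/- Y)*(1 +/- Z) reduces to a product
   of one-dimensional factors, e.g. .125*(1-X)*(1-Y)*(1-Z) = (1-x)*(1-y)*(1-z), i.e. the
   standard trilinear shape functions. The eight weights sum to 1, matching the 2D case.
*/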
1055: PetscErrorCode DMCreateInterpolation_DA(DM dac,DM daf,Mat *A,Vec *scale)
1056: {
1057: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1058: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1059: DMDAStencilType stc,stf;
1060: DM_DA *ddc = (DM_DA*)dac->data;
1067: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1068: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1078: if (ddc->interptype == DMDA_Q1) {
1079: if (dimc == 1) {
1080: DMCreateInterpolation_DA_1D_Q1(dac,daf,A);
1081: } else if (dimc == 2) {
1082: DMCreateInterpolation_DA_2D_Q1(dac,daf,A);
1083: } else if (dimc == 3) {
1084: DMCreateInterpolation_DA_3D_Q1(dac,daf,A);
1085: } else SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
1086: } else if (ddc->interptype == DMDA_Q0) {
1087: if (dimc == 1) {
1088: DMCreateInterpolation_DA_1D_Q0(dac,daf,A);
1089: } else if (dimc == 2) {
1090: DMCreateInterpolation_DA_2D_Q0(dac,daf,A);
1091: } else if (dimc == 3) {
1092: DMCreateInterpolation_DA_3D_Q0(dac,daf,A);
1093: } else SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
1094: }
1095: if (scale) {
1096: DMCreateInterpolationScale((DM)dac,(DM)daf,*A,scale);
1097: }
1098: return 0;
1099: }
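/*
   Usage sketch (illustrative, not part of the original file): in a geometric multigrid
   setup the matrix and optional scaling vector produced here are typically used as below.
   The description of scale as 1/(row sums of the transpose) is an assumption based on
   DMCreateInterpolationScale()'s usual role of making restriction preserve constants.

     Mat A;
     Vec scale,vf,vc;
     DMCreateInterpolation(dac,daf,&A,&scale);
     DMCreateGlobalVector(daf,&vf);
     DMCreateGlobalVector(dac,&vc);
     MatRestrict(A,vf,vc);             // vc = A^T * vf
     VecPointwiseMult(vc,scale,vc);    // rescale so a constant maps to itself
*/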
1101: PetscErrorCode DMCreateInjection_DA_1D(DM dac,DM daf,VecScatter *inject)
1102: {
1103: PetscInt i,i_start,m_f,Mx,dof;
1104: const PetscInt *idx_f;
1105: ISLocalToGlobalMapping ltog_f;
1106: PetscInt m_ghost,m_ghost_c;
1107: PetscInt row,i_start_ghost,mx,m_c,nc,ratioi;
1108: PetscInt i_start_c,i_start_ghost_c;
1109: PetscInt *cols;
1110: DMBoundaryType bx;
1111: Vec vecf,vecc;
1112: IS isf;
1114: DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL);
1115: DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
1116: if (bx == DM_BOUNDARY_PERIODIC) {
1117: ratioi = mx/Mx;
1119: } else {
1120: ratioi = (mx-1)/(Mx-1);
1122: }
1124: DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL);
1125: DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL);
1126: DMGetLocalToGlobalMapping(daf,&ltog_f);
1127: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1129: DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL);
1130: DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL);
1132: /* loop over local fine grid nodes setting interpolation for those*/
1133: nc = 0;
1134: PetscMalloc1(m_f,&cols);
1136: for (i=i_start_c; i<i_start_c+m_c; i++) {
1137: PetscInt i_f = i*ratioi;
1141: row = idx_f[(i_f-i_start_ghost)];
1142: cols[nc++] = row;
1143: }
1145: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1146: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1147: DMGetGlobalVector(dac,&vecc);
1148: DMGetGlobalVector(daf,&vecf);
1149: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1150: DMRestoreGlobalVector(dac,&vecc);
1151: DMRestoreGlobalVector(daf,&vecf);
1152: ISDestroy(&isf);
1153: return 0;
1154: }
1156: PetscErrorCode DMCreateInjection_DA_2D(DM dac,DM daf,VecScatter *inject)
1157: {
1158: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
1159: const PetscInt *idx_c,*idx_f;
1160: ISLocalToGlobalMapping ltog_f,ltog_c;
1161: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c;
1162: PetscInt row,i_start_ghost,j_start_ghost,mx,m_c,my,nc,ratioi,ratioj;
1163: PetscInt i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
1164: PetscInt *cols;
1165: DMBoundaryType bx,by;
1166: Vec vecf,vecc;
1167: IS isf;
1169: DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL);
1170: DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
1171: if (bx == DM_BOUNDARY_PERIODIC) {
1172: ratioi = mx/Mx;
1174: } else {
1175: ratioi = (mx-1)/(Mx-1);
1177: }
1178: if (by == DM_BOUNDARY_PERIODIC) {
1179: ratioj = my/My;
1181: } else {
1182: ratioj = (my-1)/(My-1);
1184: }
1186: DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL);
1187: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL);
1188: DMGetLocalToGlobalMapping(daf,&ltog_f);
1189: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1191: DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL);
1192: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL);
1193: DMGetLocalToGlobalMapping(dac,&ltog_c);
1194: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
1196: /* loop over local fine grid nodes setting interpolation for those*/
1197: nc = 0;
1198: PetscMalloc1(n_f*m_f,&cols);
1199: for (j=j_start_c; j<j_start_c+n_c; j++) {
1200: for (i=i_start_c; i<i_start_c+m_c; i++) {
1201: PetscInt i_f = i*ratioi,j_f = j*ratioj;
1203: if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
1205: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1206: row = idx_f[(m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
1207: cols[nc++] = row;
1208: }
1209: }
1210: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1211: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1213: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1214: DMGetGlobalVector(dac,&vecc);
1215: DMGetGlobalVector(daf,&vecf);
1216: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1217: DMRestoreGlobalVector(dac,&vecc);
1218: DMRestoreGlobalVector(daf,&vecf);
1219: ISDestroy(&isf);
1220: return 0;
1221: }
1223: PetscErrorCode DMCreateInjection_DA_3D(DM dac,DM daf,VecScatter *inject)
1224: {
1225: PetscInt i,j,k,i_start,j_start,k_start,m_f,n_f,p_f,Mx,My,Mz;
1226: PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c;
1227: PetscInt i_start_ghost,j_start_ghost,k_start_ghost;
1228: PetscInt mx,my,mz,ratioi,ratioj,ratiok;
1229: PetscInt i_start_c,j_start_c,k_start_c;
1230: PetscInt m_c,n_c,p_c;
1231: PetscInt i_start_ghost_c,j_start_ghost_c,k_start_ghost_c;
1232: PetscInt row,nc,dof;
1233: const PetscInt *idx_c,*idx_f;
1234: ISLocalToGlobalMapping ltog_f,ltog_c;
1235: PetscInt *cols;
1236: DMBoundaryType bx,by,bz;
1237: Vec vecf,vecc;
1238: IS isf;
1240: DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL);
1241: DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL);
1243: if (bx == DM_BOUNDARY_PERIODIC) {
1244: ratioi = mx/Mx;
1246: } else {
1247: ratioi = (mx-1)/(Mx-1);
1249: }
1250: if (by == DM_BOUNDARY_PERIODIC) {
1251: ratioj = my/My;
1253: } else {
1254: ratioj = (my-1)/(My-1);
1256: }
1257: if (bz == DM_BOUNDARY_PERIODIC) {
1258: ratiok = mz/Mz;
1260: } else {
1261: ratiok = (mz-1)/(Mz-1);
1263: }
1265: DMDAGetCorners(daf,&i_start,&j_start,&k_start,&m_f,&n_f,&p_f);
1266: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&k_start_ghost,&m_ghost,&n_ghost,&p_ghost);
1267: DMGetLocalToGlobalMapping(daf,&ltog_f);
1268: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1270: DMDAGetCorners(dac,&i_start_c,&j_start_c,&k_start_c,&m_c,&n_c,&p_c);
1271: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&k_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
1272: DMGetLocalToGlobalMapping(dac,&ltog_c);
1273: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
1275: /* loop over local fine grid nodes setting interpolation for those*/
1276: nc = 0;
1277: PetscMalloc1(n_f*m_f*p_f,&cols);
1278: for (k=k_start_c; k<k_start_c+p_c; k++) {
1279: for (j=j_start_c; j<j_start_c+n_c; j++) {
1280: for (i=i_start_c; i<i_start_c+m_c; i++) {
1281: PetscInt i_f = i*ratioi,j_f = j*ratioj,k_f = k*ratiok;
1283: "k_c %D k_f %D fine ghost range [%D,%D]",k,k_f,k_start_ghost,k_start_ghost+p_ghost);
1285: "j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
1287: "i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1288: row = idx_f[(m_ghost*n_ghost*(k_f-k_start_ghost) + m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
1289: cols[nc++] = row;
1290: }
1291: }
1292: }
1293: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1294: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1296: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1297: DMGetGlobalVector(dac,&vecc);
1298: DMGetGlobalVector(daf,&vecf);
1299: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1300: DMRestoreGlobalVector(dac,&vecc);
1301: DMRestoreGlobalVector(daf,&vecf);
1302: ISDestroy(&isf);
1303: return 0;
1304: }
1306: PetscErrorCode DMCreateInjection_DA(DM dac,DM daf,Mat *mat)
1307: {
1308: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1309: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1310: DMDAStencilType stc,stf;
1311: VecScatter inject = NULL;
1317: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1318: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1328: if (dimc == 1) {
1329: DMCreateInjection_DA_1D(dac,daf,&inject);
1330: } else if (dimc == 2) {
1331: DMCreateInjection_DA_2D(dac,daf,&inject);
1332: } else if (dimc == 3) {
1333: DMCreateInjection_DA_3D(dac,daf,&inject);
1334: }
1335: MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat);
1336: VecScatterDestroy(&inject);
1337: return 0;
1338: }
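/*
   Usage sketch (illustrative): injection is reached through the public DMCreateInjection()
   interface; the returned matrix is a MATSCATTER wrapper around the VecScatter built above,
   and applying it simply samples the fine-grid values at the coarse-grid points.

     Mat inj;
     Vec vf,vc;
     DMCreateInjection(dac,daf,&inj);
     DMCreateGlobalVector(daf,&vf);
     DMCreateGlobalVector(dac,&vc);
     MatRestrict(inj,vf,vc);   // vc(i_c) = vf(ratio*i_c) in each direction
*/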
1340: /*@
1341: DMCreateAggregates - Deprecated, see DMDACreateAggregates.
1343: Level: intermediate
1344: @*/
1345: PetscErrorCode DMCreateAggregates(DM dac,DM daf,Mat *mat)
1346: {
1347: return DMDACreateAggregates(dac,daf,mat);
1348: }
1350: /*@
1351: DMDACreateAggregates - Gets the aggregates that map between
1352: grids associated with two DMDAs.
1354: Collective on dac
1356: Input Parameters:
1357: + dac - the coarse grid DMDA
1358: - daf - the fine grid DMDA
1360: Output Parameters:
1361: . rest - the restriction matrix (transpose of the projection matrix)
1363: Level: intermediate
1365: Note: This routine is not used by PETSc.
1366: It is not clear what its use case is and it may be removed in a future release.
1367: Users should contact petsc-maint@mcs.anl.gov if they plan to use it.
1369: .seealso: DMRefine(), DMCreateInjection(), DMCreateInterpolation()
1370: @*/
1371: PetscErrorCode DMDACreateAggregates(DM dac,DM daf,Mat *rest)
1372: {
1373: PetscErrorCode ierr;
1374: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc;
1375: PetscInt dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1376: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1377: DMDAStencilType stc,stf;
1378: PetscInt i,j,l;
1379: PetscInt i_start,j_start,l_start, m_f,n_f,p_f;
1380: PetscInt i_start_ghost,j_start_ghost,l_start_ghost,m_ghost,n_ghost,p_ghost;
1381: const PetscInt *idx_f;
1382: PetscInt i_c,j_c,l_c;
1383: PetscInt i_start_c,j_start_c,l_start_c, m_c,n_c,p_c;
1384: PetscInt i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,m_ghost_c,n_ghost_c,p_ghost_c;
1385: const PetscInt *idx_c;
1386: PetscInt d;
1387: PetscInt a;
1388: PetscInt max_agg_size;
1389: PetscInt *fine_nodes;
1390: PetscScalar *one_vec;
1391: PetscInt fn_idx;
1392: ISLocalToGlobalMapping ltogmf,ltogmc;
1398: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1399: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1410: if (Pc < 0) Pc = 1;
1411: if (Pf < 0) Pf = 1;
1412: if (Nc < 0) Nc = 1;
1413: if (Nf < 0) Nf = 1;
1415: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
1416: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
1418: DMGetLocalToGlobalMapping(daf,&ltogmf);
1419: ISLocalToGlobalMappingGetIndices(ltogmf,&idx_f);
1421: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
1422: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
1424: DMGetLocalToGlobalMapping(dac,&ltogmc);
1425: ISLocalToGlobalMappingGetIndices(ltogmc,&idx_c);
1427: /*
1428: Basic idea is as follows. Here's a 2D example, suppose r_x, r_y are the ratios
1429: for dimension 1 and 2 respectively.
1430: Let (i,j) be a coarse grid node. All the fine grid nodes between r_x*i and r_x*(i+1)
1431: and r_y*j and r_y*(j+1) will be grouped into the same coarse grid aggregate.
1432: Each specific dof on the fine grid is mapped to one dof on the coarse grid.
1433: */
1435: max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1);
1437: /* create the matrix that will contain the restriction operator */
1438: MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c*n_c*p_c*dofc, m_f*n_f*p_f*doff, Mc*Nc*Pc*dofc, Mf*Nf*Pf*doff,
1439: max_agg_size, NULL, max_agg_size, NULL, rest);
1441: /* store nodes in the fine grid here */
1442: PetscMalloc2(max_agg_size, &one_vec,max_agg_size, &fine_nodes);
1443: for (i=0; i<max_agg_size; i++) one_vec[i] = 1.0;
1445: /* loop over all coarse nodes */
1446: for (l_c=l_start_c; l_c<l_start_c+p_c; l_c++) {
1447: for (j_c=j_start_c; j_c<j_start_c+n_c; j_c++) {
1448: for (i_c=i_start_c; i_c<i_start_c+m_c; i_c++) {
1449: for (d=0; d<dofc; d++) {
1450: /* convert to local "natural" numbering and then to PETSc global numbering */
1451: a = idx_c[dofc*(m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c))] + d;
1453: fn_idx = 0;
1454: /* Corresponding fine points are all points (i_f, j_f, l_f) such that
1455: i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
1456: (same for other dimensions)
1457: */
1458: for (l=l_c*Pf/Pc; l<PetscMin((l_c+1)*Pf/Pc,Pf); l++) {
1459: for (j=j_c*Nf/Nc; j<PetscMin((j_c+1)*Nf/Nc,Nf); j++) {
1460: for (i=i_c*Mf/Mc; i<PetscMin((i_c+1)*Mf/Mc,Mf); i++) {
1461: fine_nodes[fn_idx] = idx_f[doff*(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))] + d;
1462: fn_idx++;
1463: }
1464: }
1465: }
1466: /* add all these points to one aggregate */
1467: MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
1468: }
1469: }
1470: }
1471: }
1472: ISLocalToGlobalMappingRestoreIndices(ltogmf,&idx_f);
1473: ISLocalToGlobalMappingRestoreIndices(ltogmc,&idx_c);
1474: PetscFree2(one_vec,fine_nodes);
1475: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
1476: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
1477: return 0;
1478: }
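/*
   Usage sketch (illustrative): the aggregates matrix has one row per coarse dof and one
   column per fine dof, with a 1.0 wherever the fine dof belongs to that coarse aggregate.

     Mat agg;
     DMDACreateAggregates(dac,daf,&agg);   // rows: coarse dofs, columns: fine dofs
     MatDestroy(&agg);
*/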