Actual source code: dasub.c
petsc-3.14.6 2021-03-30
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to a given x,y,z point in the coordinates of the DMDA

   Collective on da

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinate (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value

@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  Vec         coors;
  DM          dacoors;
  DMDACoor2d  **c;
  PetscInt    i,j,xs,xm,ys,ym;
  PetscReal   d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt rank,root;

  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  DMGetCoordinateDM(da,&dacoors);
  DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);
  DMGetCoordinates(da,&coors);
  DMDAVecGetArrayRead(dacoors,coors,&c);
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
    rank++;
  }
  MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArrayRead(dacoors,coors,&c);
  return(0);
}
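
/*
   Usage sketch (editorial illustration, not part of the original dasub.c):
   locate the grid point of a 2d DMDA closest to the physical point (0.5,0.5).
   It assumes the DMDA already carries coordinates, here set with
   DMDASetUniformCoordinates(), and elides error checking for brevity.

      DM          da;
      PetscInt    i,j,k;
      PetscScalar X,Y,Z;

      DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,
                   10,10,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&da);
      DMSetUp(da);
      DMDASetUniformCoordinates(da,0.0,1.0,0.0,1.0,0.0,0.0);
      DMDAGetLogicalCoordinate(da,0.5,0.5,0.0,&i,&j,&k,&X,&Y,&Z);
      DMDestroy(&da);

   On the process that owns the closest point, i and j hold its logical
   indices; on all other processes they are set to -1. X and Y are broadcast
   to every process.
*/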

/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

@*/
PetscErrorCode DMDAGetRay(DM da,DMDirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA       *dd = (DM_DA*)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt    *indices,i,j;

  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);
  DMDAGetAO(da, &ao);
  if (!rank) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscMalloc1(dd->w, &indices);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        AOApplicationToPetsc(ao, dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);
      } else if (dir == DM_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    } else {
      if (dir == DM_Y) {
        PetscMalloc1(dd->w*dd->M,&indices);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        AOApplicationToPetsc(ao,dd->M*dd->w,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);
      } else if (dir == DM_X) {
        PetscMalloc1(dd->w*dd->N,&indices);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        AOApplicationToPetsc(ao,dd->w*dd->N,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDirection");
    }
  } else {
    VecCreateSeq(PETSC_COMM_SELF, 0, newvec);
    ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);
  }
  DMGetGlobalVector(da, &vec);
  VecScatterCreate(vec, is, *newvec, NULL, scatter);
  DMRestoreGlobalVector(da, &vec);
  ISDestroy(&is);
  return(0);
}
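
/*
   Usage sketch (editorial illustration, not part of the original dasub.c):
   gather the grid line j = 2 (the DM_Y direction) of a global vector onto
   process 0, assuming a 2d DMDA named da and with error checking elided.

      Vec        g,ray;
      VecScatter scatter;

      DMCreateGlobalVector(da,&g);
      VecSet(g,1.0);
      DMDAGetRay(da,DM_Y,2,&ray,&scatter);
      VecScatterBegin(scatter,g,ray,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterEnd(scatter,g,ray,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterDestroy(&scatter);
      VecDestroy(&ray);
      VecDestroy(&g);

   After the scatter, ray holds the selected line on process 0 and has length
   zero on every other process.
*/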

/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubset(DM da,DMDirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group   group,subgroup;
  PetscInt    i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt size,*ranks = NULL;
  DM_DA       *dd = (DM_DA*)da->data;

  flag = 0;
  DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DM_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  PetscMalloc2(size,&owners,size,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));
  ict = 0;
  PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscInfo1(da,"%D ",i);
    }
  }
  PetscInfo(da,"\n");
  MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  return(0);
}
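
/*
   Usage sketch (editorial illustration, not part of the original dasub.c):
   obtain the communicator of the processes owning the z = 0 plane of a 3d
   DMDA named da, use it, then free it. All processes call the routine
   collectively, but only those owning the plane receive a communicator
   different from MPI_COMM_NULL. Error checking elided.

      MPI_Comm    zcomm;
      PetscMPIInt zrank;

      DMDAGetProcessorSubset(da,DM_Z,0,&zcomm);
      if (zcomm != MPI_COMM_NULL) {
        MPI_Comm_rank(zcomm,&zrank);
        MPI_Comm_free(&zcomm);
      }
*/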

/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA that are adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on da

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm    comm;
  MPI_Group   group, subgroup;
  PetscInt    subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt size, *subgroupRanks = NULL;
  PetscInt    xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscObjectGetComm((PetscObject)da,&comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  PetscMalloc2(size, &firstPoints, size, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo1(da, "%D ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  return(0);
}
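
/*
   Usage sketch (editorial illustration, not part of the original dasub.c):
   split the processes of a DMDA named da into sub-communicators whose members
   share the same range of x indices, e.g. to reduce one-dimensional data over
   the remaining directions of the process grid. Every process receives a
   valid subcomm, which it should eventually free. Error checking elided.

      MPI_Comm    xcomm;
      PetscMPIInt xsize;

      DMDAGetProcessorSubsets(da,DM_X,&xcomm);
      MPI_Comm_size(xcomm,&xsize);
      MPI_Comm_free(&xcomm);
*/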