Actual source code: dasub.c
petsc-3.10.5 2019-03-28
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate for the closest mesh point to an x,y,z point in the coordinates of the DMDA

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinate (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  Vec         coors;
  DM          dacoors;
  DMDACoor2d  **c;
  PetscInt    i,j,xs,xm,ys,ym;
  PetscReal   d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt rank,root;

  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");
  *II = -1;
  *JJ = -1;

  DMGetCoordinateDM(da,&dacoors);
  DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);
  DMGetCoordinates(da,&coors);
  DMDAVecGetArrayRead(dacoors,coors,&c);
  /* find the locally owned grid point closest to (x,y) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart( (c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y) ));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
    rank++;   /* shift by one so the sum below recovers the owning rank */
  }
  /* processes that do not hold the global minimum contribute 0, so when the minimum is unique the sum is (owning rank)+1 */
  MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArrayRead(dacoors,coors,&c);
  return(0);
}
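
/*
   Illustrative usage sketch (not part of dasub.c): how a caller might locate the
   grid point of a 2d DMDA closest to a physical point.  The DMDA 'da' is assumed
   to already carry coordinates, e.g. set with DMDASetUniformCoordinates(); the
   name ExampleLocatePoint and the target point (0.5,0.5) are made up for
   illustration.
*/
static PetscErrorCode ExampleLocatePoint(DM da)
{
  PetscInt    i,j,k;
  PetscScalar X,Y,Z;

  DMDAGetLogicalCoordinate(da,0.5,0.5,0.0,&i,&j,&k,&X,&Y,&Z);
  /* i,j are -1 everywhere except on the process that owns the closest point;
     X,Y hold that point's coordinates on all processes after the broadcast;
     k and Z are left untouched for a 2d DMDA */
  if (i >= 0) {
    PetscPrintf(PETSC_COMM_SELF,"closest point (%D,%D) at (%g,%g)\n",i,j,(double)PetscRealPart(X),(double)PetscRealPart(Y));
  }
  return(0);
}
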
/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA       *dd = (DM_DA*)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt    *indices,i,j;

  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);
  DMDAGetAO(da, &ao);
  if (!rank) {
    /* only rank 0 builds a nonempty index set; the ray is gathered entirely onto rank 0 */
    if (da->dim == 1) {
      if (dir == DMDA_X) {
        PetscMalloc1(dd->w, &indices);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;

        AOApplicationToPetsc(ao, dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);
      } else if (dir == DMDA_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDADirection");
    } else {
      if (dir == DMDA_Y) {
        /* the row y = gp is contiguous in the natural ordering: dd->M*dd->w entries starting at gp*dd->M*dd->w */
        PetscMalloc1(dd->w*dd->M,&indices);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        AOApplicationToPetsc(ao,dd->M*dd->w,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);
      } else if (dir == DMDA_X) {
        /* the column x = gp has dd->w consecutive dofs per grid point; consecutive rows are dd->w*dd->M entries apart in the natural ordering */
        PetscMalloc1(dd->w*dd->N,&indices);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        AOApplicationToPetsc(ao,dd->w*dd->N,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
    }
  } else {
    VecCreateSeq(PETSC_COMM_SELF, 0, newvec);
    ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);
  }
  DMGetGlobalVector(da, &vec);
  VecScatterCreate(vec, is, *newvec, NULL, scatter);
  DMRestoreGlobalVector(da, &vec);
  ISDestroy(&is);
  return(0);
}
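
/*
   Illustrative usage sketch (not part of dasub.c): extract the row y = 3 of a
   global vector 'v' created from a 2d DMDA 'da' and gather it onto rank 0.
   ExampleGetRow and the row number 3 are made up for illustration; the scatter
   produced by DMDAGetRay is driven with the usual VecScatterBegin/End pair.
*/
static PetscErrorCode ExampleGetRow(DM da,Vec v)
{
  Vec        row;
  VecScatter scatter;

  DMDAGetRay(da,DMDA_Y,3,&row,&scatter);
  VecScatterBegin(scatter,v,row,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(scatter,v,row,INSERT_VALUES,SCATTER_FORWARD);
  /* 'row' now holds the full row on rank 0 and has length zero on all other ranks */
  VecScatterDestroy(&scatter);
  VecDestroy(&row);
  return(0);
}
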
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group   group,subgroup;
  PetscInt    i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt size,*ranks = NULL;
  DM_DA       *dd = (DM_DA*)da->data;

  flag = 0;
  DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);
  /* flag is 1 exactly on the processes whose local corner range contains gp in the requested direction */
  if (dir == DMDA_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  PetscMalloc2(size,&owners,size,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));
  ict = 0;
  PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscInfo1(da,"%D ",i);
    }
  }
  PetscInfo(da,"\n");
  /* the new communicator contains exactly the ranks that own the requested grid point */
  MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  return(0);
}
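
/*
   Illustrative usage sketch (not part of dasub.c): restrict a reduction to the
   processes that own the boundary line y = 0 of a 2d DMDA.  ExampleBoundarySum
   and 'localSum' are made up for illustration; processes not owning y = 0
   receive MPI_COMM_NULL from MPI_Comm_create and simply skip the reduction.
*/
static PetscErrorCode ExampleBoundarySum(DM da,PetscReal localSum,PetscReal *lineSum)
{
  MPI_Comm comm;

  DMDAGetProcessorSubset(da,DMDA_Y,0,&comm);
  if (comm != MPI_COMM_NULL) {
    MPIU_Allreduce(&localSum,lineSum,1,MPIU_REAL,MPIU_SUM,comm);
    MPI_Comm_free(&comm);   /* as noted above, the communicator must be freed after use */
  }
  return(0);
}
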
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm    comm;
  MPI_Group   group, subgroup;
  PetscInt    subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt size, *subgroupRanks = NULL;
  PetscInt    xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscObjectGetComm((PetscObject)da,&comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  if (dir == DMDA_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* processes whose local corner starts at the same grid point in the given direction form one subcommunicator */
  PetscMalloc2(size, &firstPoints, size, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo1(da, "%D ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  return(0);
}
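
/*
   Illustrative usage sketch (not part of dasub.c): group each process with the
   others that own the same x-range of the DMDA, e.g. to reduce data along the
   remaining directions of a tensor product grid.  ExampleColumnComm is made up
   for illustration; unlike DMDAGetProcessorSubset(), every process receives a
   valid subcommunicator here, so no MPI_COMM_NULL check is needed.
*/
static PetscErrorCode ExampleColumnComm(DM da)
{
  MPI_Comm    xcomm;
  PetscMPIInt rank,size;

  DMDAGetProcessorSubsets(da,DMDA_X,&xcomm);
  MPI_Comm_rank(xcomm,&rank);
  MPI_Comm_size(xcomm,&size);
  PetscInfo2(da,"rank %d of %d in the x-direction subcommunicator\n",rank,size);
  MPI_Comm_free(&xcomm);
  return(0);
}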