Actual source code: dasub.c
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>
/*@
  DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an `x`, `y`, `z` point in the coordinates of the `DMDA`

  Collective

  Input Parameters:
+ da - the `DMDA`
. x  - the first physical coordinate
. y  - the second physical coordinate
- z  - the third physical coordinate

  Output Parameters:
+ II - the first logical coordinate (-1 on processes that do not contain that point)
. JJ - the second logical coordinate (-1 on processes that do not contain that point)
. KK - the third logical coordinate (-1 on processes that do not contain that point)
. X  - (optional) the first coordinate of the located grid point
. Y  - (optional) the second coordinate of the located grid point
- Z  - (optional) the third coordinate of the located grid point

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same coordinate value

.seealso: [](sec_struct), `DM`, `DMDA`
@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da, PetscScalar x, PetscScalar y, PetscScalar z, PetscInt *II, PetscInt *JJ, PetscInt *KK, PetscScalar *X, PetscScalar *Y, PetscScalar *Z)
{
  Vec          coors;
  DM           dacoors;
  DMDACoor2d **c;
  PetscInt     i, j, xs, xm, ys, ym;
  PetscReal    d, D = PETSC_MAX_REAL, Dv;
  PetscMPIInt  rank, root;

  PetscFunctionBegin;
  PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 1d DMDA");
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  PetscCall(DMGetCoordinateDM(da, &dacoors));
  PetscCall(DMDAGetCorners(dacoors, &xs, &ys, NULL, &xm, &ym, NULL));
  PetscCall(DMGetCoordinates(da, &coors));
  PetscCall(DMDAVecGetArrayRead(dacoors, coors, &c));
  for (j = ys; j < ys + ym; j++) {
    for (i = xs; i < xs + xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x) * (c[j][i].x - x) + (c[j][i].y - y) * (c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  PetscCall(MPIU_Allreduce(&D, &Dv, 1, MPIU_REAL, MPIU_MIN, PetscObjectComm((PetscObject)da)));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
    rank++;
  }
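  /* The rank holding the global minimum distance (ties excepted) contributes rank+1 to the
     sum below while all other ranks contribute 0, so root identifies the process that
     broadcasts the located coordinates */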
  PetscCall(MPIU_Allreduce(&rank, &root, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)da)));
  root--;
  PetscCallMPI(MPI_Bcast(X, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCallMPI(MPI_Bcast(Y, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCall(DMDAVecRestoreArrayRead(dacoors, coors, &c));
  PetscFunctionReturn(PETSC_SUCCESS);
}
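
/*
  Illustrative usage sketch (not part of the library source): locate the grid point of a 2d
  `DMDA` closest to a physical location. The `DMDA` da and the target coordinates px, py are
  assumed to be created elsewhere by the caller.

    PetscInt    i, j, k;
    PetscScalar X, Y, Z;

    PetscCall(DMDAGetLogicalCoordinate(da, px, py, 0.0, &i, &j, &k, &X, &Y, &Z));
    if (i >= 0) { // only the rank that owns the closest point gets i, j >= 0
      PetscCall(PetscPrintf(PETSC_COMM_SELF, "closest point: (%" PetscInt_FMT ", %" PetscInt_FMT ")\n", i, j));
    }
*/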

/*@
  DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a `DMDA` vector

  Collective

  Input Parameters:
+ da  - the `DMDA`
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameters:
+ newvec  - the new vector that can hold the values (size zero on all processes except MPI rank 0)
- scatter - the `VecScatter` that will map from the original vector to the ray

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same `gp` value

.seealso: [](sec_struct), `DM`, `DMDA`, `DMDirection`, `Vec`, `VecScatter`
@*/
PetscErrorCode DMDAGetRay(DM da, DMDirection dir, PetscInt gp, Vec *newvec, VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA      *dd = (DM_DA *)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt   *indices, i, j;

  PetscFunctionBegin;
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
  PetscCall(DMDAGetAO(da, &ao));
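  /* Only MPI rank 0 receives the ray: it builds the full list of natural indices for the
     requested line, while every other rank contributes an empty vector and index set to
     the scatter created below */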
  if (rank == 0) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscCall(PetscMalloc1(dd->w, &indices));
        indices[0] = dd->w * gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i - 1] + 1;
        PetscCall(AOApplicationToPetsc(ao, dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is));
      } else {
        PetscCheck(dir != DM_Y, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
        SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
      }
    } else {
      if (dir == DM_Y) {
        PetscCall(PetscMalloc1(dd->w * dd->M, &indices));
        indices[0] = gp * dd->M * dd->w;
        for (i = 1; i < dd->M * dd->w; i++) indices[i] = indices[i - 1] + 1;

        PetscCall(AOApplicationToPetsc(ao, dd->M * dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->M * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->M, indices, PETSC_OWN_POINTER, &is));
      } else if (dir == DM_X) {
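        /* An x-ray in 2d: each grid point carries dd->w consecutive dof entries, and
           successive points along y are separated by a full grid row (dd->w * dd->M)
           in the natural ordering */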
        PetscCall(PetscMalloc1(dd->w * dd->N, &indices));
        indices[0] = dd->w * gp;
        for (j = 1; j < dd->w; j++) indices[j] = indices[j - 1] + 1;
        for (i = 1; i < dd->N; i++) {
          indices[i * dd->w] = indices[i * dd->w - 1] + dd->w * dd->M - dd->w + 1;
          for (j = 1; j < dd->w; j++) indices[i * dd->w + j] = indices[i * dd->w + j - 1] + 1;
        }
        PetscCall(AOApplicationToPetsc(ao, dd->w * dd->N, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->N * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->N, indices, PETSC_OWN_POINTER, &is));
      } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    }
  } else {
    PetscCall(VecCreateSeq(PETSC_COMM_SELF, 0, newvec));
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
  }
  PetscCall(DMGetGlobalVector(da, &vec));
  PetscCall(VecScatterCreate(vec, is, *newvec, NULL, scatter));
  PetscCall(DMRestoreGlobalVector(da, &vec));
  PetscCall(ISDestroy(&is));
  PetscFunctionReturn(PETSC_SUCCESS);
}
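
/*
  Illustrative usage sketch (not part of the library source): gather the values along the
  grid line y = 10 of a global vector g created from a 2d `DMDA` da onto MPI rank 0 and view
  them there; da and g are assumed to be created elsewhere, and the grid is assumed to have
  more than 10 points in y.

    Vec        ray;
    VecScatter scat;

    PetscCall(DMDAGetRay(da, DM_Y, 10, &ray, &scat));
    PetscCall(VecScatterBegin(scat, g, ray, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecScatterEnd(scat, g, ray, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecView(ray, PETSC_VIEWER_STDOUT_SELF)); // ray has length zero except on rank 0
    PetscCall(VecScatterDestroy(&scat));
    PetscCall(VecDestroy(&ray));
*/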

/*@C
  DMDAGetProcessorSubset - Returns a communicator consisting only of the
  processors in a `DMDA` that own a particular global x, y, or z grid point
  (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

  Collective; No Fortran Support

  Input Parameters:
+ da  - the `DMDA`
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameter:
. comm - new communicator

  Level: advanced

  Notes:
  All processors that share the `DMDA` must call this with the same `gp` value

  After use, `comm` should be freed with `MPI_Comm_free()`

  This routine is particularly useful to compute boundary conditions
  or other application-specific calculations that require manipulating
  sets of data throughout a logical plane of grid points.

.seealso: [](sec_struct), `DM`, `DMDA`, `DMDirection`, `DM_X`, `DM_Y`, `DM_Z`, `DMDAGetProcessorSubsets()`
@*/
PetscErrorCode DMDAGetProcessorSubset(DM da, DMDirection dir, PetscInt gp, MPI_Comm *comm)
{
  MPI_Group   group, subgroup;
  PetscInt    i, ict, flag, *owners, xs, xm, ys, ym, zs, zm;
  PetscMPIInt size, *ranks = NULL;
  DM_DA      *dd = (DM_DA *)da->data;

  PetscFunctionBegin;

  flag = 0;
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)da), &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    PetscCheck(gp >= 0 && gp <= dd->P, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= zs && gp < zs + zm) flag = 1;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    PetscCheck(gp >= 0 && gp <= dd->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= ys && gp < ys + ym) flag = 1;
  } else if (dir == DM_X) {
    PetscCheck(gp >= 0 && gp <= dd->M, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= xs && gp < xs + xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  PetscCall(PetscMalloc2(size, &owners, size, &ranks));
  PetscCallMPI(MPI_Allgather(&flag, 1, MPIU_INT, owners, 1, MPIU_INT, PetscObjectComm((PetscObject)da)));
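  /* Every rank reports whether it owns the requested grid point; the owning ranks are
     collected into a subgroup from which the new communicator is created */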
  ict = 0;
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubset: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir)); /* checkbadSource \n */
  for (i = 0; i < size; i++) {
    if (owners[i]) {
      ranks[ict] = i;
      ict++;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", i)); /* checkbadSource \n */
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(PetscObjectComm((PetscObject)da), &group));
  PetscCallMPI(MPI_Group_incl(group, ict, ranks, &subgroup));
  PetscCallMPI(MPI_Comm_create(PetscObjectComm((PetscObject)da), subgroup, comm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(owners, ranks));
  PetscFunctionReturn(PETSC_SUCCESS);
}
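
/*
  Illustrative usage sketch (not part of the library source): obtain the communicator of the
  ranks that own the plane z = 0 of a 3d `DMDA` da (assumed to be created elsewhere) and free
  it when done. Ranks that do not own the plane receive MPI_COMM_NULL.

    MPI_Comm zcomm;

    PetscCall(DMDAGetProcessorSubset(da, DM_Z, 0, &zcomm));
    if (zcomm != MPI_COMM_NULL) {
      // work restricted to the z = 0 plane, e.g. boundary-condition setup, could go here
      PetscCallMPI(MPI_Comm_free(&zcomm));
    }
*/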

/*@C
  DMDAGetProcessorSubsets - Returns communicators consisting only of the
  processors in a `DMDA` adjacent in a particular dimension,
  corresponding to a logical plane in a 3D grid or a line in a 2D grid.

  Collective; No Fortran Support

  Input Parameters:
+ da  - the `DMDA`
- dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`

  Output Parameter:
. subcomm - new communicator

  Level: advanced

  Notes:
  This routine is useful for distributing one-dimensional data in a tensor product grid.

  After use, `subcomm` should be freed with `MPI_Comm_free()`

.seealso: [](sec_struct), `DM`, `DMDA`, `DMDirection`, `DMDAGetProcessorSubset()`, `DM_X`, `DM_Y`, `DM_Z`
@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm     comm;
  MPI_Group    group, subgroup;
  PetscInt     subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt  size, *subgroupRanks = NULL;
  PetscInt     xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscFunctionBegin;

  PetscCall(PetscObjectGetComm((PetscObject)da, &comm));
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm, PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  PetscCall(PetscMalloc2(size, &firstPoints, size, &subgroupRanks));
  PetscCallMPI(MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm));
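  /* Ranks whose local subdomain starts at the same index along dir lie on the same line or
     plane of the process grid and are grouped into one subcommunicator */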
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubsets: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir)); /* checkbadSource \n */
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", p)); /* checkbadSource \n */
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(comm, &group));
  PetscCallMPI(MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup));
  PetscCallMPI(MPI_Comm_create(comm, subgroup, subcomm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(firstPoints, subgroupRanks));
  PetscFunctionReturn(PETSC_SUCCESS);
}
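
/*
  Illustrative usage sketch (not part of the library source): group the ranks of a 2d `DMDA`
  da (assumed to be created elsewhere) by the starting y index of their subdomains, e.g. to
  perform collective operations along x within each group, then free the communicator.

    MPI_Comm    rowcomm;
    PetscMPIInt rowrank;

    PetscCall(DMDAGetProcessorSubsets(da, DM_Y, &rowcomm));
    PetscCallMPI(MPI_Comm_rank(rowcomm, &rowrank));
    // ... operations collective over the ranks sharing the same y range ...
    PetscCallMPI(MPI_Comm_free(&rowcomm));
*/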