Actual source code: dasub.c
petsc-3.4.5 2014-06-29
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc-private/dmdaimpl.h>    /*I  "petscdmda.h"  I*/

/*@C
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinates of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinates (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value.

   The routine is currently implemented only for 2d DMDAs, so KK and Z are not set.

   An illustrative usage sketch follows the function body below.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  DM_DA       *dd = (DM_DA*)da->data;
  Vec         coors;
  DM          dacoors;
  DMDACoor2d  **c;
  PetscInt    i,j,xs,xm,ys,ym;
  PetscReal   d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt rank,root;

  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  DMGetCoordinateDM(da,&dacoors);
  DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);
  DMGetCoordinates(da,&coors);
  DMDAVecGetArray(dacoors,coors,&c);
  /* find the locally owned grid point closest to (x,y) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* the global minimum distance identifies the process that owns the closest point */
  MPI_Allreduce(&D,&Dv,1,MPIU_REAL,MPI_MIN,PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
    rank++;
  }
  /* the owning process contributes rank+1, all others 0, so the sum is root+1 */
  MPI_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArray(dacoors,coors,&c);
  return(0);
}
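
/* ------------------------------------------------------------------------
   Illustrative usage sketch (not part of dasub.c): locate the grid point of
   a 2d DMDA closest to a given physical point, assuming petscdmda.h has been
   included and a DMDA with coordinates exists.  The helper name, the chosen
   point (0.5,0.25), and the plain error handling are this sketch's own; only
   DMDAGetLogicalCoordinate() itself comes from the function above. */
static PetscErrorCode FindNearestGridPoint(DM da)
{
  PetscErrorCode ierr;
  PetscInt       i,j,k;
  PetscScalar    X,Y,Z;

  /* i,j come back as -1 on every process that does not own the closest point */
  ierr = DMDAGetLogicalCoordinate(da,0.5,0.25,0.0,&i,&j,&k,&X,&Y,&Z);CHKERRQ(ierr);
  if (i >= 0) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"closest grid point: (%d,%d)\n",(int)i,(int)j);CHKERRQ(ierr);
  }
  return 0;
}
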
/*@C
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X or DMDA_Y (the routine requires a 2d DMDA, so DMDA_Z is not supported)
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

   An illustrative usage sketch follows the function body below.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA       *dd = (DM_DA*)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt    *indices,i,j;

  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 3d DMDA");
  DMDAGetAO(da,&ao);
  MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
  if (!rank) {
    /* process 0 lists the (application ordering) indices of the requested line,
       then maps them to the PETSc ordering used by the global vector */
    if (dir == DMDA_Y) {
      PetscMalloc(dd->w*dd->M*sizeof(PetscInt),&indices);
      indices[0] = gp*dd->M*dd->w;
      for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

      AOApplicationToPetsc(ao,dd->M*dd->w,indices);
      VecCreate(PETSC_COMM_SELF,newvec);
      VecSetBlockSize(*newvec,dd->w);
      VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);
      VecSetType(*newvec,VECSEQ);
      ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);
    } else if (dir == DMDA_X) {
      PetscMalloc(dd->w*dd->N*sizeof(PetscInt),&indices);
      indices[0] = dd->w*gp;
      for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
      for (i=1; i<dd->N; i++) {
        indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
        for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
      }
      AOApplicationToPetsc(ao,dd->w*dd->N,indices);
      VecCreate(PETSC_COMM_SELF,newvec);
      VecSetBlockSize(*newvec,dd->w);
      VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);
      VecSetType(*newvec,VECSEQ);
      ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
  } else {
    /* all other processes contribute an empty vector and an empty index set */
    VecCreateSeq(PETSC_COMM_SELF,0,newvec);
    ISCreateGeneral(PETSC_COMM_SELF,0,NULL,PETSC_COPY_VALUES,&is);
  }
  DMGetGlobalVector(da,&vec);
  VecScatterCreate(vec,is,*newvec,NULL,scatter);
  DMRestoreGlobalVector(da,&vec);
  ISDestroy(&is);
  return(0);
}
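
/* ------------------------------------------------------------------------
   Illustrative usage sketch (not part of dasub.c): pull one grid line of a
   global DMDA vector onto process 0.  The helper name, the choice of line
   (the row at y-index 3), and the plain error handling are this sketch's
   own; only DMDAGetRay() itself comes from the function above. */
static PetscErrorCode GatherRowOnProcessZero(DM da,Vec global)
{
  PetscErrorCode ierr;
  Vec            ray;
  VecScatter     scatter;

  /* DMDA_Y with gp = 3 selects the grid line at y-index 3, varying over x */
  ierr = DMDAGetRay(da,DMDA_Y,3,&ray,&scatter);CHKERRQ(ierr);
  ierr = VecScatterBegin(scatter,global,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(scatter,global,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  /* on process 0, ray now holds the values along that line; it has length zero elsewhere */
  ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
  ierr = VecDestroy(&ray);CHKERRQ(ierr);
  return 0;
}
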
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

   This routine is particularly useful for computing boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   An illustrative usage sketch follows the function body below.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group   group,subgroup;
  PetscInt    i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt size,*ranks = NULL;
  DM_DA       *dd = (DM_DA*)da->data;

  flag = 0;
  DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp >= dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp >= dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp >= dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* gather which processes own gp in this direction, then form the subgroup */
  PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));
  ict = 0;
  PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscInfo1(da,"%D ",i);
    }
  }
  PetscInfo(da,"\n");
  MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  return(0);
}
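
/* ------------------------------------------------------------------------
   Illustrative usage sketch (not part of dasub.c): obtain the communicator
   of the processes that own the plane (or line) x = 0, e.g. to perform a
   collective operation on boundary data.  Processes outside the subset
   receive MPI_COMM_NULL; the helper name and the decision to free the
   communicator afterwards are this sketch's own. */
static PetscErrorCode WorkOnBoundarySubset(DM da)
{
  PetscErrorCode ierr;
  MPI_Comm       bcomm;
  PetscMPIInt    brank;

  ierr = DMDAGetProcessorSubset(da,DMDA_X,0,&bcomm);CHKERRQ(ierr);
  if (bcomm != MPI_COMM_NULL) {
    ierr = MPI_Comm_rank(bcomm,&brank);CHKERRQ(ierr);
    /* collective work restricted to the processes owning x = 0 goes here */
    ierr = MPI_Comm_free(&bcomm);CHKERRQ(ierr);
  }
  return 0;
}
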
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   An illustrative usage sketch follows the function body below.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm    comm;
  MPI_Group   group, subgroup;
  PetscInt    subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt size, *subgroupRanks = NULL;
  PetscInt    xs, xm, ys, ym, zs, zm, firstPoint, p;
  DM_DA       *dd = (DM_DA*)da->data;

  PetscObjectGetComm((PetscObject)da,&comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* processes whose subdomains start at the same grid index in this direction
     are grouped into the same subcommunicator */
  PetscMalloc2(size, PetscInt, &firstPoints, size, PetscMPIInt, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo1(da, "%D ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  return(0);
}
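
/* ------------------------------------------------------------------------
   Illustrative usage sketch (not part of dasub.c): on every process, build
   the communicator of the processes that share the same first x index (the
   same column of the process grid) and sum a per-process value over it.
   The helper name and the reduction are this sketch's own, and freeing the
   communicator afterwards is likewise this sketch's choice. */
static PetscErrorCode SumOverProcessColumn(DM da,PetscReal local,PetscReal *colsum)
{
  PetscErrorCode ierr;
  MPI_Comm       colcomm;

  ierr = DMDAGetProcessorSubsets(da,DMDA_X,&colcomm);CHKERRQ(ierr);
  ierr = MPI_Allreduce(&local,colsum,1,MPIU_REAL,MPI_SUM,colcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&colcomm);CHKERRQ(ierr);
  return 0;
}
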