Actual source code: dasub.c


/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on da

   Input Parameters:
+  da - the distributed array
.  x  - the first physical coordinate
.  y  - the second physical coordinate
-  z  - the third physical coordinate

   Output Parameters:
+  II - the first logical coordinate (-1 on processes that do not contain that point)
.  JJ - the second logical coordinate (-1 on processes that do not contain that point)
.  KK - the third logical coordinate (-1 on processes that do not contain that point)
.  X  - (optional) the first coordinate of the located grid point
.  Y  - (optional) the second coordinate of the located grid point
-  Z  - (optional) the third coordinate of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate values

@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  DMGetCoordinateDM(da,&dacoors);
  DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);
  DMGetCoordinates(da,&coors);
  DMDAVecGetArrayRead(dacoors,coors,&c);
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
    rank++;
  }
  MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArrayRead(dacoors,coors,&c);
  return(0);
}
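
/*
   Usage sketch (illustrative only, not part of the PETSc source): locate the grid
   point nearest the physical point (0.5,0.5) on a small 2d DMDA with uniform
   coordinates. The grid sizes, query point, and variable names are arbitrary
   choices and error checking is omitted for brevity; on ranks that do not own the
   nearest point the returned logical indices are -1, while the broadcast physical
   coordinates px,py are valid everywhere.

     #include <petscdmda.h>

     int main(int argc,char **argv)
     {
       DM          da;
       PetscInt    i,j,k;
       PetscScalar px,py,pz;

       PetscInitialize(&argc,&argv,NULL,NULL);
       DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_BOX,
                    10,10,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&da);
       DMSetUp(da);
       DMDASetUniformCoordinates(da,0.0,1.0,0.0,1.0,0.0,0.0);
       DMDAGetLogicalCoordinate(da,0.5,0.5,0.0,&i,&j,&k,&px,&py,&pz);
       if (i >= 0) PetscPrintf(PETSC_COMM_SELF,"owning rank found logical point (%D,%D)\n",i,j);
       DMDestroy(&da);
       PetscFinalize();
       return 0;
     }
*/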

/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

@*/
PetscErrorCode  DMDAGetRay(DM da,DMDirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);
  DMDAGetAO(da, &ao);
  if (rank == 0) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscMalloc1(dd->w, &indices);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        AOApplicationToPetsc(ao, dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);
      } else if (dir == DM_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    } else {
      if (dir == DM_Y) {
        PetscMalloc1(dd->w*dd->M,&indices);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        AOApplicationToPetsc(ao,dd->M*dd->w,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);
      } else if (dir == DM_X) {
        PetscMalloc1(dd->w*dd->N,&indices);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        AOApplicationToPetsc(ao,dd->w*dd->N,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDirection");
    }
  } else {
    VecCreateSeq(PETSC_COMM_SELF, 0, newvec);
    ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);
  }
  DMGetGlobalVector(da, &vec);
  VecScatterCreate(vec, is, *newvec, NULL, scatter);
  DMRestoreGlobalVector(da, &vec);
  ISDestroy(&is);
  return(0);
}
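
/*
   Usage sketch (illustrative only, not part of the PETSc source): gather row j = 3
   of a 2d DMDA vector onto process zero. The DMDA 'da' and the global vector
   'global' are assumed to have been created elsewhere (for example with
   DMDACreate2d() and DMCreateGlobalVector()); after the scatter, 'ray' on rank 0
   holds the dof*M values of that row and is empty on all other ranks. Error
   checking is omitted for brevity.

     Vec        ray;
     VecScatter scatter;

     DMDAGetRay(da,DM_Y,3,&ray,&scatter);
     VecScatterBegin(scatter,global,ray,INSERT_VALUES,SCATTER_FORWARD);
     VecScatterEnd(scatter,global,ray,INSERT_VALUES,SCATTER_FORWARD);
     VecScatterDestroy(&scatter);
     VecDestroy(&ray);
*/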

/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  flag = 0;
  DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DM_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  PetscMalloc2(size,&owners,size,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));
  ict  = 0;
  PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscInfo1(da,"%D ",i);
    }
  }
  PetscInfo(da,"\n");
  MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  return(0);
}
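
/*
   Usage sketch (illustrative only, not part of the PETSc source): obtain the
   communicator of the processes owning the z = 0 plane of a 3d DMDA 'da'
   (assumed to exist) and perform a reduction restricted to that plane. Because
   MPI_Comm_create() returns MPI_COMM_NULL on processes outside the subgroup, the
   communicator is used and freed only where it is valid. The local value is an
   arbitrary placeholder and error checking is omitted.

     MPI_Comm  plane;
     PetscReal mylocal = 1.0,planesum;

     DMDAGetProcessorSubset(da,DM_Z,0,&plane);
     if (plane != MPI_COMM_NULL) {
       MPIU_Allreduce(&mylocal,&planesum,1,MPIU_REAL,MPIU_SUM,plane);
       MPI_Comm_free(&plane);
     }
*/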

/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on da

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscObjectGetComm((PetscObject)da,&comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  PetscMalloc2(size, &firstPoints, size, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo1(da, "%D ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  return(0);
}
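
/*
   Usage sketch (illustrative only, not part of the PETSc source): split the
   processes of a 2d DMDA 'da' (assumed to exist) into one subcommunicator per
   group of processes sharing the same starting grid index in the x direction.
   Every process belongs to exactly one such subcommunicator, so it can be freed
   unconditionally. Error checking is omitted for brevity.

     MPI_Comm    xgroup;
     PetscMPIInt grank,gsize;

     DMDAGetProcessorSubsets(da,DM_X,&xgroup);
     MPI_Comm_rank(xgroup,&grank);
     MPI_Comm_size(xgroup,&gsize);
     PetscPrintf(PETSC_COMM_SELF,"rank %d of %d in my x subset\n",(int)grank,(int)gsize);
     MPI_Comm_free(&xgroup);
*/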