Actual source code: dasub.c

petsc-3.8.4 2018-03-24

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to a given x,y,z point in the coordinates of the DMDA

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinate (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  PetscFunctionBegin;
  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  /* Find the locally owned coordinate grid point closest to (x,y) */
  DMGetCoordinateDM(da,&dacoors);
  DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);
  DMGetCoordinates(da,&coors);
  DMDAVecGetArrayRead(dacoors,coors,&c);
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* The process whose local minimum matches the global minimum owns the closest point;
     all other processes report -1.  The owner's rank plus one (so that non-owners
     contribute zero) is summed to determine the root used to broadcast the coordinates. */
  MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
    rank++;
  }
  MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArrayRead(dacoors,coors,&c);
  PetscFunctionReturn(0);
}
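
/*
   Example usage (an illustrative sketch, not part of the PETSc library source):
   locate the grid point of a 2d DMDA closest to the physical point (0.5,0.5).
   The DMDA "da" is assumed to carry coordinates, here set with
   DMDASetUniformCoordinates(); error checking is omitted for brevity.  Only the
   i,j and xc,yc outputs are meaningful for a 2d DMDA.

     PetscInt    i,j,k;
     PetscScalar xc,yc,zc;

     DMDASetUniformCoordinates(da,0.0,1.0,0.0,1.0,0.0,0.0);
     DMDAGetLogicalCoordinate(da,0.5,0.5,0.0,&i,&j,&k,&xc,&yc,&zc);
     if (i >= 0) {
       PetscPrintf(PETSC_COMM_SELF,"closest point (%D,%D) at (%g,%g)\n",i,j,(double)PetscRealPart(xc),(double)PetscRealPart(yc));
     }
*/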

/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);
  DMDAGetAO(da, &ao);
  if (!rank) {
    /* Process 0 builds the list of global indices that make up the requested ray,
       in the application (natural) ordering, then maps it to the PETSc ordering */
    if (da->dim == 1) {
      if (dir == DMDA_X) {
        PetscMalloc1(dd->w, &indices);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        AOApplicationToPetsc(ao, dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);
      } else if (dir == DMDA_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDADirection");
    } else {
      if (dir == DMDA_Y) {
        /* Row gp: M grid points with w degrees of freedom each, contiguous in the natural ordering */
        PetscMalloc1(dd->w*dd->M,&indices);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        AOApplicationToPetsc(ao,dd->M*dd->w,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);
      } else if (dir == DMDA_X) {
        /* Column gp: N grid points with w degrees of freedom each, strided by a full row in the natural ordering */
        PetscMalloc1(dd->w*dd->N,&indices);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        AOApplicationToPetsc(ao,dd->w*dd->N,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
    }
  } else {
    /* All other processes contribute an empty vector and index set */
    VecCreateSeq(PETSC_COMM_SELF, 0, newvec);
    ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);
  }
  DMGetGlobalVector(da, &vec);
  VecScatterCreate(vec, is, *newvec, NULL, scatter);
  DMRestoreGlobalVector(da, &vec);
  ISDestroy(&is);
  PetscFunctionReturn(0);
}
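
/*
   Example usage (an illustrative sketch, not part of the PETSc library source):
   gather row j=5 of a global vector "v" obtained from a 2d DMDA "da" onto
   process 0 and view it there.  On all other processes "ray" has size zero.
   Error checking is omitted for brevity.

     Vec         ray;
     VecScatter  scatter;
     PetscMPIInt rank;

     DMDAGetRay(da,DMDA_Y,5,&ray,&scatter);
     VecScatterBegin(scatter,v,ray,INSERT_VALUES,SCATTER_FORWARD);
     VecScatterEnd(scatter,v,ray,INSERT_VALUES,SCATTER_FORWARD);
     MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
     if (!rank) VecView(ray,PETSC_VIEWER_STDOUT_SELF);
     VecScatterDestroy(&scatter);
     VecDestroy(&ray);
*/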

/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  flag = 0;
  DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);
  if (dir == DMDA_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* Gather each process's ownership flag and collect the ranks that own the grid point */
  PetscMalloc2(size,&owners,size,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));
  ict  = 0;
  PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscInfo1(da,"%D ",i);
    }
  }
  PetscInfo(da,"\n");
  MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  PetscFunctionReturn(0);
}
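
/*
   Example usage (an illustrative sketch, not part of the PETSc library source):
   obtain the communicator of the processes owning the z=0 plane of a 3d DMDA
   "da", for instance to coordinate a boundary computation on that plane.
   Processes not in the subgroup receive MPI_COMM_NULL from MPI_Comm_create(),
   so only the owners query and free the communicator.

     MPI_Comm    planecomm;
     PetscMPIInt nplane;

     DMDAGetProcessorSubset(da,DMDA_Z,0,&planecomm);
     if (planecomm != MPI_COMM_NULL) {
       MPI_Comm_size(planecomm,&nplane);
       MPI_Comm_free(&planecomm);
     }
*/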

/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscFunctionBegin;
  PetscObjectGetComm((PetscObject)da,&comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  if (dir == DMDA_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* Processes whose local subdomains start at the same index in the given direction
     form one subgroup; gather the starting indices to identify the members */
  PetscMalloc2(size, &firstPoints, size, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo1(da, "%D ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  PetscFunctionReturn(0);
}
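
/*
   Example usage (an illustrative sketch, not part of the PETSc library source):
   build, on every process, the communicator of the processes sharing its y-range
   and reduce a per-process value along that direction of the process grid.  Every
   process belongs to exactly one such subcommunicator; "localval" stands in for
   application data.  Error checking is omitted for brevity.

     MPI_Comm  ycomm;
     PetscReal localval = 1.0,linesum;

     DMDAGetProcessorSubsets(da,DMDA_Y,&ycomm);
     MPIU_Allreduce(&localval,&linesum,1,MPIU_REAL,MPIU_SUM,ycomm);
     MPI_Comm_free(&ycomm);
*/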