Actual source code: da1.c
petsc-3.6.1 2015-08-06
2: /*
3: Code for manipulating distributed regular 1d arrays in parallel.
4: This file was created by Peter Mell 6/30/95
5: */
7: #include <petsc/private/dmdaimpl.h> /*I "petscdmda.h" I*/
9: #include <petscdraw.h>
12: PetscErrorCode DMView_DA_1d(DM da,PetscViewer viewer)
13: {
15: PetscMPIInt rank;
16: PetscBool iascii,isdraw,isbinary;
17: DM_DA *dd = (DM_DA*)da->data;
18: #if defined(PETSC_HAVE_MATLAB_ENGINE)
19: PetscBool ismatlab;
20: #endif
23: MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
25: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
26: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
27: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
28: #if defined(PETSC_HAVE_MATLAB_ENGINE)
29: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERMATLAB,&ismatlab);
30: #endif
31: if (iascii) {
32: PetscViewerFormat format;
34: PetscViewerGetFormat(viewer, &format);
35: if (format != PETSC_VIEWER_ASCII_VTK && format != PETSC_VIEWER_ASCII_VTK_CELL) {
36: DMDALocalInfo info;
37: DMDAGetLocalInfo(da,&info);
38: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
39: PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %D m %D w %D s %D\n",rank,dd->M,dd->m,dd->w,dd->s);
40: PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %D %D\n",info.xs,info.xs+info.xm);
41: PetscViewerFlush(viewer);
42: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
43: } else {
44: DMView_DA_VTK(da, viewer);
45: }
46: } else if (isdraw) {
47: PetscDraw draw;
48: double ymin = -1,ymax = 1,xmin = -1,xmax = dd->M,x;
49: PetscInt base;
50: char node[10];
51: PetscBool isnull;
53: PetscViewerDrawGetDraw(viewer,0,&draw);
54: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
56: PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);
57: PetscDrawSynchronizedClear(draw);
59: /* first processor draws all node lines */
60: if (!rank) {
61: PetscInt xmin_tmp;
62: ymin = 0.0; ymax = 0.3;
64: for (xmin_tmp=0; xmin_tmp < dd->M; xmin_tmp++) {
65: PetscDrawLine(draw,(double)xmin_tmp,ymin,(double)xmin_tmp,ymax,PETSC_DRAW_BLACK);
66: }
68: xmin = 0.0; xmax = dd->M - 1;
69: PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);
70: PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_BLACK);
71: }
73: PetscDrawSynchronizedFlush(draw);
74: PetscDrawPause(draw);
76: /* draw my box */
77: ymin = 0; ymax = 0.3; xmin = dd->xs / dd->w; xmax = (dd->xe / dd->w) - 1;
78: PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);
79: PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);
80: PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);
81: PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);
83: /* Put in index numbers */
84: base = dd->base / dd->w;
85: for (x=xmin; x<=xmax; x++) {
86: sprintf(node,"%d",(int)base++);
87: PetscDrawString(draw,x,ymin,PETSC_DRAW_RED,node);
88: }
90: PetscDrawSynchronizedFlush(draw);
91: PetscDrawPause(draw);
92: } else if (isbinary) {
93: DMView_DA_Binary(da,viewer);
94: #if defined(PETSC_HAVE_MATLAB_ENGINE)
95: } else if (ismatlab) {
96: DMView_DA_Matlab(da,viewer);
97: #endif
98: }
99: return(0);
100: }
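/*
   Illustrative note (not part of the original file): DMView_DA_1d() is normally
   reached through DMView() on a 1d DMDA, e.g.

       DMView(da,PETSC_VIEWER_STDOUT_WORLD);    exercises the ASCII branch above
       DMView(da,PETSC_VIEWER_DRAW_WORLD);      exercises the draw branch above

   or at run time via the -dm_view option; 'da' here is assumed to be a DMDA
   created with DMDACreate1d() below.
*/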
105: PetscErrorCode DMSetUp_DA_1D(DM da)
106: {
107: DM_DA *dd = (DM_DA*)da->data;
108: const PetscInt M = dd->M;
109: const PetscInt dof = dd->w;
110: const PetscInt s = dd->s;
111: const PetscInt sDist = s; /* stencil distance in points */
112: const PetscInt *lx = dd->lx;
113: DMBoundaryType bx = dd->bx;
114: MPI_Comm comm;
115: Vec local, global;
116: VecScatter gtol;
117: IS to, from;
118: PetscBool flg1 = PETSC_FALSE, flg2 = PETSC_FALSE;
119: PetscMPIInt rank, size;
120: PetscInt i,*idx,nn,left,xs,xe,x,Xs,Xe,start,m,IXs,IXe;
121: PetscErrorCode ierr;
124: PetscObjectGetComm((PetscObject) da, &comm);
125: MPI_Comm_size(comm,&size);
126: MPI_Comm_rank(comm,&rank);
128: dd->p = 1;
129: dd->n = 1;
130: dd->m = size;
131: m = dd->m;
133: if (s > 0) {
134: /* when s == 0 no data is communicated, so it is acceptable for some processes to own no points; only check when s > 0 */
135: if (M < m) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"More processes than data points! %D %D",m,M);
136: if ((M-1) < s && size > 1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Array is too small for stencil! %D %D",M-1,s);
137: }
139: /*
140: Determine locally owned region
141: xs is the first local node number, x is the number of local nodes
142: */
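/*
     Worked example (illustrative) of the default "Regular PETSc Distribution"
     below, assuming M = 10 grid points on m = 3 processes:
        rank 0: x = 4, xs = 0
        rank 1: x = 3, xs = 4
        rank 2: x = 3, xs = 7
     The M % m leftover points are assigned to the lowest-numbered ranks.
*/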
143: if (!lx) {
144: PetscMalloc1(m, &dd->lx);
145: PetscOptionsGetBool(NULL,"-da_partition_blockcomm",&flg1,NULL);
146: PetscOptionsGetBool(NULL,"-da_partition_nodes_at_end",&flg2,NULL);
147: if (flg1) { /* Block Comm type Distribution */
148: xs = rank*M/m;
149: x = (rank + 1)*M/m - xs;
150: } else if (flg2) { /* The leftover nodes are distributed across the last processes */
151: x = (M + rank)/m;
152: if (M/m == x) xs = rank*x;
153: else xs = rank*(x-1) + (M+rank)%(x*m);
154: } else { /* The leftover nodes are distributed across the first M % m processes */
155: /* Regular PETSc Distribution */
156: x = M/m + ((M % m) > rank);
157: if (rank >= (M % m)) xs = (rank * (PetscInt)(M/m) + M % m);
158: else xs = rank * (PetscInt)(M/m) + rank;
159: }
160: MPI_Allgather(&xs,1,MPIU_INT,dd->lx,1,MPIU_INT,comm);
161: for (i=0; i<m-1; i++) dd->lx[i] = dd->lx[i+1] - dd->lx[i];
162: dd->lx[m-1] = M - dd->lx[m-1];
163: } else {
164: x = lx[rank];
165: xs = 0;
166: for (i=0; i<rank; i++) xs += lx[i];
167: /* verify that data user provided is consistent */
168: left = xs;
169: for (i=rank; i<size; i++) left += lx[i];
170: if (left != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Sum of lx across processors not equal to M %D %D",left,M);
171: }
173: /*
174: check if the scatter requires more than one process neighbor or wraps around
175: the domain more than once
176: */
177: if ((x < s) && ((M > 1) || (bx == DM_BOUNDARY_PERIODIC))) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local x-width of domain x %D is smaller than stencil width s %D",x,s);
179: xe = xs + x;
181: /* determine ghost region (Xs) and region scattered into (IXs) */
182: if (xs-sDist > 0) {
183: Xs = xs - sDist;
184: IXs = xs - sDist;
185: } else {
186: if (bx) Xs = xs - sDist;
187: else Xs = 0;
188: IXs = 0;
189: }
190: if (xe+sDist <= M) {
191: Xe = xe + sDist;
192: IXe = xe + sDist;
193: } else {
194: if (bx) Xe = xe + sDist;
195: else Xe = M;
196: IXe = M;
197: }
199: if (bx == DM_BOUNDARY_PERIODIC || bx == DM_BOUNDARY_MIRROR) {
200: Xs = xs - sDist;
201: Xe = xe + sDist;
202: IXs = xs - sDist;
203: IXe = xe + sDist;
204: }
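/*
     Worked example (illustrative): with M = 10 and s = 2, an interior rank owning
     xs = 4 .. xe = 7 gets Xs = IXs = 2 and Xe = IXe = 9.  For the first rank
     (xs = 0) with bx == DM_BOUNDARY_NONE both Xs and IXs are clamped to 0;
     with DM_BOUNDARY_GHOSTED Xs = -2 (the local ghost slots exist) while
     IXs = 0 (nothing is scattered into them); with DM_BOUNDARY_PERIODIC or
     DM_BOUNDARY_MIRROR Xs = IXs = -2 and the ghost indices wrap or reflect.
*/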
206: /* allocate the base parallel and sequential vectors */
207: dd->Nlocal = dof*x;
208: VecCreateMPIWithArray(comm,dof,dd->Nlocal,PETSC_DECIDE,NULL,&global);
209: dd->nlocal = dof*(Xe-Xs);
210: VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->nlocal,NULL,&local);
212: VecGetOwnershipRange(global,&start,NULL);
214: /* Create Global to Local Vector Scatter Context */
215: /* global to local must retrieve ghost points */
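/*
     The 'to' index set created next addresses the region scattered into within
     the ghosted local vector: the local patch starts at ghost point Xs, so the
     scattered-into points IXs .. IXe-1 occupy the dof*(IXe-IXs) consecutive
     entries beginning at offset dof*(IXs-Xs).
*/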
216: ISCreateStride(comm,dof*(IXe-IXs),dof*(IXs-Xs),1,&to);
218: PetscMalloc1(x+2*sDist,&idx);
219: PetscLogObjectMemory((PetscObject)da,(x+2*(sDist))*sizeof(PetscInt));
221: for (i=0; i<IXs-Xs; i++) idx[i] = -1; /* prepend with -1s if needed for ghosted case*/
223: nn = IXs-Xs;
224: if (bx == DM_BOUNDARY_PERIODIC) { /* Handle all cases with periodic first */
225: for (i=0; i<sDist; i++) { /* Left ghost points */
226: if ((xs-sDist+i)>=0) idx[nn++] = xs-sDist+i;
227: else idx[nn++] = M+(xs-sDist+i);
228: }
230: for (i=0; i<x; i++) idx [nn++] = xs + i; /* Non-ghost points */
232: for (i=0; i<sDist; i++) { /* Right ghost points */
233: if ((xe+i)<M) idx [nn++] = xe+i;
234: else idx [nn++] = (xe+i) - M;
235: }
236: } else if (bx == DM_BOUNDARY_MIRROR) { /* Handle the mirror boundary case */
237: for (i=0; i<(sDist); i++) { /* Left ghost points */
238: if ((xs-sDist+i)>=0) idx[nn++] = xs-sDist+i;
239: else idx[nn++] = sDist - i;
240: }
242: for (i=0; i<x; i++) idx [nn++] = xs + i; /* Non-ghost points */
244: for (i=0; i<(sDist); i++) { /* Right ghost points */
245: if ((xe+i)<M) idx[nn++] = xe+i;
246: else idx[nn++] = M - (i + 1);
247: }
248: } else { /* Now do all cases with no periodicity */
249: if (0 <= xs-sDist) {
250: for (i=0; i<sDist; i++) idx[nn++] = xs - sDist + i;
251: } else {
252: for (i=0; i<xs; i++) idx[nn++] = i;
253: }
255: for (i=0; i<x; i++) idx [nn++] = xs + i;
257: if ((xe+sDist)<=M) {
258: for (i=0; i<sDist; i++) idx[nn++]=xe+i;
259: } else {
260: for (i=xe; i<M; i++) idx[nn++]=i;
261: }
262: }
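/*
     Worked example (illustrative) of the periodic branch above: with M = 8,
     s = 2, dof = 1 and a rank owning xs = 0 .. xe = 4, the left ghosts wrap to
     6,7 and the right ghosts are 4,5, giving idx = {6,7, 0,1,2,3, 4,5}.
*/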
264: ISCreateBlock(comm,dof,nn-IXs+Xs,&idx[IXs-Xs],PETSC_USE_POINTER,&from);
265: VecScatterCreate(global,from,local,to,&gtol);
266: PetscLogObjectParent((PetscObject)da,(PetscObject)gtol);
267: ISDestroy(&to);
268: ISDestroy(&from);
269: VecDestroy(&local);
270: VecDestroy(&global);
272: dd->xs = dof*xs; dd->xe = dof*xe; dd->ys = 0; dd->ye = 1; dd->zs = 0; dd->ze = 1;
273: dd->Xs = dof*Xs; dd->Xe = dof*Xe; dd->Ys = 0; dd->Ye = 1; dd->Zs = 0; dd->Ze = 1;
275: dd->gtol = gtol;
276: dd->base = dof*xs;
277: da->ops->view = DMView_DA_1d;
279: /*
280: Set the local to global ordering in the global vector, this allows use
281: of VecSetValuesLocal().
282: */
283: for (i=0; i<Xe-IXe; i++) idx[nn++] = -1; /* pad with -1s if needed for ghosted case*/
285: ISLocalToGlobalMappingCreate(comm,dof,nn,idx,PETSC_OWN_POINTER,&da->ltogmap);
286: PetscLogObjectParent((PetscObject)da,(PetscObject)da->ltogmap);
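/*
     Illustrative sketch (assumes a hypothetical vector 'gvec' obtained from
     DMCreateGlobalVector() on this DMDA, which carries the mapping created
     above): entries can then be set with local (ghosted) indices, e.g.

         PetscInt    row = 0;     local index into the ghosted patch
         PetscScalar val = 1.0;
         VecSetValuesLocal(gvec,1,&row,&val,INSERT_VALUES);
         VecAssemblyBegin(gvec); VecAssemblyEnd(gvec);
*/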
288: return(0);
289: }
294: /*@C
295: DMDACreate1d - Creates an object that will manage the communication of one-dimensional
296: regular array data that is distributed across some processors.
298: Collective on MPI_Comm
300: Input Parameters:
301: + comm - MPI communicator
302: . bx - type of ghost cells at the boundary the array should have, if any. Use
303: DM_BOUNDARY_NONE, DM_BOUNDARY_GHOSTED, or DM_BOUNDARY_PERIODIC.
304: . M - global dimension of the array (use -M to indicate that it may be set to a different value
305: from the command line with -da_grid_x <M>)
306: . dof - number of degrees of freedom per node
307: . s - stencil width
308: - lx - array containing number of nodes in the X direction on each processor,
309: or NULL. If non-null, it must be of the same length as the number of processes in the MPI_Comm.
311: Output Parameter:
312: . da - the resulting distributed array object
314: Options Database Key:
315: + -dm_view - Calls DMView() at the conclusion of DMDACreate1d()
316: . -da_grid_x <nx> - number of grid points in x direction; can be set if M < 0
317: . -da_refine_x <rx> - refinement factor
318: - -da_refine <n> - refine the DMDA n times before creating it, if M < 0
320: Level: beginner
322: Notes:
323: The array data itself is NOT stored in the DMDA; it is stored in Vec objects.
324: The appropriate vector objects can be obtained with calls to DMCreateGlobalVector()
325: and DMCreateLocalVector(), and with calls to VecDuplicate() if more are needed.
327: .keywords: distributed array, create, one-dimensional
329: .seealso: DMDestroy(), DMView(), DMDACreate2d(), DMDACreate3d(), DMGlobalToLocalBegin(), DMDASetRefinementFactor(),
330: DMGlobalToLocalEnd(), DMLocalToGlobalBegin(), DMLocalToLocalBegin(), DMLocalToLocalEnd(), DMDAGetRefinementFactor(),
331: DMDAGetInfo(), DMCreateGlobalVector(), DMCreateLocalVector(), DMDACreateNaturalVector(), DMLoad(), DMDAGetOwnershipRanges()
333: @*/
334: PetscErrorCode DMDACreate1d(MPI_Comm comm, DMBoundaryType bx, PetscInt M, PetscInt dof, PetscInt s, const PetscInt lx[], DM *da)
335: {
337: PetscMPIInt size;
340: DMDACreate(comm, da);
341: DMSetDimension(*da, 1);
342: DMDASetSizes(*da, M, 1, 1);
343: MPI_Comm_size(comm, &size);
344: DMDASetNumProcs(*da, size, PETSC_DECIDE, PETSC_DECIDE);
345: DMDASetBoundaryType(*da, bx, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE);
346: DMDASetDof(*da, dof);
347: DMDASetStencilWidth(*da, s);
348: DMDASetOwnershipRanges(*da, lx, NULL, NULL);
349: /* This violates the behavior for other classes, but right now users expect negative dimensions to be handled this way */
350: DMSetFromOptions(*da);
351: DMSetUp(*da);
352: return(0);
353: }
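/*
   A minimal usage sketch for DMDACreate1d() (illustrative only, not part of the
   original file; all error checking omitted):

       #include <petscdmda.h>

       int main(int argc,char **argv)
       {
         DM  da;
         Vec g,l;

         PetscInitialize(&argc,&argv,NULL,NULL);
         DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,10,1,1,NULL,&da);
         DMCreateGlobalVector(da,&g);                   parallel vector with dof*M entries
         DMCreateLocalVector(da,&l);                    sequential vector including ghost points
         DMGlobalToLocalBegin(da,g,INSERT_VALUES,l);    fill the ghost points via the gtol scatter
         DMGlobalToLocalEnd(da,g,INSERT_VALUES,l);
         VecDestroy(&l); VecDestroy(&g); DMDestroy(&da);
         PetscFinalize();
         return 0;
       }
*/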