Actual source code: da1.c
petsc-3.3-p7 2013-05-11
/*
   Code for manipulating distributed regular 1d arrays in parallel.
   This file was created by Peter Mell  6/30/95
*/

#include <petsc-private/daimpl.h>     /*I  "petscdmda.h"  I*/

const char *DMDABoundaryTypes[] = {"BOUNDARY_NONE","BOUNDARY_GHOSTED","BOUNDARY_PERIODIC","DMDA_",0};
PetscErrorCode DMView_DA_1d(DM da,PetscViewer viewer)
{
  PetscMPIInt rank;
  PetscBool   iascii,isdraw,isbinary;
  DM_DA       *dd = (DM_DA*)da->data;
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  PetscBool   ismatlab;
#endif

  MPI_Comm_rank(((PetscObject)da)->comm,&rank);

  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERMATLAB,&ismatlab);
#endif
  if (iascii) {
    PetscViewerFormat format;

    PetscViewerGetFormat(viewer, &format);
    if (format != PETSC_VIEWER_ASCII_VTK && format != PETSC_VIEWER_ASCII_VTK_CELL) {
      DMDALocalInfo info;
      DMDAGetLocalInfo(da,&info);
      PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
      PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %D m %D w %D s %D\n",rank,dd->M,dd->m,dd->w,dd->s);
      PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %D %D\n",info.xs,info.xs+info.xm);
      PetscViewerFlush(viewer);
      PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
    } else {
      DMView_DA_VTK(da, viewer);
    }
  } else if (isdraw) {
    PetscDraw draw;
    double    ymin = -1,ymax = 1,xmin = -1,xmax = dd->M,x;
    PetscInt  base;
    char      node[10];
    PetscBool isnull;

    PetscViewerDrawGetDraw(viewer,0,&draw);
    PetscDrawIsNull(draw,&isnull); if (isnull) return(0);

    PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);
    PetscDrawSynchronizedClear(draw);

    /* first processor draws all node lines */
    if (!rank) {
      PetscInt xmin_tmp;
      ymin = 0.0; ymax = 0.3;

      /* ADIC doesn't like doubles in a for loop */
      for (xmin_tmp=0; xmin_tmp < dd->M; xmin_tmp++) {
        PetscDrawLine(draw,(double)xmin_tmp,ymin,(double)xmin_tmp,ymax,PETSC_DRAW_BLACK);
      }

      xmin = 0.0; xmax = dd->M - 1;
      PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);
      PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_BLACK);
    }

    PetscDrawSynchronizedFlush(draw);
    PetscDrawPause(draw);

    /* draw my box */
    ymin = 0; ymax = 0.3; xmin = dd->xs / dd->w; xmax = (dd->xe / dd->w) - 1;
    PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);
    PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);
    PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);
    PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);

    /* Put in index numbers */
    base = dd->base / dd->w;
    for (x=xmin; x<=xmax; x++) {
      sprintf(node,"%d",(int)base++);
      PetscDrawString(draw,x,ymin,PETSC_DRAW_RED,node);
    }

    PetscDrawSynchronizedFlush(draw);
    PetscDrawPause(draw);
  } else if (isbinary) {
    DMView_DA_Binary(da,viewer);
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  } else if (ismatlab) {
    DMView_DA_Matlab(da,viewer);
#endif
  } else SETERRQ1(((PetscObject)da)->comm,PETSC_ERR_SUP,"Viewer type %s not supported for DMDA 1d",((PetscObject)viewer)->type_name);
  return(0);
}
/*
    Processes command line options to determine if/how a DMDA
    is to be viewed. Called by DMDACreateXX()
*/
PetscErrorCode DMView_DA_Private(DM da)
{
  PetscBool   flg1 = PETSC_FALSE;
  PetscViewer view;

  PetscOptionsBegin(((PetscObject)da)->comm,((PetscObject)da)->prefix,"DMDA viewing options","DMDA");
    PetscOptionsBool("-da_view","Print information about the DMDA's distribution","DMView",PETSC_FALSE,&flg1,PETSC_NULL);
    if (flg1) {
      PetscViewerASCIIGetStdout(((PetscObject)da)->comm,&view);
      DMView(da,view);
    }
    flg1 = PETSC_FALSE;
    PetscOptionsBool("-da_view_draw","Draw how the DMDA is distributed","DMView",PETSC_FALSE,&flg1,PETSC_NULL);
    if (flg1) {DMView(da,PETSC_VIEWER_DRAW_(((PetscObject)da)->comm));}
  PetscOptionsEnd();
  return(0);
}
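/*
   Example (not from the original source): a program that creates a DMDA can trigger
   the viewers registered above from the command line, e.g. by running it with
   -da_view to print the distribution to stdout, or -da_view_draw to display it
   with a PetscDraw viewer.
*/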
PetscErrorCode DMSetUp_DA_1D(DM da)
{
  DM_DA                  *dd   = (DM_DA*)da->data;
  const PetscInt         M     = dd->M;
  const PetscInt         dof   = dd->w;
  const PetscInt         s     = dd->s;
  const PetscInt         sDist = s*dof;  /* absolute stencil distance */
  const PetscInt         *lx   = dd->lx;
  const DMDABoundaryType bx    = dd->bx;
  MPI_Comm               comm;
  Vec                    local, global;
  VecScatter             ltog, gtol;
  IS                     to, from;
  PetscBool              flg1 = PETSC_FALSE, flg2 = PETSC_FALSE;
  PetscMPIInt            rank, size;
  PetscInt               i,*idx,nn,left,xs,xe,x,Xs,Xe,start,end,m,IXs,IXe;
  PetscErrorCode         ierr;

  if (dof < 1) SETERRQ1(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have 1 or more degrees of freedom per node: %D",dof);
  if (s < 0)   SETERRQ1(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Stencil width cannot be negative: %D",s);

  dd->dim = 1;
  PetscMalloc(dof*sizeof(char*),&dd->fieldname);
  PetscMemzero(dd->fieldname,dof*sizeof(char*));
  PetscObjectGetComm((PetscObject) da, &comm);
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  dd->m = size;
  m     = dd->m;

  if (s > 0) {
    /* if not communicating data then should be ok to have nothing on some processes */
    if (M < m)     SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"More processes than data points! %D %D",m,M);
    if ((M-1) < s) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Array is too small for stencil! %D %D",M-1,s);
  }

  /*
     Determine locally owned region
     xs is the first local node number, x is the number of local nodes
  */
  if (!lx) {
    PetscMalloc(m*sizeof(PetscInt), &dd->lx);
    PetscOptionsGetBool(PETSC_NULL,"-da_partition_blockcomm",&flg1,PETSC_NULL);
    PetscOptionsGetBool(PETSC_NULL,"-da_partition_nodes_at_end",&flg2,PETSC_NULL);
    if (flg1) {        /* Block Comm type Distribution */
      xs = rank*M/m;
      x  = (rank + 1)*M/m - xs;
    } else if (flg2) { /* The odd nodes are evenly distributed across the last nodes */
      x = (M + rank)/m;
      if (M/m == x) { xs = rank*x; }
      else          { xs = rank*(x-1) + (M+rank)%(x*m); }
    } else {           /* The odd nodes are evenly distributed across the first k nodes */
      /* Regular PETSc Distribution */
      x = M/m + ((M % m) > rank);
      if (rank >= (M % m)) {xs = (rank * (PetscInt)(M/m) + M % m);}
      else                 {xs = rank * (PetscInt)(M/m) + rank;}
    }
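    /*
       Worked example of the default distribution above (illustrative numbers, not
       from the original source): with M = 10 global nodes on m = 3 processes,
       M/m = 3 and M%m = 1, so ranks 0,1,2 own x = 4,3,3 nodes starting at
       xs = 0,4,7 respectively; the single leftover node goes to rank 0.
    */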
    MPI_Allgather(&xs,1,MPIU_INT,dd->lx,1,MPIU_INT,comm);
    for (i=0; i<m-1; i++) dd->lx[i] = dd->lx[i+1] - dd->lx[i];
    dd->lx[m-1] = M - dd->lx[m-1];
  } else {
    x  = lx[rank];
    xs = 0;
    for (i=0; i<rank; i++) {
      xs += lx[i];
    }
    /* verify that data user provided is consistent */
    left = xs;
    for (i=rank; i<size; i++) {
      left += lx[i];
    }
    if (left != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Sum of lx across processors not equal to M %D %D",left,M);
  }
  /*
     check if the scatter requires more than one process neighbor or wraps around
     the domain more than once
  */
  if ((x < s) & ((M > 1) | (bx == DMDA_BOUNDARY_PERIODIC))) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local x-width of domain x %D is smaller than stencil width s %D",x,s);

  /* From now on x,xs,xe,Xs,Xe are the exact location in the array */
  x  *= dof;
  xs *= dof;
  xe  = xs + x;

  /* determine ghost region */
  if (bx) {
    Xs = xs - sDist;
    Xe = xe + sDist;
  } else {
    if ((xs-sDist) >= 0)     Xs = xs-sDist;  else Xs = 0;
    if ((xe+sDist) <= M*dof) Xe = xe+sDist;  else Xe = M*dof;
  }

  if (bx == DMDA_BOUNDARY_PERIODIC) {
    IXs = xs - sDist;
    IXe = xe + sDist;
  } else {
    if ((xs-sDist) >= 0)     IXs = xs-sDist;  else IXs = 0;
    if ((xe+sDist) <= M*dof) IXe = xe+sDist;  else IXe = M*dof;
  }
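  /*
     Worked example of the ghost region above (illustrative numbers, not from the
     original source): with dof = 1, s = 1 and bx = DMDA_BOUNDARY_NONE, a process
     owning nodes 4..6 (xs = 4, xe = 7) of M = 10 gets the ghosted range Xs = 3,
     Xe = 8; a process at the left end of the domain (xs = 0) would get Xs clipped
     to 0, whereas with DMDA_BOUNDARY_GHOSTED or DMDA_BOUNDARY_PERIODIC it would
     get Xs = -1.
  */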
  /* allocate the base parallel and sequential vectors */
  dd->Nlocal = x;
  VecCreateMPIWithArray(comm,dof,dd->Nlocal,PETSC_DECIDE,0,&global);
  dd->nlocal = (Xe-Xs);
  VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->nlocal,0,&local);

  /* Create Local to Global Vector Scatter Context */
  /* local to global inserts non-ghost point region into global */
  VecGetOwnershipRange(global,&start,&end);
  ISCreateStride(comm,x,start,1,&to);
  ISCreateStride(comm,x,xs-Xs,1,&from);
  VecScatterCreate(local,from,global,to,&ltog);
  PetscLogObjectParent(da,ltog);
  ISDestroy(&from);
  ISDestroy(&to);

  /* Create Global to Local Vector Scatter Context */
  /* global to local must retrieve ghost points */
  ISCreateStride(comm,(IXe-IXs),IXs-Xs,1,&to);

  PetscMalloc((x+2*sDist)*sizeof(PetscInt),&idx);
  PetscLogObjectMemory(da,(x+2*sDist)*sizeof(PetscInt));
  for (i=0; i<IXs-Xs; i++) {idx[i] = -1; } /* prepend with -1s if needed for ghosted case */

  nn = IXs-Xs;
  if (bx == DMDA_BOUNDARY_PERIODIC) { /* Handle all cases with wrap first */
    for (i=0; i<sDist; i++) {  /* Left ghost points */
      if ((xs-sDist+i) >= 0) { idx[nn++] = xs-sDist+i; }
      else                   { idx[nn++] = M*dof+(xs-sDist+i); }
    }

    for (i=0; i<x; i++) { idx[nn++] = xs + i; }  /* Non-ghost points */

    for (i=0; i<sDist; i++) { /* Right ghost points */
      if ((xe+i) < M*dof) { idx[nn++] = xe+i; }
      else                { idx[nn++] = (xe+i) - M*dof; }
    }
  } else {  /* Now do all cases with no wrapping */
    if (sDist <= xs) {for (i=0; i<sDist; i++) {idx[nn++] = xs - sDist + i;}}
    else             {for (i=0; i<xs;    i++) {idx[nn++] = i;}}

    for (i=0; i<x; i++) { idx[nn++] = xs + i; }

    if ((xe+sDist) <= M*dof) {for (i=0;  i<sDist;   i++) {idx[nn++] = xe+i;}}
    else                     {for (i=xe; i<(M*dof); i++) {idx[nn++] = i;}}
  }
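  /*
     Worked example of the periodic index wrap above (illustrative numbers, not from
     the original source): with M = 10, dof = 1, s = 1 and bx = DMDA_BOUNDARY_PERIODIC,
     rank 0 with xs = 0, xe = 4 fills idx with 9 (left ghost wrapped to the right end
     of the domain), then 0,1,2,3 (owned points), then 4 (right ghost).
  */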
  ISCreateGeneral(comm,nn-IXs+Xs,&idx[IXs-Xs],PETSC_COPY_VALUES,&from);
  VecScatterCreate(global,from,local,to,&gtol);
  PetscLogObjectParent(da,to);
  PetscLogObjectParent(da,from);
  PetscLogObjectParent(da,gtol);
  ISDestroy(&to);
  ISDestroy(&from);
  VecDestroy(&local);
  VecDestroy(&global);

  dd->xs = xs; dd->xe = xe; dd->ys = 0; dd->ye = 1; dd->zs = 0; dd->ze = 1;
  dd->Xs = Xs; dd->Xe = Xe; dd->Ys = 0; dd->Ye = 1; dd->Zs = 0; dd->Ze = 1;

  dd->gtol      = gtol;
  dd->ltog      = ltog;
  dd->base      = xs;
  da->ops->view = DMView_DA_1d;

  /*
     Set the local to global ordering in the global vector; this allows the use
     of VecSetValuesLocal().
  */
  for (i=0; i<Xe-IXe; i++) {idx[nn++] = -1; } /* pad with -1s if needed for ghosted case */

  ISLocalToGlobalMappingCreate(comm,nn,idx,PETSC_COPY_VALUES,&da->ltogmap);
  ISLocalToGlobalMappingBlock(da->ltogmap,dd->w,&da->ltogmapb);
  PetscLogObjectParent(da,da->ltogmap);

  dd->idx = idx;
  dd->Nl  = nn;

  return(0);
}
/*@C
   DMDACreate1d - Creates an object that will manage the communication of one-dimensional
   regular array data that is distributed across some processors.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  bx - type of ghost cells at the boundary the array should have, if any. Use
        DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_GHOSTED, or DMDA_BOUNDARY_PERIODIC.
.  M - global dimension of the array (use -M to indicate that it may be set to a different value
       from the command line with -da_grid_x <M>)
.  dof - number of degrees of freedom per node
.  s - stencil width
-  lx - array containing the number of nodes in the X direction on each processor,
        or PETSC_NULL. If non-null, its length must equal the number of processes in the MPI_Comm.

   Output Parameter:
.  da - the resulting distributed array object

   Options Database Key:
+  -da_view - Calls DMView() at the conclusion of DMDACreate1d()
.  -da_grid_x <nx> - number of grid points in x direction; can set if M < 0
.  -da_refine_x <rx> - refinement factor
-  -da_refine <n> - refine the DMDA n times before creating it, if M < 0

   Level: beginner

   Notes:
   The array data itself is NOT stored in the DMDA; it is stored in Vec objects.
   The appropriate vector objects can be obtained with calls to DMCreateGlobalVector()
   and DMCreateLocalVector(), and calls to VecDuplicate() if more are needed.

.keywords: distributed array, create, one-dimensional

.seealso: DMDestroy(), DMView(), DMDACreate2d(), DMDACreate3d(), DMGlobalToLocalBegin(), DMDASetRefinementFactor(),
          DMGlobalToLocalEnd(), DMLocalToGlobalBegin(), DMDALocalToLocalBegin(), DMDALocalToLocalEnd(), DMDAGetRefinementFactor(),
          DMDAGetInfo(), DMCreateGlobalVector(), DMCreateLocalVector(), DMDACreateNaturalVector(), DMLoad(), DMDAGetOwnershipRanges()

@*/
PetscErrorCode DMDACreate1d(MPI_Comm comm, DMDABoundaryType bx, PetscInt M, PetscInt dof, PetscInt s, const PetscInt lx[], DM *da)
{
  PetscMPIInt size;

  DMDACreate(comm, da);
  DMDASetDim(*da, 1);
  DMDASetSizes(*da, M, 1, 1);
  MPI_Comm_size(comm, &size);
  DMDASetNumProcs(*da, size, PETSC_DECIDE, PETSC_DECIDE);
  DMDASetBoundaryType(*da, bx, DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_NONE);
  DMDASetDof(*da, dof);
  DMDASetStencilWidth(*da, s);
  DMDASetOwnershipRanges(*da, lx, PETSC_NULL, PETSC_NULL);
  /* This violates the behavior for other classes, but right now users expect negative dimensions to be handled this way */
  DMSetFromOptions(*da);
  DMSetUp(*da);
  DMView_DA_Private(*da);
  return(0);
}
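
/*
   Usage sketch (not part of the original source): a minimal illustration of how a
   caller might use DMDACreate1d() and obtain the global vector that actually stores
   the array data, as described in the man page above. The function name
   ExampleUseDMDACreate1d and the particular sizes (M = 8, dof = 1, s = 1) are
   illustrative assumptions only.
*/
PetscErrorCode ExampleUseDMDACreate1d(MPI_Comm comm)
{
  DM             da;
  Vec            g;
  PetscErrorCode ierr;

  /* 8 global grid points, 1 dof per node, stencil width 1; PETSc chooses the ownership ranges */
  ierr = DMDACreate1d(comm,DMDA_BOUNDARY_NONE,8,1,1,PETSC_NULL,&da);CHKERRQ(ierr);
  ierr = DMCreateGlobalVector(da,&g);CHKERRQ(ierr);   /* vector holding the distributed array data */
  ierr = VecDestroy(&g);CHKERRQ(ierr);
  ierr = DMDestroy(&da);CHKERRQ(ierr);
  return(0);
}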