Actual source code: adda.c
petsc-3.4.5 2014-06-29
1: /*
3: Contributed by Arvid Bessen, Columbia University, June 2007
5: Extension of DMDA object to any number of dimensions.
7: */
8: #include <../src/dm/impls/adda/addaimpl.h> /*I "petscdmadda.h" I*/
13: PetscErrorCode DMDestroy_ADDA(DM dm)
14: {
16: DM_ADDA *dd = (DM_ADDA*)dm->data;
19: /* destroy the allocated data */
20: PetscFree(dd->nodes);
21: PetscFree(dd->procs);
22: PetscFree(dd->lcs);
23: PetscFree(dd->lce);
24: PetscFree(dd->lgs);
25: PetscFree(dd->lge);
26: PetscFree(dd->refine);
28: VecDestroy(&dd->global);
29: /* This was originally freed in DMDestroy(), but that prevents reference counting of backend objects */
30: PetscFree(dd);
31: return(0);
32: }
36: PetscErrorCode DMView_ADDA(DM dm, PetscViewer v)
37: {
39: SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP, "Not implemented yet");
40: return(0);
41: }
45: PetscErrorCode DMCreateGlobalVector_ADDA(DM dm, Vec *vec)
46: {
48: DM_ADDA *dd = (DM_ADDA*)dm->data;
53: VecDuplicate(dd->global, vec);
54: return(0);
55: }
59: PetscErrorCode DMCreateColoring_ADDA(DM dm, ISColoringType ctype,MatType mtype,ISColoring *coloring)
60: {
62: SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP, "Not implemented yet");
63: return(0);
64: }
68: PetscErrorCode DMCreateMatrix_ADDA(DM dm, MatType mtype, Mat *mat)
69: {
71: DM_ADDA *dd = (DM_ADDA*)dm->data;
75: MatCreate(PetscObjectComm((PetscObject)dm), mat);
76: MatSetSizes(*mat, dd->lsize, dd->lsize, PETSC_DECIDE, PETSC_DECIDE);
77: MatSetType(*mat, mtype);
78: return(0);
79: }
83: /*@
84:    DMADDAGetMatrixNS - Creates a matrix compatible with two distributed arrays
86: Collective on ADDA
88:    Input Parameters:
89: +  addar - the distributed array whose layout indexes the rows of the matrix
90: .  addac - the distributed array whose layout indexes the columns of the matrix
91: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
92: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
94: Output Parameter:
95: . mat - the empty Jacobian
97: Level: beginner
99: .keywords: distributed array, matrix
101: .seealso: DMCreateMatrix()
102: @*/
103: PetscErrorCode DMADDAGetMatrixNS(DM dm, DM dmc, MatType mtype, Mat *mat)
104: {
106: DM_ADDA *dd = (DM_ADDA*)dm->data;
107: DM_ADDA *ddc = (DM_ADDA*)dmc->data;
113: MatCreate(PetscObjectComm((PetscObject)dm), mat);
114: MatSetSizes(*mat, dd->lsize, ddc->lsize, PETSC_DECIDE, PETSC_DECIDE);
115: MatSetType(*mat, mtype);
116: return(0);
117: }
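/*
   Usage sketch for DMADDAGetMatrixNS, assuming dm and dmc are two already set-up
   ADDA objects living on the same communicator (all names here are illustrative):

     Mat R;
     DMADDAGetMatrixNS(dm, dmc, MATMPIAIJ, &R);
     MatSetUp(R);                                  // or call a proper preallocation routine
     // ... insert values, e.g. with DMADDAMatSetValues(), then assemble ...
     MatAssemblyBegin(R, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(R, MAT_FINAL_ASSEMBLY);
     MatDestroy(&R);
*/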
121: PetscErrorCode DMCreateInterpolation_ADDA(DM dm1,DM dm2,Mat *mat,Vec *vec)
122: {
124: SETERRQ(PetscObjectComm((PetscObject)dm1),PETSC_ERR_SUP, "Not implemented yet");
125: return(0);
126: }
130: PetscErrorCode DMRefine_ADDA(DM dm, MPI_Comm comm, DM *dmf)
131: {
133: SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP, "Not implemented yet");
134: return(0);
135: }
139: PetscErrorCode DMCoarsen_ADDA(DM dm, MPI_Comm comm,DM *dmc)
140: {
142: PetscInt *nodesc;
143: PetscInt dofc;
144: PetscInt i;
145: DM_ADDA *dd = (DM_ADDA*)dm->data;
150: PetscMalloc(dd->dim*sizeof(PetscInt), &nodesc);
151: for (i=0; i<dd->dim; i++) {
152: nodesc[i] = (dd->nodes[i] % dd->refine[i]) ? dd->nodes[i] / dd->refine[i] + 1 : dd->nodes[i] / dd->refine[i];
153: }
154: dofc = (dd->dof % dd->dofrefine) ? dd->dof / dd->dofrefine + 1 : dd->dof / dd->dofrefine;
155: DMADDACreate(PetscObjectComm((PetscObject)dm), dd->dim, nodesc, dd->procs, dofc, dd->periodic, dmc);
156: PetscFree(nodesc);
157: /* copy refinement factors */
158: DMADDASetRefinement(*dmc, dd->refine, dd->dofrefine);
159: return(0);
160: }
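/*
   Coarsening arithmetic, as a worked example: each direction is divided by its
   refinement factor, rounding up. With dd->nodes[i] = 10 and dd->refine[i] = 3
   the coarse grid gets 10/3 rounded up = 4 nodes in direction i, and the dof
   count is reduced the same way using dd->dofrefine.
*/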
164: PetscErrorCode DMCreateInjection_ADDA(DM dm1,DM dm2, VecScatter *ctx)
165: {
167: SETERRQ(PetscObjectComm((PetscObject)dm1),PETSC_ERR_SUP, "Not implemented yet");
168: return(0);
169: }
171: /*@C
172: ADDAHCiterStartup - performs the first check for an iteration through a hypercube
173: lc, uc, idx all have to be valid arrays of size dim
174:   This function sets idx to lc and then checks whether the lower corner (lc) is less
175:   than or equal to the upper corner (uc). If lc "<=" uc in all coordinates, it returns
176:   PETSC_TRUE, and PETSC_FALSE otherwise.
178: Input Parameters:
179: +  dim - the number of dimensions
180: . lc - the "lower" corner
181: - uc - the "upper" corner
183:   Output Parameter:
184: .  idx - the index that this function initializes (it is set to lc)
186: Developer Notes: This code is crap! You cannot return a value and NO ERROR code in PETSc!
188: Level: developer
189: @*/
190: PetscBool ADDAHCiterStartup(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx)
191: {
193:   PetscErrorCode ierr;
194:   PetscInt i;
195:   ierr = PetscMemcpy(idx, lc, sizeof(PetscInt)*dim);
196: if (ierr) {
197: PetscError(PETSC_COMM_SELF,__LINE__,__FUNCT__,__FILE__,__SDIR__,ierr,PETSC_ERROR_REPEAT," ");
198: return PETSC_FALSE;
199: }
200: for (i=0; i<dim; i++) {
201: if (lc[i] > uc[i]) return PETSC_FALSE;
202: }
203: return PETSC_TRUE;
204: }
206: /*@C
207: ADDAHCiter - iterates through a hypercube
208: lc, uc, idx all have to be valid arrays of size dim
209:   This function returns PETSC_FALSE if idx exceeds uc, and PETSC_TRUE otherwise.
210:   There are no guarantees on what happens if idx is not in the hypercube
211:   spanned by lc and uc; this should be checked with ADDAHCiterStartup().
213: Use this code as follows:
214: if (ADDAHCiterStartup(dim, lc, uc, idx)) {
215: do {
216: ...
217: } while (ADDAHCiter(dim, lc, uc, idx));
218: }
220: Input Parameters:
221: +  dim - the number of dimensions
222: . lc - the "lower" corner
223: - uc - the "upper" corner
225:   Output Parameter:
226: . idx - the index that this function increases
228: Level: developer
229: @*/
230: PetscBool ADDAHCiter(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx)
231: {
232: PetscInt i;
233: for (i=dim-1; i>=0; i--) {
234: idx[i] += 1;
235: if (uc[i] > idx[i]) {
236: return PETSC_TRUE;
237: } else {
238: idx[i] -= uc[i] - lc[i];
239: }
240: }
241: return PETSC_FALSE;
242: }
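/*
   Usage sketch: visit every point of the 2d box [0,3) x [0,2) with the iterator
   pair above (variable names are illustrative):

     PetscInt lc[2] = {0, 0}, uc[2] = {3, 2}, idx[2];
     if (ADDAHCiterStartup(2, lc, uc, idx)) {
       do {
         // visit (idx[0], idx[1]); the order is (0,0), (0,1), (1,0), ..., (2,1)
       } while (ADDAHCiter(2, lc, uc, idx));
     }
*/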
246: PetscErrorCode DMCreateAggregates_ADDA(DM dmc,DM dmf,Mat *rest)
247: {
248: PetscErrorCode ierr=0;
249: PetscInt i;
250: PetscInt dim;
251: PetscInt dofc, doff;
252: PetscInt *lcs_c, *lce_c;
253: PetscInt *lcs_f, *lce_f;
254: PetscInt *fgs, *fge;
255: PetscInt fgdofs, fgdofe;
256: ADDAIdx iter_c, iter_f;
257: PetscInt max_agg_size;
258: PetscMPIInt comm_size;
259: ADDAIdx *fine_nodes;
260: PetscInt fn_idx;
261: PetscScalar *one_vec;
262: DM_ADDA *ddc = (DM_ADDA*)dmc->data;
263: DM_ADDA *ddf = (DM_ADDA*)dmf->data;
269: if (ddc->dim != ddf->dim) SETERRQ2(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Dimensions of ADDA do not match %D %D", ddc->dim, ddf->dim);
270: /* if (dmc->dof != dmf->dof) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"DOF of ADDA do not match %D %D", dmc->dof, dmf->dof); */
271: dim = ddc->dim;
272: dofc = ddc->dof;
273: doff = ddf->dof;
275: DMADDAGetCorners(dmc, &lcs_c, &lce_c);
276: DMADDAGetCorners(dmf, &lcs_f, &lce_f);
278: /* compute maximum size of aggregate */
279: max_agg_size = 1;
280: for (i=0; i<dim; i++) {
281: max_agg_size *= ddf->nodes[i] / ddc->nodes[i] + 1;
282: }
283: max_agg_size *= doff / dofc + 1;
285: /* create the matrix that will contain the restriction operator */
286: MPI_Comm_size(PETSC_COMM_WORLD,&comm_size);
288: /* construct matrix */
289: if (comm_size == 1) {
290: DMADDAGetMatrixNS(dmc, dmf, MATSEQAIJ, rest);
291: MatSeqAIJSetPreallocation(*rest, max_agg_size, NULL);
292: } else {
293: DMADDAGetMatrixNS(dmc, dmf, MATMPIAIJ, rest);
294: MatMPIAIJSetPreallocation(*rest, max_agg_size, NULL, max_agg_size, NULL);
295: }
296: /* store nodes in the fine grid here */
297: PetscMalloc(sizeof(ADDAIdx)*max_agg_size, &fine_nodes);
298: /* these are the values to set to, a collection of 1's */
299: PetscMalloc(sizeof(PetscScalar)*max_agg_size, &one_vec);
300: /* initialize */
301: for (i=0; i<max_agg_size; i++) {
302: PetscMalloc(sizeof(PetscInt)*dim, &(fine_nodes[i].x));
303: one_vec[i] = 1.0;
304: }
306: /* get iterators */
307: PetscMalloc(sizeof(PetscInt)*dim, &(iter_c.x));
308: PetscMalloc(sizeof(PetscInt)*dim, &(iter_f.x));
310: /* the fine grid node corner for each coarse grid node */
311: PetscMalloc(sizeof(PetscInt)*dim, &fgs);
312: PetscMalloc(sizeof(PetscInt)*dim, &fge);
314: /* loop over all coarse nodes */
315: PetscMemcpy(iter_c.x, lcs_c, sizeof(PetscInt)*dim);
316: if (ADDAHCiterStartup(dim, lcs_c, lce_c, iter_c.x)) {
317: do {
318: /* find corresponding fine grid nodes */
319: for (i=0; i<dim; i++) {
320: fgs[i] = iter_c.x[i]*ddf->nodes[i]/ddc->nodes[i];
321: fge[i] = PetscMin((iter_c.x[i]+1)*ddf->nodes[i]/ddc->nodes[i], ddf->nodes[i]);
322: }
323: /* treat all dof of the coarse grid */
324: for (iter_c.d=0; iter_c.d<dofc; iter_c.d++) {
325: /* find corresponding fine grid dof's */
326: fgdofs = iter_c.d*doff/dofc;
327: fgdofe = PetscMin((iter_c.d+1)*doff/dofc, doff);
328: /* we now know the "box" of all the fine grid nodes that are mapped to one coarse grid node */
329: fn_idx = 0;
330: /* loop over those corresponding fine grid nodes */
331: if (ADDAHCiterStartup(dim, fgs, fge, iter_f.x)) {
332: do {
333: /* loop over all corresponding fine grid dof */
334: for (iter_f.d=fgdofs; iter_f.d<fgdofe; iter_f.d++) {
335: PetscMemcpy(fine_nodes[fn_idx].x, iter_f.x, sizeof(PetscInt)*dim);
337: fine_nodes[fn_idx].d = iter_f.d;
338: fn_idx++;
339: }
340: } while (ADDAHCiter(dim, fgs, fge, iter_f.x));
341: }
342: /* add all these points to one aggregate */
343: DMADDAMatSetValues(*rest, dmc, 1, &iter_c, dmf, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
344: }
345: } while (ADDAHCiter(dim, lcs_c, lce_c, iter_c.x));
346: }
348: /* free memory */
349: PetscFree(fgs);
350: PetscFree(fge);
351: PetscFree(iter_c.x);
352: PetscFree(iter_f.x);
353: PetscFree(lcs_c);
354: PetscFree(lce_c);
355: PetscFree(lcs_f);
356: PetscFree(lce_f);
357: PetscFree(one_vec);
358: for (i=0; i<max_agg_size; i++) {
359: PetscFree(fine_nodes[i].x);
360: }
361: PetscFree(fine_nodes);
363: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
364: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
365: return(0);
366: }
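/*
   Usage sketch: the aggregation matrix is normally requested through the generic
   DM interface, assuming dmf is a set-up ADDA (names are illustrative):

     DM  dmc;
     Mat agg;
     DMCoarsen(dmf, MPI_COMM_NULL, &dmc);
     DMCreateAggregates(dmc, dmf, &agg);
     // each row of agg holds 1.0 in the columns of the fine-grid nodes that are
     // aggregated into the corresponding coarse-grid node
     MatDestroy(&agg);
     DMDestroy(&dmc);
*/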
370: /*@
371: DMADDASetRefinement - Sets the refinement factors of the distributed arrays.
373: Collective on ADDA
375:   Input Parameters:
376: + adda - the ADDA object
377: . refine - array of refinement factors
378: - dofrefine - the refinement factor for the dof, usually just 1
380: Level: developer
382: .keywords: distributed array, refinement
383: @*/
384: PetscErrorCode DMADDASetRefinement(DM dm, PetscInt *refine, PetscInt dofrefine)
385: {
386: DM_ADDA *dd = (DM_ADDA*)dm->data;
392: PetscMemcpy(dd->refine, refine, dd->dim*sizeof(PetscInt));
393: dd->dofrefine = dofrefine;
394: return(0);
395: }
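/*
   Usage sketch, assuming a set-up 2d ADDA dm (names are illustrative): refine by
   a factor of 2 in each direction while keeping the dof count unchanged:

     PetscInt refine[2] = {2, 2};
     DMADDASetRefinement(dm, refine, 1);
*/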
399: /*@
400: DMADDAGetCorners - Gets the corners of the local area
402: Not Collective
404: Input Parameter:
405: . adda - the ADDA object
407:   Output Parameters:
408: + lcorner - the "lower" corner
409: - ucorner - the "upper" corner
411:   Both lcorner and ucorner are allocated by this procedure and will point to
412:   arrays of size dd->dim; the caller is responsible for freeing them with PetscFree().
414: Level: beginner
416: .keywords: distributed array, refinement
417: @*/
418: PetscErrorCode DMADDAGetCorners(DM dm, PetscInt **lcorner, PetscInt **ucorner)
419: {
420: DM_ADDA *dd = (DM_ADDA*)dm->data;
427: PetscMalloc(dd->dim*sizeof(PetscInt), lcorner);
428: PetscMalloc(dd->dim*sizeof(PetscInt), ucorner);
429: PetscMemcpy(*lcorner, dd->lcs, dd->dim*sizeof(PetscInt));
430: PetscMemcpy(*ucorner, dd->lce, dd->dim*sizeof(PetscInt));
431: return(0);
432: }
436: /*@
437: DMADDAGetGhostCorners - Gets the ghost corners of the local area
439:   Not Collective
441: Input Parameter:
442: . adda - the ADDA object
444:   Output Parameters:
445: + lcorner - the "lower" corner of the ghosted area
446: - ucorner - the "upper" corner of the ghosted area
448:   Both lcorner and ucorner are allocated by this procedure and will point to
449:   arrays of size dd->dim; the caller is responsible for freeing them with PetscFree().
451: Level: beginner
453: .keywords: distributed array, refinement
454: @*/
455: PetscErrorCode DMADDAGetGhostCorners(DM dm, PetscInt **lcorner, PetscInt **ucorner)
456: {
457: DM_ADDA *dd = (DM_ADDA*)dm->data;
464: PetscMalloc(dd->dim*sizeof(PetscInt), lcorner);
465: PetscMalloc(dd->dim*sizeof(PetscInt), ucorner);
466: PetscMemcpy(*lcorner, dd->lgs, dd->dim*sizeof(PetscInt));
467: PetscMemcpy(*ucorner, dd->lge, dd->dim*sizeof(PetscInt));
468: return(0);
469: }
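/*
   Usage sketch for the two corner queries, assuming dm is a set-up ADDA; the
   returned arrays are allocated here and must be freed by the caller:

     PetscInt *lc, *uc, *lgc, *ugc;
     DMADDAGetCorners(dm, &lc, &uc);        // owned region: lc[i] <= x[i] < uc[i]
     DMADDAGetGhostCorners(dm, &lgc, &ugc); // owned region extended by the stencil width
     // ... loop over the local or ghosted region ...
     PetscFree(lc);  PetscFree(uc);
     PetscFree(lgc); PetscFree(ugc);
*/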
475: /*@C
476: DMADDAMatSetValues - Inserts or adds a block of values into a matrix. The values
477: are indexed geometrically with the help of the ADDA data structure.
478: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
479:    MUST be called after all calls to DMADDAMatSetValues() have been completed.
481: Not Collective
483: Input Parameters:
484: + mat - the matrix
485: . addam - the ADDA geometry information for the rows
486: . m - the number of rows
487: .  idxm - the row indices, each of them a proper ADDAIdx
488: .  addan - the ADDA geometry information for the columns
489: .  n - the number of columns
490: .  idxn - the column indices, each of them a proper ADDAIdx
491: . v - a logically two-dimensional array of values of size m*n
492: - addv - either ADD_VALUES or INSERT_VALUES, where
493: ADD_VALUES adds values to any existing entries, and
494: INSERT_VALUES replaces existing entries with new values
496: Notes:
497: By default the values, v, are row-oriented and unsorted.
498: See MatSetOption() for other options.
500:    Calls to DMADDAMatSetValues() (and MatSetValues()) with the INSERT_VALUES and ADD_VALUES
501: options cannot be mixed without intervening calls to the assembly
502: routines.
504: Efficiency Alert:
505: The routine ADDAMatSetValuesBlocked() may offer much better efficiency
506: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
508: Level: beginner
510: Concepts: matrices^putting entries in
512: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), ADDAMatSetValuesBlocked(),
513: InsertMode, INSERT_VALUES, ADD_VALUES
514: @*/
515: PetscErrorCode DMADDAMatSetValues(Mat mat, DM dmm, PetscInt m, const ADDAIdx idxm[],DM dmn, PetscInt n, const ADDAIdx idxn[],const PetscScalar v[], InsertMode addv)
516: {
517: DM_ADDA *ddm = (DM_ADDA*)dmm->data;
518: DM_ADDA *ddn = (DM_ADDA*)dmn->data;
520: PetscInt *nodemult;
521: PetscInt i, j;
522: PetscInt *matidxm, *matidxn;
523: PetscInt *x, d;
524: PetscInt idx;
527: /* find correct multiplying factors */
528: PetscMalloc(ddm->dim*sizeof(PetscInt), &nodemult);
530: nodemult[ddm->dim-1] = 1;
531: for (j=ddm->dim-2; j>=0; j--) {
532: nodemult[j] = nodemult[j+1]*(ddm->nodes[j+1]);
533: }
534: /* convert each coordinate in idxm to the matrix row index */
535: PetscMalloc(m*sizeof(PetscInt), &matidxm);
536: for (i=0; i<m; i++) {
537: x = idxm[i].x; d = idxm[i].d;
538: idx = 0;
539: for (j=ddm->dim-1; j>=0; j--) {
540: if (x[j] < 0) { /* "left", "below", etc. of boundary */
541: if (ddm->periodic[j]) { /* periodic wraps around */
542: x[j] += ddm->nodes[j];
543: } else { /* non-periodic get discarded */
544: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
545: goto endofloop_m;
546: }
547: }
548: if (x[j] >= ddm->nodes[j]) { /* "right", "above", etc. of boundary */
549: if (ddm->periodic[j]) { /* periodic wraps around */
550: x[j] -= ddm->nodes[j];
551: } else { /* non-periodic get discarded */
552: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
553: goto endofloop_m;
554: }
555: }
556: idx += x[j]*nodemult[j];
557: }
558: matidxm[i] = idx*(ddm->dof) + d;
559: endofloop_m:
560: ;
561: }
562: PetscFree(nodemult);
564: /* find correct multiplying factors */
565: PetscMalloc(ddn->dim*sizeof(PetscInt), &nodemult);
567: nodemult[ddn->dim-1] = 1;
568: for (j=ddn->dim-2; j>=0; j--) {
569: nodemult[j] = nodemult[j+1]*(ddn->nodes[j+1]);
570: }
571:   /* convert each coordinate in idxn to the matrix column index */
572: PetscMalloc(n*sizeof(PetscInt), &matidxn);
573: for (i=0; i<n; i++) {
574: x = idxn[i].x; d = idxn[i].d;
575: idx = 0;
576: for (j=ddn->dim-1; j>=0; j--) {
577: if (x[j] < 0) { /* "left", "below", etc. of boundary */
578: if (ddn->periodic[j]) { /* periodic wraps around */
579: x[j] += ddn->nodes[j];
580: } else { /* non-periodic get discarded */
581: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
582: goto endofloop_n;
583: }
584: }
585: if (x[j] >= ddn->nodes[j]) { /* "right", "above", etc. of boundary */
586: if (ddn->periodic[j]) { /* periodic wraps around */
587: x[j] -= ddn->nodes[j];
588: } else { /* non-periodic get discarded */
589: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
590: goto endofloop_n;
591: }
592: }
593: idx += x[j]*nodemult[j];
594: }
595: matidxn[i] = idx*(ddn->dof) + d;
596: endofloop_n:
597: ;
598: }
599: /* call original MatSetValues() */
600: MatSetValues(mat, m, matidxm, n, matidxn, v, addv);
601: /* clean up */
602: PetscFree(nodemult);
603: PetscFree(matidxm);
604: PetscFree(matidxn);
605: return(0);
606: }
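/*
   Usage sketch, assuming a set-up 2d ADDA dm with one dof and at least four nodes
   per direction (names and coordinates are illustrative):

     Mat         A;
     ADDAIdx     row, col;
     PetscInt    rx[2] = {1, 2}, cx[2] = {1, 3};
     PetscScalar v = -1.0;
     DMADDAGetMatrixNS(dm, dm, MATMPIAIJ, &A);
     MatSetUp(A);                             // or call a proper preallocation routine
     row.x = rx; row.d = 0;
     col.x = cx; col.d = 0;
     DMADDAMatSetValues(A, dm, 1, &row, dm, 1, &col, &v, INSERT_VALUES);
     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
*/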
610: PetscErrorCode DMADDASetParameters(DM dm,PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof,PetscBool *periodic)
611: {
613: PetscMPIInt rank,size;
614: MPI_Comm comm;
615: PetscInt i;
616: PetscInt nodes_total;
617: PetscInt nodesleft;
618: PetscInt procsleft;
619: DM_ADDA *dd = (DM_ADDA*)dm->data;
622: PetscObjectGetComm((PetscObject)dm,&comm);
623: MPI_Comm_size(comm,&size);
624: MPI_Comm_rank(comm,&rank);
626: /* total number of nodes */
627: nodes_total = 1;
628: for (i=0; i<dim; i++) nodes_total *= nodes[i];
629: dd->dim = dim;
630: dd->dof = dof;
631: dd->periodic = periodic;
633: PetscMalloc(dim*sizeof(PetscInt), &(dd->nodes));
634: PetscMemcpy(dd->nodes, nodes, dim*sizeof(PetscInt));
636: /* procs */
637: PetscMalloc(dim*sizeof(PetscInt), &(dd->procs));
638: /* create distribution of nodes to processors */
639: if (procs == NULL) {
640: procs = dd->procs;
641: nodesleft = nodes_total;
642: procsleft = size;
643: /* figure out a good way to split the array to several processors */
644: for (i=0; i<dim; i++) {
645: if (i==dim-1) {
646: procs[i] = procsleft;
647: } else {
648: /* calculate best partition */
649: procs[i] = (PetscInt)(((PetscReal) nodes[i])*PetscPowReal(((PetscReal) procsleft)/((PetscReal) nodesleft),1./((PetscReal)(dim-i)))+0.5);
650: if (procs[i]<1) procs[i]=1;
651: while (procs[i] > 0) {
652: if (procsleft % procs[i]) procs[i]--;
653: else break;
654: }
655: nodesleft /= nodes[i];
656: procsleft /= procs[i];
657: }
658: }
659: } else {
660: /* user provided the number of processors */
661: PetscMemcpy(dd->procs, procs, dim*sizeof(PetscInt));
662: }
663: return(0);
664: }
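/*
   Partitioning arithmetic, as a worked example: when procs is NULL the split in
   direction i is roughly nodes[i]*(procsleft/nodesleft)^(1/(dim-i)), rounded and
   then reduced until it divides the remaining number of processes. For dim = 2,
   nodes = {8, 8} on 4 processes this gives procs[0] = round(8*sqrt(4/64)) = 2 and
   then procs[1] = 2, i.e. a 2 x 2 process grid.
*/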
668: PetscErrorCode DMSetUp_ADDA(DM dm)
669: {
671: PetscInt s=1; /* stencil width, fixed to 1 at the moment */
672: PetscMPIInt rank,size;
673: PetscInt i;
674: PetscInt procsleft;
675: PetscInt procsdimi;
676: PetscInt ranki;
677: PetscInt rpq;
678: DM_ADDA *dd = (DM_ADDA*)dm->data;
679: MPI_Comm comm;
680: PetscInt *nodes,*procs,dim,dof;
681: PetscBool *periodic;
684: PetscObjectGetComm((PetscObject)dm,&comm);
685: MPI_Comm_size(comm,&size);
686: MPI_Comm_rank(comm,&rank);
687: procs = dd->procs;
688: nodes = dd->nodes;
689: dim = dd->dim;
690: dof = dd->dof;
691: periodic = dd->periodic;
693: /* check for validity */
694: procsleft = 1;
695: for (i=0; i<dim; i++) {
696: if (nodes[i] < procs[i]) SETERRQ3(comm,PETSC_ERR_ARG_OUTOFRANGE,"Partition in direction %d is too fine! %D nodes, %D processors", i, nodes[i], procs[i]);
697: procsleft *= procs[i];
698: }
699: if (procsleft != size) SETERRQ(comm,PETSC_ERR_PLIB, "Created or was provided with inconsistent distribution of processors");
702: /* find out local region */
703: PetscMalloc(dim*sizeof(PetscInt), &(dd->lcs));
704: PetscMalloc(dim*sizeof(PetscInt), &(dd->lce));
705: procsdimi = size;
706: ranki = rank;
707: for (i=0; i<dim; i++) {
708:     /* What is the number of processors for dimensions i+1, ..., dim-1? */
709: procsdimi /= procs[i];
710: /* these are all nodes that come before our region */
711: rpq = ranki / procsdimi;
712: dd->lcs[i] = rpq * (nodes[i]/procs[i]);
713: if (rpq + 1 < procs[i]) {
714: dd->lce[i] = (rpq + 1) * (nodes[i]/procs[i]);
715: } else {
716: /* last one gets all the rest */
717: dd->lce[i] = nodes[i];
718: }
719: ranki = ranki - rpq*procsdimi;
720: }
722: /* compute local size */
723: dd->lsize=1;
724: for (i=0; i<dim; i++) {
725: dd->lsize *= (dd->lce[i]-dd->lcs[i]);
726: }
727: dd->lsize *= dof;
729: /* find out ghost points */
730: PetscMalloc(dim*sizeof(PetscInt), &(dd->lgs));
731: PetscMalloc(dim*sizeof(PetscInt), &(dd->lge));
732: for (i=0; i<dim; i++) {
733: if (periodic[i]) {
734: dd->lgs[i] = dd->lcs[i] - s;
735: dd->lge[i] = dd->lce[i] + s;
736: } else {
737: dd->lgs[i] = PetscMax(dd->lcs[i] - s, 0);
738: dd->lge[i] = PetscMin(dd->lce[i] + s, nodes[i]);
739: }
740: }
742: /* compute local size with ghost points */
743: dd->lgsize=1;
744: for (i=0; i<dim; i++) {
745: dd->lgsize *= (dd->lge[i]-dd->lgs[i]);
746: }
747: dd->lgsize *= dof;
749: /* create global and local prototype vector */
750: VecCreateMPIWithArray(comm,dd->dof,dd->lsize,PETSC_DECIDE,0,&(dd->global));
751: #if ADDA_NEEDS_LOCAL_VECTOR
752: /* local includes ghost points */
753: VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->lgsize,0,&(dd->local));
754: #endif
756: PetscMalloc(dim*sizeof(PetscInt), &(dd->refine));
757: for (i=0; i<dim; i++) dd->refine[i] = 3;
758: dd->dofrefine = 1;
759: return(0);
760: }
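/*
   Ownership arithmetic, as a worked example: each direction is split into blocks
   of nodes[i]/procs[i] (integer division) and the last process row in that
   direction takes the remainder. For nodes[0] = 10 on procs[0] = 3 the local
   ranges are [0,3), [3,6) and [6,10); with the stencil width of 1 the ghosted
   range of the middle process is [2,7), clipped at the boundary unless periodic.
*/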
764: PETSC_EXTERN PetscErrorCode DMCreate_ADDA(DM dm)
765: {
767: DM_ADDA *dd;
770: PetscNewLog(dm,DM_ADDA,&dd);
771: dm->data = (void*)dd;
773:   dm->ops->view                = DMView_ADDA;
774: dm->ops->createglobalvector = DMCreateGlobalVector_ADDA;
775: dm->ops->getcoloring = DMCreateColoring_ADDA;
776: dm->ops->creatematrix = DMCreateMatrix_ADDA;
777: dm->ops->createinterpolation = DMCreateInterpolation_ADDA;
778: dm->ops->refine = DMRefine_ADDA;
779: dm->ops->coarsen = DMCoarsen_ADDA;
780: dm->ops->getinjection = DMCreateInjection_ADDA;
781: dm->ops->getaggregates = DMCreateAggregates_ADDA;
782: dm->ops->setup = DMSetUp_ADDA;
783: dm->ops->destroy = DMDestroy_ADDA;
784: return(0);
785: }
790: /*@C
791:   DMADDACreate - Creates an ADDA object that translates between coordinates
792: in a geometric grid of arbitrary dimension and data in a PETSc vector
793: distributed on several processors.
795: Collective on MPI_Comm
797: Input Parameters:
798: + comm - MPI communicator
799: . dim - the dimension of the grid
800: .  nodes - array with dim entries that give the number of nodes in each dimension
801: .  procs - array with dim entries that give the number of processors in each dimension
802: (or NULL if it is to be determined automatically)
803: .  dof - number of degrees of freedom per node
804: -  periodic - array with dim entries; the i-th entry is PETSC_TRUE iff dimension i is periodic
806:   Output Parameter:
807: .  adda - pointer to the ADDA data structure that is created
809: Level: intermediate
811: @*/
812: PetscErrorCode DMADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof, PetscBool *periodic,DM *dm_p)
813: {
817: DMCreate(comm,dm_p);
818: DMSetType(*dm_p,DMADDA);
819: DMADDASetParameters(*dm_p,dim,nodes,procs,dof,periodic);
820: DMSetUp(*dm_p);
821: return(0);
822: }
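/*
   Usage sketch: create a 2d, non-periodic ADDA with one dof per node on a 32x32
   grid, letting PETSc choose the process grid (names are illustrative):

     DM        adda;
     PetscInt  nodes[2]    = {32, 32};
     PetscBool periodic[2] = {PETSC_FALSE, PETSC_FALSE};
     DMADDACreate(PETSC_COMM_WORLD, 2, nodes, NULL, 1, periodic, &adda);
     // ... DMCreateGlobalVector(), DMCreateMatrix(), etc. ...
     DMDestroy(&adda);
*/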