Actual source code: adda.c
petsc-3.3-p7 2013-05-11
1: /*
3: Contributed by Arvid Bessen, Columbia University, June 2007
5: Extension of DMDA object to any number of dimensions.
7: */
8: #include <../src/dm/impls/adda/addaimpl.h> /*I "petscdmadda.h" I*/
13: PetscErrorCode DMDestroy_ADDA(DM dm)
14: {
16: DM_ADDA *dd = (DM_ADDA*)dm->data;
19: /* destroy the allocated data */
20: PetscFree(dd->nodes);
21: PetscFree(dd->procs);
22: PetscFree(dd->lcs);
23: PetscFree(dd->lce);
24: PetscFree(dd->lgs);
25: PetscFree(dd->lge);
26: PetscFree(dd->refine);
28: VecDestroy(&dd->global);
29: return(0);
30: }
34: PetscErrorCode DMView_ADDA(DM dm, PetscViewer v)
35: {
37: SETERRQ(((PetscObject)dm)->comm,PETSC_ERR_SUP, "Not implemented yet");
38: return(0);
39: }
43: PetscErrorCode DMCreateGlobalVector_ADDA(DM dm, Vec *vec)
44: {
46: DM_ADDA *dd = (DM_ADDA*)dm->data;
51: VecDuplicate(dd->global, vec);
52: return(0);
53: }
57: PetscErrorCode DMCreateColoring_ADDA(DM dm, ISColoringType ctype,const MatType mtype,ISColoring *coloring)
58: {
60: SETERRQ(((PetscObject)dm)->comm,PETSC_ERR_SUP, "Not implemented yet");
61: return(0);
62: }
66: PetscErrorCode DMCreateMatrix_ADDA(DM dm, const MatType mtype, Mat *mat)
67: {
69: DM_ADDA *dd = (DM_ADDA*)dm->data;
73: MatCreate(((PetscObject)dm)->comm, mat);
74: MatSetSizes(*mat, dd->lsize, dd->lsize, PETSC_DECIDE, PETSC_DECIDE);
75: MatSetType(*mat, mtype);
76: return(0);
77: }
81: /*@
82:   DMADDAGetMatrixNS - Creates a matrix compatible with two distributed arrays
84: Collective on ADDA
86:    Input Parameters:
87: +  addar - the distributed array for which we create the matrix, which indexes the rows
88: . addac - the distributed array for which we create the matrix, which indexes the columns
89: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
90: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
92: Output Parameter:
93: . mat - the empty Jacobian
95: Level: beginner
97: .keywords: distributed array, matrix
99: .seealso: DMCreateMatrix()
100: @*/
101: PetscErrorCode DMADDAGetMatrixNS(DM dm, DM dmc, const MatType mtype, Mat *mat)
102: {
104: DM_ADDA *dd = (DM_ADDA*)dm->data;
105: DM_ADDA *ddc = (DM_ADDA*)dmc->data;
111: MatCreate(((PetscObject)dm)->comm, mat);
112: MatSetSizes(*mat, dd->lsize, ddc->lsize, PETSC_DECIDE, PETSC_DECIDE);
113: MatSetType(*mat, mtype);
114: return(0);
115: }
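/*
   Example usage (a minimal sketch): assuming dmrow and dmcol are hypothetical ADDA
   objects created with DMADDACreate() on the same communicator, the routine above
   yields an unassembled matrix whose rows are indexed by dmrow and whose columns
   are indexed by dmcol.

     Mat A;
     DMADDAGetMatrixNS(dmrow, dmcol, MATAIJ, &A);
     ... preallocate and fill A, e.g. with DMADDAMatSetValues() ...
     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
     MatDestroy(&A);
*/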
119: PetscErrorCode DMCreateInterpolation_ADDA(DM dm1,DM dm2,Mat *mat,Vec *vec)
120: {
122: SETERRQ(((PetscObject)dm1)->comm,PETSC_ERR_SUP, "Not implemented yet");
123: return(0);
124: }
128: PetscErrorCode DMRefine_ADDA(DM dm, MPI_Comm comm, DM *dmf)
129: {
131: SETERRQ(((PetscObject)dm)->comm,PETSC_ERR_SUP, "Not implemented yet");
132: return(0);
133: }
137: PetscErrorCode DMCoarsen_ADDA(DM dm, MPI_Comm comm,DM *dmc)
138: {
140: PetscInt *nodesc;
141: PetscInt dofc;
142: PetscInt i;
143: DM_ADDA *dd = (DM_ADDA*)dm->data;
148: PetscMalloc(dd->dim*sizeof(PetscInt), &nodesc);
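  /* each coarse dimension gets ceil(nodes[i]/refine[i]) nodes; for example
     (hypothetical values) 10 fine nodes with a refinement factor of 3 yield
     4 coarse nodes */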
149: for(i=0; i<dd->dim; i++) {
150: nodesc[i] = (dd->nodes[i] % dd->refine[i]) ? dd->nodes[i] / dd->refine[i] + 1 : dd->nodes[i] / dd->refine[i];
151: }
152: dofc = (dd->dof % dd->dofrefine) ? dd->dof / dd->dofrefine + 1 : dd->dof / dd->dofrefine;
153: DMADDACreate(((PetscObject)dm)->comm, dd->dim, nodesc, dd->procs, dofc, dd->periodic, dmc);
154: PetscFree(nodesc);
155: /* copy refinement factors */
156: DMADDASetRefinement(*dmc, dd->refine, dd->dofrefine);
157: return(0);
158: }
162: PetscErrorCode DMCreateInjection_ADDA(DM dm1,DM dm2, VecScatter *ctx)
163: {
165: SETERRQ(((PetscObject)dm1)->comm,PETSC_ERR_SUP, "Not implemented yet");
166: return(0);
167: }
169: /*@C
170: ADDAHCiterStartup - performs the first check for an iteration through a hypercube
171: lc, uc, idx all have to be valid arrays of size dim
172:   This function sets idx to lc and then checks whether the lower corner (lc) is less
173:   than or equal to the upper corner (uc). If lc "<=" uc in all coordinates, it returns PETSC_TRUE,
174: and PETSC_FALSE otherwise.
175:
176: Input Parameters:
177: +  dim - the number of dimensions
178: . lc - the "lower" corner
179: - uc - the "upper" corner
181: Output Parameters:
182: . idx - the index that this function increases
184: Developer Notes: This code is crap! You cannot return a value and NO ERROR code in PETSc!
186: Level: developer
187: @*/
188: PetscBool ADDAHCiterStartup(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
189:   PetscErrorCode ierr;
190:   PetscInt i;
192:   ierr = PetscMemcpy(idx, lc, sizeof(PetscInt)*dim);
193: if(ierr) {
194: PetscError(PETSC_COMM_SELF,__LINE__,__FUNCT__,__FILE__,__SDIR__,ierr,PETSC_ERROR_REPEAT," ");
195: return PETSC_FALSE;
196: }
197: for(i=0; i<dim; i++) {
198: if( lc[i] > uc[i] ) {
199: return PETSC_FALSE;
200: }
201: }
202: return PETSC_TRUE;
203: }
205: /*@C
206: ADDAHCiter - iterates through a hypercube
207: lc, uc, idx all have to be valid arrays of size dim
208:   This function returns PETSC_FALSE if idx exceeds uc, and PETSC_TRUE otherwise.
209:   There are no guarantees on what happens if idx is not in the hypercube
210:   spanned by lc and uc; this should be checked with ADDAHCiterStartup.
211:
212: Use this code as follows:
213: if( ADDAHCiterStartup(dim, lc, uc, idx) ) {
214: do {
215: ...
216: } while( ADDAHCiter(dim, lc, uc, idx) );
217: }
218:
219: Input Parameters:
220: +  dim - the number of dimensions
221: . lc - the "lower" corner
222: - uc - the "upper" corner
224: Output Parameters:
225: . idx - the index that this function increases
227: Level: developer
228: @*/
229: PetscBool ADDAHCiter(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
230: PetscInt i;
231: for(i=dim-1; i>=0; i--) {
232: idx[i] += 1;
233: if( uc[i] > idx[i] ) {
234: return PETSC_TRUE;
235: } else {
236: idx[i] -= uc[i] - lc[i];
237: }
238: }
239: return PETSC_FALSE;
240: }
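/*
   A concrete sketch of the pattern above, with hypothetical bounds: the loop visits
   all index pairs (idx[0], idx[1]) with 0 <= idx[0] < 2 and 0 <= idx[1] < 3 in
   row-major order (the upper corner is exclusive).

     PetscInt lc[2] = {0, 0};
     PetscInt uc[2] = {2, 3};
     PetscInt idx[2];
     if( ADDAHCiterStartup(2, lc, uc, idx) ) {
       do {
         PetscPrintf(PETSC_COMM_SELF, "visiting (%D, %D)\n", idx[0], idx[1]);
       } while( ADDAHCiter(2, lc, uc, idx) );
     }
*/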
244: PetscErrorCode DMCreateAggregates_ADDA(DM dmc,DM dmf,Mat *rest)
245: {
246: PetscErrorCode ierr=0;
247: PetscInt i;
248: PetscInt dim;
249: PetscInt dofc, doff;
250: PetscInt *lcs_c, *lce_c;
251: PetscInt *lcs_f, *lce_f;
252: PetscInt *fgs, *fge;
253: PetscInt fgdofs, fgdofe;
254: ADDAIdx iter_c, iter_f;
255: PetscInt max_agg_size;
256: PetscMPIInt comm_size;
257: ADDAIdx *fine_nodes;
258: PetscInt fn_idx;
259: PetscScalar *one_vec;
260: DM_ADDA *ddc = (DM_ADDA*)dmc->data;
261: DM_ADDA *ddf = (DM_ADDA*)dmf->data;
267: if (ddc->dim != ddf->dim) SETERRQ2(((PetscObject)dmf)->comm,PETSC_ERR_ARG_INCOMP,"Dimensions of ADDA do not match %D %D", ddc->dim, ddf->dim);
268: /* if (dmc->dof != dmf->dof) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"DOF of ADDA do not match %D %D", dmc->dof, dmf->dof); */
269: dim = ddc->dim;
270: dofc = ddc->dof;
271: doff = ddf->dof;
273: DMADDAGetCorners(dmc, &lcs_c, &lce_c);
274: DMADDAGetCorners(dmf, &lcs_f, &lce_f);
275:
276: /* compute maximum size of aggregate */
277: max_agg_size = 1;
278: for(i=0; i<dim; i++) {
279: max_agg_size *= ddf->nodes[i] / ddc->nodes[i] + 1;
280: }
281: max_agg_size *= doff / dofc + 1;
283: /* create the matrix that will contain the restriction operator */
284: MPI_Comm_size(PETSC_COMM_WORLD,&comm_size);
286: /* construct matrix */
287: if( comm_size == 1 ) {
288: DMADDAGetMatrixNS(dmc, dmf, MATSEQAIJ, rest);
289: MatSeqAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL);
290: } else {
291: DMADDAGetMatrixNS(dmc, dmf, MATMPIAIJ, rest);
292: MatMPIAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL, max_agg_size, PETSC_NULL);
293: }
294: /* store nodes in the fine grid here */
295: PetscMalloc(sizeof(ADDAIdx)*max_agg_size, &fine_nodes);
296: /* these are the values to set to, a collection of 1's */
297: PetscMalloc(sizeof(PetscScalar)*max_agg_size, &one_vec);
298: /* initialize */
299: for(i=0; i<max_agg_size; i++) {
300: PetscMalloc(sizeof(PetscInt)*dim, &(fine_nodes[i].x));
301: one_vec[i] = 1.0;
302: }
304: /* get iterators */
305: PetscMalloc(sizeof(PetscInt)*dim, &(iter_c.x));
306: PetscMalloc(sizeof(PetscInt)*dim, &(iter_f.x));
308: /* the fine grid node corner for each coarse grid node */
309: PetscMalloc(sizeof(PetscInt)*dim, &fgs);
310: PetscMalloc(sizeof(PetscInt)*dim, &fge);
312: /* loop over all coarse nodes */
313: PetscMemcpy(iter_c.x, lcs_c, sizeof(PetscInt)*dim);
314: if( ADDAHCiterStartup(dim, lcs_c, lce_c, iter_c.x) ) {
315: do {
316: /* find corresponding fine grid nodes */
317: for(i=0; i<dim; i++) {
318: fgs[i] = iter_c.x[i]*ddf->nodes[i]/ddc->nodes[i];
319: fge[i] = PetscMin((iter_c.x[i]+1)*ddf->nodes[i]/ddc->nodes[i], ddf->nodes[i]);
320: }
321: /* treat all dof of the coarse grid */
322: for(iter_c.d=0; iter_c.d<dofc; iter_c.d++) {
323: /* find corresponding fine grid dof's */
324: fgdofs = iter_c.d*doff/dofc;
325: fgdofe = PetscMin((iter_c.d+1)*doff/dofc, doff);
326: /* we now know the "box" of all the fine grid nodes that are mapped to one coarse grid node */
327: fn_idx = 0;
328: /* loop over those corresponding fine grid nodes */
329: if( ADDAHCiterStartup(dim, fgs, fge, iter_f.x) ) {
330: do {
331: /* loop over all corresponding fine grid dof */
332: for(iter_f.d=fgdofs; iter_f.d<fgdofe; iter_f.d++) {
333: PetscMemcpy(fine_nodes[fn_idx].x, iter_f.x, sizeof(PetscInt)*dim);
334: fine_nodes[fn_idx].d = iter_f.d;
335: fn_idx++;
336: }
337: } while( ADDAHCiter(dim, fgs, fge, iter_f.x) );
338: }
339: /* add all these points to one aggregate */
340: DMADDAMatSetValues(*rest, dmc, 1, &iter_c, dmf, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
341: }
342: } while( ADDAHCiter(dim, lcs_c, lce_c, iter_c.x) );
343: }
345: /* free memory */
346: PetscFree(fgs);
347: PetscFree(fge);
348: PetscFree(iter_c.x);
349: PetscFree(iter_f.x);
350: PetscFree(lcs_c);
351: PetscFree(lce_c);
352: PetscFree(lcs_f);
353: PetscFree(lce_f);
354: PetscFree(one_vec);
355: for(i=0; i<max_agg_size; i++) {
356: PetscFree(fine_nodes[i].x);
357: }
358: PetscFree(fine_nodes);
360: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
361: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
362: return(0);
363: }
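/*
   Example usage (a minimal sketch with hypothetical sizes): starting from a fine
   ADDA, a coarse companion is obtained with DMCoarsen() (which uses the default
   refinement factor of 3 set in DMSetUp_ADDA()), and the aggregation operator is
   then built through DMCreateAggregates(); comm is assumed to be a valid MPI
   communicator.

     DM        dmf, dmc;
     Mat       rest;
     PetscInt  nodes[2]    = {12, 12};
     PetscBool periodic[2] = {PETSC_FALSE, PETSC_FALSE};
     DMADDACreate(comm, 2, nodes, PETSC_NULL, 1, periodic, &dmf);
     DMCoarsen(dmf, comm, &dmc);
     DMCreateAggregates(dmc, dmf, &rest);
     MatDestroy(&rest);
     DMDestroy(&dmc);
     DMDestroy(&dmf);
*/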
367: /*@
368: DMADDASetRefinement - Sets the refinement factors of the distributed arrays.
370: Collective on ADDA
372:    Input Parameters:
373: + adda - the ADDA object
374: . refine - array of refinement factors
375: - dofrefine - the refinement factor for the dof, usually just 1
377: Level: developer
379: .keywords: distributed array, refinement
380: @*/
381: PetscErrorCode DMADDASetRefinement(DM dm, PetscInt *refine, PetscInt dofrefine)
382: {
383: DM_ADDA *dd = (DM_ADDA*)dm->data;
389: PetscMemcpy(dd->refine, refine, dd->dim*sizeof(PetscInt));
390: dd->dofrefine = dofrefine;
391: return(0);
392: }
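/*
   Example usage (a minimal sketch): assuming dm is a hypothetical 3-dimensional ADDA,
   the following requests a refinement factor of 2 in every direction and leaves the
   number of dof per node unchanged; the refine array must have as many entries as
   the ADDA has dimensions.

     PetscInt refine[3] = {2, 2, 2};
     DMADDASetRefinement(dm, refine, 1);
*/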
396: /*@
397: DMADDAGetCorners - Gets the corners of the local area
399: Not Collective
401: Input Parameter:
402: . adda - the ADDA object
404:   Output Parameters:
405: + lcorner - the "lower" corner
406: - ucorner - the "upper" corner
408: Both lcorner and ucorner are allocated by this procedure and will point to an
409: array of size dd->dim.
411: Level: beginner
413: .keywords: distributed array, refinement
414: @*/
415: PetscErrorCode DMADDAGetCorners(DM dm, PetscInt **lcorner, PetscInt **ucorner)
416: {
417: DM_ADDA *dd = (DM_ADDA*)dm->data;
424: PetscMalloc(dd->dim*sizeof(PetscInt), lcorner);
425: PetscMalloc(dd->dim*sizeof(PetscInt), ucorner);
426: PetscMemcpy(*lcorner, dd->lcs, dd->dim*sizeof(PetscInt));
427: PetscMemcpy(*ucorner, dd->lce, dd->dim*sizeof(PetscInt));
428: return(0);
429: }
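/*
   Example usage (a minimal sketch): assuming dm is a hypothetical ADDA, both corner
   arrays are allocated by the routine, so the caller must free them. The local
   (non-ghosted) region of this process is lc[i] <= x_i < uc[i] in each dimension i.

     PetscInt *lc, *uc;
     DMADDAGetCorners(dm, &lc, &uc);
     ... loop over the local region ...
     PetscFree(lc);
     PetscFree(uc);
*/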
433: /*@
434: DMADDAGetGhostCorners - Gets the ghost corners of the local area
436:   Not Collective
438: Input Parameter:
439: . adda - the ADDA object
441:    Output Parameters:
442: + lcorner - the "lower" corner of the ghosted area
443: - ucorner - the "upper" corner of the ghosted area
445: Both lcorner and ucorner are allocated by this procedure and will point to an
446: array of size dd->dim.
448: Level: beginner
450: .keywords: distributed array, refinement
451: @*/
452: PetscErrorCode DMADDAGetGhostCorners(DM dm, PetscInt **lcorner, PetscInt **ucorner)
453: {
454: DM_ADDA *dd = (DM_ADDA*)dm->data;
461: PetscMalloc(dd->dim*sizeof(PetscInt), lcorner);
462: PetscMalloc(dd->dim*sizeof(PetscInt), ucorner);
463: PetscMemcpy(*lcorner, dd->lgs, dd->dim*sizeof(PetscInt));
464: PetscMemcpy(*ucorner, dd->lge, dd->dim*sizeof(PetscInt));
465: return(0);
466: }
472: /*@C
473: DMADDAMatSetValues - Inserts or adds a block of values into a matrix. The values
474: are indexed geometrically with the help of the ADDA data structure.
475: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
476:   MUST be called after all calls to DMADDAMatSetValues() have been completed.
478: Not Collective
480: Input Parameters:
481: + mat - the matrix
482: . addam - the ADDA geometry information for the rows
483: . m - the number of rows
484: .  idxm - the row indices, each of them a proper ADDAIdx
485: .  addan - the ADDA geometry information for the columns
486: .  n - the number of columns
487: .  idxn - the column indices, each of them a proper ADDAIdx
488: . v - a logically two-dimensional array of values of size m*n
489: - addv - either ADD_VALUES or INSERT_VALUES, where
490: ADD_VALUES adds values to any existing entries, and
491: INSERT_VALUES replaces existing entries with new values
493: Notes:
494: By default the values, v, are row-oriented and unsorted.
495: See MatSetOption() for other options.
497:   Calls to DMADDAMatSetValues() (and MatSetValues()) with the INSERT_VALUES and ADD_VALUES
498: options cannot be mixed without intervening calls to the assembly
499: routines.
501: Efficiency Alert:
502: The routine ADDAMatSetValuesBlocked() may offer much better efficiency
503: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
505: Level: beginner
507: Concepts: matrices^putting entries in
509: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), ADDAMatSetValuesBlocked(),
510: InsertMode, INSERT_VALUES, ADD_VALUES
511: @*/
512: PetscErrorCode DMADDAMatSetValues(Mat mat, DM dmm, PetscInt m, const ADDAIdx idxm[],DM dmn, PetscInt n, const ADDAIdx idxn[],
513: const PetscScalar v[], InsertMode addv)
514: {
515: DM_ADDA *ddm = (DM_ADDA*)dmm->data;
516: DM_ADDA *ddn = (DM_ADDA*)dmn->data;
518: PetscInt *nodemult;
519: PetscInt i, j;
520: PetscInt *matidxm, *matidxn;
521: PetscInt *x, d;
522: PetscInt idx;
525: /* find correct multiplying factors */
526: PetscMalloc(ddm->dim*sizeof(PetscInt), &nodemult);
527: nodemult[ddm->dim-1] = 1;
528: for(j=ddm->dim-2; j>=0; j--) {
529: nodemult[j] = nodemult[j+1]*(ddm->nodes[j+1]);
530: }
531: /* convert each coordinate in idxm to the matrix row index */
532: PetscMalloc(m*sizeof(PetscInt), &matidxm);
533: for(i=0; i<m; i++) {
534: x = idxm[i].x; d = idxm[i].d;
535: idx = 0;
536: for(j=ddm->dim-1; j>=0; j--) {
537: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
538: if( ddm->periodic[j] ) { /* periodic wraps around */
539: x[j] += ddm->nodes[j];
540: } else { /* non-periodic get discarded */
541: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
542: goto endofloop_m;
543: }
544: }
545: if( x[j] >= ddm->nodes[j] ) { /* "right", "above", etc. of boundary */
546: if( ddm->periodic[j] ) { /* periodic wraps around */
547: x[j] -= ddm->nodes[j];
548: } else { /* non-periodic get discarded */
549: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
550: goto endofloop_m;
551: }
552: }
553: idx += x[j]*nodemult[j];
554: }
555: matidxm[i] = idx*(ddm->dof) + d;
556: endofloop_m:
557: ;
558: }
559: PetscFree(nodemult);
561: /* find correct multiplying factors */
562: PetscMalloc(ddn->dim*sizeof(PetscInt), &nodemult);
563: nodemult[ddn->dim-1] = 1;
564: for(j=ddn->dim-2; j>=0; j--) {
565: nodemult[j] = nodemult[j+1]*(ddn->nodes[j+1]);
566: }
567:   /* convert each coordinate in idxn to the matrix column index */
568: PetscMalloc(n*sizeof(PetscInt), &matidxn);
569: for(i=0; i<n; i++) {
570: x = idxn[i].x; d = idxn[i].d;
571: idx = 0;
572: for(j=ddn->dim-1; j>=0; j--) {
573: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
574: if( ddn->periodic[j] ) { /* periodic wraps around */
575: x[j] += ddn->nodes[j];
576: } else { /* non-periodic get discarded */
577: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
578: goto endofloop_n;
579: }
580: }
581: if( x[j] >= ddn->nodes[j] ) { /* "right", "above", etc. of boundary */
582: if( ddn->periodic[j] ) { /* periodic wraps around */
583: x[j] -= ddn->nodes[j];
584: } else { /* non-periodic get discarded */
585: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
586: goto endofloop_n;
587: }
588: }
589: idx += x[j]*nodemult[j];
590: }
591: matidxn[i] = idx*(ddn->dof) + d;
592: endofloop_n:
593: ;
594: }
595: /* call original MatSetValues() */
596: MatSetValues(mat, m, matidxm, n, matidxn, v, addv);
597: /* clean up */
598: PetscFree(nodemult);
599: PetscFree(matidxm);
600: PetscFree(matidxn);
601: return(0);
602: }
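/*
   Example usage (a minimal sketch): assuming A is a matrix obtained from
   DMADDAGetMatrixNS() or DMCreateMatrix() for a hypothetical 2-dimensional ADDA dm,
   and i0, i1 are grid indices owned by this process, the following inserts 1.0 into
   the diagonal entry for dof component 0 of that grid point.

     ADDAIdx     row;
     PetscInt    xr[2];
     PetscScalar one = 1.0;
     xr[0] = i0; xr[1] = i1;
     row.x = xr; row.d = 0;
     DMADDAMatSetValues(A, dm, 1, &row, dm, 1, &row, &one, INSERT_VALUES);
     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
*/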
606: PetscErrorCode DMADDASetParameters(DM dm,PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof,PetscBool *periodic)
607: {
609: PetscMPIInt rank,size;
610: MPI_Comm comm;
611: PetscInt i;
612: PetscInt nodes_total;
613: PetscInt nodesleft;
614: PetscInt procsleft;
615: DM_ADDA *dd = (DM_ADDA*)dm->data;
618: PetscObjectGetComm((PetscObject)dm,&comm);
619: MPI_Comm_size(comm,&size);
620: MPI_Comm_rank(comm,&rank);
622: /* total number of nodes */
623: nodes_total = 1;
624: for(i=0; i<dim; i++) nodes_total *= nodes[i];
625: dd->dim = dim;
626: dd->dof = dof;
627: dd->periodic = periodic;
629: PetscMalloc(dim*sizeof(PetscInt), &(dd->nodes));
630: PetscMemcpy(dd->nodes, nodes, dim*sizeof(PetscInt));
632: /* procs */
633: PetscMalloc(dim*sizeof(PetscInt), &(dd->procs));
634: /* create distribution of nodes to processors */
635: if(procs == PETSC_NULL) {
636: procs = dd->procs;
637: nodesleft = nodes_total;
638: procsleft = size;
639: /* figure out a good way to split the array to several processors */
640: for(i=0; i<dim; i++) {
641: if(i==dim-1) {
642: procs[i] = procsleft;
643: } else {
644: /* calculate best partition */
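        /* heuristic: pick procs[i] ~ nodes[i]*(procsleft/nodesleft)^(1/(dim-i)), i.e.
           the same nodes-per-process ratio in each remaining direction, then decrease
           it below until it evenly divides procsleft */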
645: procs[i] = (PetscInt)(((double) nodes[i])*pow(((double) procsleft)/((double) nodesleft),1./((double)(dim-i)))+0.5);
646: if(procs[i]<1) procs[i]=1;
647: while( procs[i] > 0 ) {
648: if( procsleft % procs[i] )
649: procs[i]--;
650: else
651: break;
652: }
653: nodesleft /= nodes[i];
654: procsleft /= procs[i];
655: }
656: }
657: } else {
658: /* user provided the number of processors */
659: PetscMemcpy(dd->procs, procs, dim*sizeof(PetscInt));
660: }
661: return(0);
662: }
666: PetscErrorCode DMSetUp_ADDA(DM dm)
667: {
669: PetscInt s=1; /* stencil width, fixed to 1 at the moment */
670: PetscMPIInt rank,size;
671: PetscInt i;
672: PetscInt procsleft;
673: PetscInt procsdimi;
674: PetscInt ranki;
675: PetscInt rpq;
676: DM_ADDA *dd = (DM_ADDA*)dm->data;
677: MPI_Comm comm;
678: PetscInt *nodes,*procs,dim,dof;
679: PetscBool *periodic;
682: PetscObjectGetComm((PetscObject)dm,&comm);
683: MPI_Comm_size(comm,&size);
684: MPI_Comm_rank(comm,&rank);
685: procs = dd->procs;
686: nodes = dd->nodes;
687: dim = dd->dim;
688: dof = dd->dof;
689: periodic = dd->periodic;
691: /* check for validity */
692: procsleft = 1;
693: for(i=0; i<dim; i++) {
694: if (nodes[i] < procs[i]) SETERRQ3(comm,PETSC_ERR_ARG_OUTOFRANGE,"Partition in direction %d is too fine! %D nodes, %D processors", i, nodes[i], procs[i]);
695: procsleft *= procs[i];
696: }
697: if (procsleft != size) SETERRQ(comm,PETSC_ERR_PLIB, "Created or was provided with inconsistent distribution of processors");
699:
700: /* find out local region */
701: PetscMalloc(dim*sizeof(PetscInt), &(dd->lcs));
702: PetscMalloc(dim*sizeof(PetscInt), &(dd->lce));
703: procsdimi=size;
704: ranki=rank;
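  /* the rank is decomposed in a mixed-radix fashion: for each dimension i, rpq below
     is this process's coordinate in the process grid and ranki the remainder that
     encodes the coordinates in the later dimensions */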
705: for(i=0; i<dim; i++) {
706:     /* What is the number of processes for dimensions i+1, ..., dim-1? */
707: procsdimi /= procs[i];
708: /* these are all nodes that come before our region */
709: rpq = ranki / procsdimi;
710: dd->lcs[i] = rpq * (nodes[i]/procs[i]);
711: if( rpq + 1 < procs[i] ) {
712: dd->lce[i] = (rpq + 1) * (nodes[i]/procs[i]);
713: } else {
714: /* last one gets all the rest */
715: dd->lce[i] = nodes[i];
716: }
717: ranki = ranki - rpq*procsdimi;
718: }
719:
720: /* compute local size */
721: dd->lsize=1;
722: for(i=0; i<dim; i++) {
723: dd->lsize *= (dd->lce[i]-dd->lcs[i]);
724: }
725: dd->lsize *= dof;
727: /* find out ghost points */
728: PetscMalloc(dim*sizeof(PetscInt), &(dd->lgs));
729: PetscMalloc(dim*sizeof(PetscInt), &(dd->lge));
730: for(i=0; i<dim; i++) {
731: if( periodic[i] ) {
732: dd->lgs[i] = dd->lcs[i] - s;
733: dd->lge[i] = dd->lce[i] + s;
734: } else {
735: dd->lgs[i] = PetscMax(dd->lcs[i] - s, 0);
736: dd->lge[i] = PetscMin(dd->lce[i] + s, nodes[i]);
737: }
738: }
739:
740: /* compute local size with ghost points */
741: dd->lgsize=1;
742: for(i=0; i<dim; i++) {
743: dd->lgsize *= (dd->lge[i]-dd->lgs[i]);
744: }
745: dd->lgsize *= dof;
747: /* create global and local prototype vector */
748: VecCreateMPIWithArray(comm,dd->dof,dd->lsize,PETSC_DECIDE,0,&(dd->global));
749: #if ADDA_NEEDS_LOCAL_VECTOR
750: /* local includes ghost points */
751: VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->lgsize,0,&(dd->local));
752: #endif
754: PetscMalloc(dim*sizeof(PetscInt), &(dd->refine));
755: for(i=0; i<dim; i++) dd->refine[i] = 3;
756: dd->dofrefine = 1;
757: return(0);
758: }
760: EXTERN_C_BEGIN
763: PetscErrorCode DMCreate_ADDA(DM dm)
764: {
766: DM_ADDA *dd;
769: PetscNewLog(dm,DM_ADDA,&dd);
770: dm->data = (void*)dd;
772:   dm->ops->view = DMView_ADDA;
773: dm->ops->createglobalvector = DMCreateGlobalVector_ADDA;
774: dm->ops->getcoloring = DMCreateColoring_ADDA;
775: dm->ops->creatematrix = DMCreateMatrix_ADDA;
776: dm->ops->createinterpolation = DMCreateInterpolation_ADDA;
777: dm->ops->refine = DMRefine_ADDA;
778: dm->ops->coarsen = DMCoarsen_ADDA;
779: dm->ops->getinjection = DMCreateInjection_ADDA;
780: dm->ops->getaggregates = DMCreateAggregates_ADDA;
781: dm->ops->setup = DMSetUp_ADDA;
782: dm->ops->destroy = DMDestroy_ADDA;
783: return(0);
784: }
785: EXTERN_C_END
790: /*@C
791:    DMADDACreate - Creates an ADDA object that translates between coordinates
792: in a geometric grid of arbitrary dimension and data in a PETSc vector
793: distributed on several processors.
795: Collective on MPI_Comm
797: Input Parameters:
798: + comm - MPI communicator
799: . dim - the dimension of the grid
800: .  nodes - array with dim entries that give the number of nodes in each dimension
801: .  procs - array with dim entries that give the number of processors in each dimension
802:          (or PETSC_NULL if this is to be determined automatically)
803: .  dof - number of degrees of freedom per node
804: -  periodic - array with dim entries; the i-th entry is set to PETSC_TRUE iff dimension i is periodic
806: Output Parameters:
807: . adda - pointer to ADDA data structure that is created
809: Level: intermediate
811: @*/
812: PetscErrorCode DMADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof, PetscBool *periodic,DM *dm_p)
813: {
815:
817: DMCreate(comm,dm_p);
818: DMSetType(*dm_p,DMADDA);
819: DMADDASetParameters(*dm_p,dim,nodes,procs,dof,periodic);
820: DMSetUp(*dm_p);
821: return(0);
822: }
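/*
   Example usage (a minimal sketch with hypothetical sizes): create a 2-dimensional,
   non-periodic ADDA with one degree of freedom per node, obtain a global vector and
   a matrix from it, and clean up.

     DM        adda;
     Vec       x;
     Mat       A;
     PetscInt  nodes[2]    = {32, 32};
     PetscBool periodic[2] = {PETSC_FALSE, PETSC_FALSE};

     DMADDACreate(PETSC_COMM_WORLD, 2, nodes, PETSC_NULL, 1, periodic, &adda);
     DMCreateGlobalVector(adda, &x);
     DMCreateMatrix(adda, MATAIJ, &A);
     ... assemble and use A and x ...
     MatDestroy(&A);
     VecDestroy(&x);
     DMDestroy(&adda);
*/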