Actual source code: ml.c
2: /*
3:    Provides an interface to the ML smoothed aggregation algebraic multigrid preconditioner from the Trilinos package.
4:    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs.
5:                                    Jed Brown, see [PETSC #18321, #18449].
6: */
7: #include <petsc/private/pcimpl.h>
8: #include <petsc/private/pcmgimpl.h>
9: #include <../src/mat/impls/aij/seq/aij.h>
10: #include <../src/mat/impls/aij/mpi/mpiaij.h>
11: #include <petscdm.h>
13: EXTERN_C_BEGIN
14: /* HAVE_CONFIG_H flag is required by ML include files */
15: #if !defined(HAVE_CONFIG_H)
16: #define HAVE_CONFIG_H
17: #endif
18: #include <ml_include.h>
19: #include <ml_viz_stats.h>
20: EXTERN_C_END
22: typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
23: static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
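/* The null space type is selected at run time with -pc_ml_nullspace <AUTO,USER,BLOCK,SCALAR>;
   AUTO becomes USER when a near null space has been attached with MatSetNearNullSpace(), BLOCK when
   the matrix block size is larger than one, and SCALAR otherwise (see PCSetUp_ML() below). */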
25: /* The context (data structure) at each grid level */
26: typedef struct {
27: Vec x,b,r; /* global vectors */
28: Mat A,P,R;
29: KSP ksp;
30: Vec coords; /* projected by ML, if PCSetCoordinates is called; values packed by node */
31: } GridCtx;
33: /* The context used to input PETSc matrix into ML at fine grid */
34: typedef struct {
35: Mat A; /* Petsc matrix in aij format */
36: Mat Aloc; /* local portion of A to be used by ML */
37: Vec x,y;
38: ML_Operator *mlmat;
39: PetscScalar *pwork; /* tmp array used by PetscML_comm() */
40: } FineGridCtx;
42: /* The context associates a ML matrix with a PETSc shell matrix */
43: typedef struct {
44: Mat A; /* PETSc shell matrix associated with mlmat */
45: ML_Operator *mlmat; /* ML matrix associated with A */
46: } Mat_MLShell;
48: /* Private context for the ML preconditioner */
49: typedef struct {
50: ML *ml_object;
51: ML_Aggregate *agg_object;
52: GridCtx *gridctx;
53: FineGridCtx *PetscMLdata;
54: PetscInt Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
55: PetscReal Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
56: PetscBool SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
57: PetscBool reuse_interpolation;
58: PCMLNullSpaceType nulltype;
59: PetscMPIInt size; /* size of communicator for pc->pmat */
60: PetscInt dim; /* data from PCSetCoordinates(_ML) */
61: PetscInt nloc;
62: PetscReal *coords; /* ML has a grid object for each level: the finest grid will point into coords */
63: } PC_ML;
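/* Getrow callback handed to ML via ML_Set_Amatrix_Getrow(): copies the requested rows of the
   merged local matrix Aloc into ML's column/value buffers. Per ML's getrow convention it returns
   1 on success and 0 when allocated_space is too small, so that ML can retry with a larger buffer. */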
65: static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
66: {
68: PetscInt m,i,j,k=0,row,*aj;
69: PetscScalar *aa;
70: FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
71: Mat_SeqAIJ *a = (Mat_SeqAIJ*)ml->Aloc->data;
73: if (MatGetSize(ml->Aloc,&m,NULL)) return(0);
74: for (i = 0; i<N_requested_rows; i++) {
75: row = requested_rows[i];
76: row_lengths[i] = a->ilen[row];
77: if (allocated_space < k+row_lengths[i]) return(0);
78: if ((row >= 0) && (row <= (m-1))) {
79: aj = a->j + a->i[row];
80: aa = a->a + a->i[row];
81: for (j=0; j<row_lengths[i]; j++) {
82: columns[k] = aj[j];
83: values[k++] = aa[j];
84: }
85: }
86: }
87: return(1);
88: }
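/* Communication callback passed to ML (through ML_Set_Amatrix_Getrow() in PCSetUp_ML()): p[]
   arrives with the locally owned entries in its first A->rmap->n slots; the owned values are
   scattered through the MPIAIJ Mvctx scatter and the resulting ghost values are appended behind
   them, matching the column space of Aloc. */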
90: static PetscErrorCode PetscML_comm(double p[],void *ML_data)
91: {
92: PetscErrorCode ierr;
93: FineGridCtx *ml = (FineGridCtx*)ML_data;
94: Mat A = ml->A;
95: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
96: PetscMPIInt size;
97: PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
98: const PetscScalar *array;
101: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
102: if (size == 1) return(0);
104: VecPlaceArray(ml->y,p);
105: VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
106: VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
107: VecResetArray(ml->y);
108: VecGetArrayRead(a->lvec,&array);
109: for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
110: VecRestoreArrayRead(a->lvec,&array);
111: return(0);
112: }
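/* Matvec callback handed to ML via ML_Set_Amatrix_Matvec(): in parallel the ghost entries of p[]
   are first gathered with PetscML_comm(), then the merged local matrix Aloc is applied with MatMult(). */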
114: static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
115: {
117: FineGridCtx *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
118: Mat A = ml->A, Aloc=ml->Aloc;
119: PetscMPIInt size;
120: PetscScalar *pwork=ml->pwork;
121: PetscInt i;
124: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
125: if (size == 1) {
126: VecPlaceArray(ml->x,p);
127: } else {
128: for (i=0; i<in_length; i++) pwork[i] = p[i];
129: PetscML_comm(pwork,ml);
130: VecPlaceArray(ml->x,pwork);
131: }
132: VecPlaceArray(ml->y,ap);
133: MatMult(Aloc,ml->x,ml->y);
134: VecResetArray(ml->x);
135: VecResetArray(ml->y);
136: return(0);
137: }
139: static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
140: {
141: PetscErrorCode ierr;
142: Mat_MLShell *shell;
143: PetscScalar *yarray;
144: const PetscScalar *xarray;
145: PetscInt x_length,y_length;
148: MatShellGetContext(A,(void**)&shell);
149: VecGetArrayRead(x,&xarray);
150: VecGetArray(y,&yarray);
151: x_length = shell->mlmat->invec_leng;
152: y_length = shell->mlmat->outvec_leng;
153: PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
154: VecRestoreArrayRead(x,&xarray);
155: VecRestoreArray(y,&yarray);
156: return(0);
157: }
159: /* newtype is ignored since this routine handles only one case */
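/* Merge the diagonal (mpimat->A) and off-diagonal (mpimat->B) blocks of an MPIAIJ matrix into a
   single SeqAIJ matrix. Columns of the diagonal block keep their local indices 0..an-1 and the
   ghost columns of B are appended starting at index an, which is the local numbering that
   PetscML_getrow()/PetscML_matvec() operate in. */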
160: static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
161: {
163: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
164: Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
165: PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
166: PetscScalar *aa,*ba,*ca;
167: PetscInt am =A->rmap->n,an=A->cmap->n,i,j,k;
168: PetscInt *ci,*cj,ncols;
171: if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
172: MatSeqAIJGetArrayRead(mpimat->A,(const PetscScalar**)&aa);
173: MatSeqAIJGetArrayRead(mpimat->B,(const PetscScalar**)&ba);
174: if (scall == MAT_INITIAL_MATRIX) {
175: PetscMalloc1(1+am,&ci);
176: ci[0] = 0;
177: for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
178: PetscMalloc1(1+ci[am],&cj);
179: PetscMalloc1(1+ci[am],&ca);
181: k = 0;
182: for (i=0; i<am; i++) {
183: /* diagonal portion of A */
184: ncols = ai[i+1] - ai[i];
185: for (j=0; j<ncols; j++) {
186: cj[k] = *aj++;
187: ca[k++] = *aa++;
188: }
189: /* off-diagonal portion of A */
190: ncols = bi[i+1] - bi[i];
191: for (j=0; j<ncols; j++) {
192: cj[k] = an + (*bj); bj++;
193: ca[k++] = *ba++;
194: }
195: }
196: if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
198: /* put together the new matrix */
199: an = mpimat->A->cmap->n+mpimat->B->cmap->n;
200: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);
202: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
203: /* Since these are PETSc arrays, change flags to free them as necessary. */
204: mat = (Mat_SeqAIJ*)(*Aloc)->data;
205: mat->free_a = PETSC_TRUE;
206: mat->free_ij = PETSC_TRUE;
208: mat->nonew = 0;
209: } else if (scall == MAT_REUSE_MATRIX) {
210: mat=(Mat_SeqAIJ*)(*Aloc)->data;
211: ci = mat->i; cj = mat->j; ca = mat->a;
212: for (i=0; i<am; i++) {
213: /* diagonal portion of A */
214: ncols = ai[i+1] - ai[i];
215: for (j=0; j<ncols; j++) *ca++ = *aa++;
216: /* off-diagonal portion of A */
217: ncols = bi[i+1] - bi[i];
218: for (j=0; j<ncols; j++) *ca++ = *ba++;
219: }
220: } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
221: MatSeqAIJRestoreArrayRead(mpimat->A,(const PetscScalar**)&aa);
222: MatSeqAIJRestoreArrayRead(mpimat->B,(const PetscScalar**)&ba);
223: return(0);
224: }
226: static PetscErrorCode MatDestroy_ML(Mat A)
227: {
229: Mat_MLShell *shell;
232: MatShellGetContext(A,(void**)&shell);
233: PetscFree(shell);
234: return(0);
235: }
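/* Wrap an ML_Operator as a PETSc SeqAIJ matrix. Rectangular operators (ML's P and R, stored in
   CSR) are wrapped by handing ML's row-pointer/column/value arrays directly to
   MatCreateSeqAIJWithArrays() (after sorting columns within each row on first creation); square
   coarse-level A matrices are instead copied row by row through ML_Operator_Getrow(). */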
237: static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
238: {
239: struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
240: PetscErrorCode ierr;
241: PetscInt m =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
242: PetscInt *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
243: PetscScalar *ml_vals=matdata->values,*aa;
246: if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
247: if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
248: if (reuse) {
249: Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
250: aij->i = ml_rowptr;
251: aij->j = ml_cols;
252: aij->a = ml_vals;
253: } else {
254: /* sort ml_cols and ml_vals */
255: PetscMalloc1(m+1,&nnz);
256: for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
257: aj = ml_cols; aa = ml_vals;
258: for (i=0; i<m; i++) {
259: PetscSortIntWithScalarArray(nnz[i],aj,aa);
260: aj += nnz[i]; aa += nnz[i];
261: }
262: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);
263: PetscFree(nnz);
264: }
265: MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
266: MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);
267: return(0);
268: }
270: nz_max = PetscMax(1,mlmat->max_nz_per_row);
271: PetscMalloc2(nz_max,&aa,nz_max,&aj);
272: if (!reuse) {
273: MatCreate(PETSC_COMM_SELF,newmat);
274: MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
275: MatSetType(*newmat,MATSEQAIJ);
276: /* keep track of block size for A matrices */
277: MatSetBlockSize (*newmat, mlmat->num_PDEs);
279: PetscMalloc1(m,&nnz);
280: for (i=0; i<m; i++) {
281: PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
282: }
283: MatSeqAIJSetPreallocation(*newmat,0,nnz);
284: }
285: for (i=0; i<m; i++) {
286: PetscInt ncols;
288: PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
289: MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);
290: }
291: MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
292: MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);
294: PetscFree2(aa,aj);
295: PetscFree(nnz);
296: return(0);
297: }
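/* Wrap an ML_Operator as a PETSc MATSHELL. MatMult on the shell simply calls ML_Operator_Apply()
   through MatMult_ML(); this is how the parallel prolongation and restriction operators produced
   by ML are exposed to PCMG without copying them into AIJ format. */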
299: static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
300: {
302: PetscInt m,n;
303: ML_Comm *MLcomm;
304: Mat_MLShell *shellctx;
307: m = mlmat->outvec_leng;
308: n = mlmat->invec_leng;
310: if (reuse) {
311: MatShellGetContext(*newmat,(void**)&shellctx);
312: shellctx->mlmat = mlmat;
313: return(0);
314: }
316: MLcomm = mlmat->comm;
318: PetscNew(&shellctx);
319: MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
320: MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
321: MatShellSetOperation(*newmat,MATOP_DESTROY,(void(*)(void))MatDestroy_ML);
323: shellctx->A = *newmat;
324: shellctx->mlmat = mlmat;
325: return(0);
326: }
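/* Copy a (square) parallel ML_Operator into a PETSc MPIAIJ matrix. ML numbers rows and columns
   locally, so ML_build_global_numbering() is used to translate them into the global PETSc
   numbering before calling MatSetValues(). */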
328: static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
329: {
330: PetscInt *aj;
331: PetscScalar *aa;
333: PetscInt i,j,*gordering;
334: PetscInt m=mlmat->outvec_leng,n,nz_max,row;
335: Mat A;
338: if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
339: n = mlmat->invec_leng;
340: if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
342: /* create global row numbering for a ML_Operator */
343: PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
345: nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
346: PetscMalloc2(nz_max,&aa,nz_max,&aj);
347: if (reuse) {
348: A = *newmat;
349: } else {
350: PetscInt *nnzA,*nnzB,*nnz;
351: PetscInt rstart;
352: MatCreate(mlmat->comm->USR_comm,&A);
353: MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
354: MatSetType(A,MATMPIAIJ);
355: /* keep track of block size for A matrices */
356: MatSetBlockSize (A,mlmat->num_PDEs);
357: PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);
358: MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);
359: rstart -= m;
361: for (i=0; i<m; i++) {
362: row = gordering[i] - rstart;
363: PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
364: nnzA[row] = 0;
365: for (j=0; j<nnz[i]; j++) {
366: if (aj[j] < m) nnzA[row]++;
367: }
368: nnzB[row] = nnz[i] - nnzA[row];
369: }
370: MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);
371: PetscFree3(nnzA,nnzB,nnz);
372: }
373: for (i=0; i<m; i++) {
374: PetscInt ncols;
375: row = gordering[i];
377: PetscStackCall(",ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
378: for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
379: MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);
380: }
381: PetscStackCall("ML_free",ML_free(gordering));
382: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
383: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
384: *newmat = A;
386: PetscFree2(aa,aj);
387: return(0);
388: }
390: /* -------------------------------------------------------------------------- */
391: /*
392: PCSetCoordinates_ML
394: Input Parameter:
395: . pc - the preconditioner context
396: */
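/* In addition to pc, the arguments are: ndm - the coordinate dimension (1, 2, or 3), a_nloc - the
   number of local nodes (or local rows; both are accepted), and coords - the coordinate values,
   interleaved by node. As the copy loops below show, the incoming array
       coords = {x_0, y_0, x_1, y_1, ...}             (ndm = 2, node-interleaved)
   is stored in pc_ml->coords one dimension at a time,
       pc_ml->coords = {x_0, x_1, ..., y_0, y_1, ...} (column oriented),
   which is the layout the finest ML grid points into during PCSetUp_ML(). */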
397: static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
398: {
399: PC_MG *mg = (PC_MG*)pc->data;
400: PC_ML *pc_ml = (PC_ML*)mg->innerctx;
402: PetscInt arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend,aloc;
403: Mat Amat = pc->pmat;
405: /* this function copied and modified from PCSetCoordinates_GEO -TGI */
408: MatGetBlockSize(Amat, &bs);
410: MatGetOwnershipRange(Amat, &my0, &Iend);
411: aloc = (Iend-my0);
412: nloc = (Iend-my0)/bs;
414: if (nloc!=a_nloc && aloc != a_nloc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Number of local blocks %D must be %D or %D.",a_nloc,nloc,aloc);
416: oldarrsz = pc_ml->dim * pc_ml->nloc;
417: pc_ml->dim = ndm;
418: pc_ml->nloc = nloc;
419: arrsz = ndm * nloc;
421: /* (re)allocate the coordinate array when its size changes */
422: if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
423: PetscFree(pc_ml->coords);
424: PetscMalloc1(arrsz, &pc_ml->coords);
425: }
426: for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
427: /* copy data in - column oriented */
428: if (nloc == a_nloc) {
429: for (kk = 0; kk < nloc; kk++) {
430: for (ii = 0; ii < ndm; ii++) {
431: pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
432: }
433: }
434: } else { /* assumes the coordinates are blocked */
435: for (kk = 0; kk < nloc; kk++) {
436: for (ii = 0; ii < ndm; ii++) {
437: pc_ml->coords[ii*nloc + kk] = coords[bs*kk*ndm + ii];
438: }
439: }
440: }
441: return(0);
442: }
444: /* -----------------------------------------------------------------------------*/
445: extern PetscErrorCode PCReset_MG(PC);
446: PetscErrorCode PCReset_ML(PC pc)
447: {
449: PC_MG *mg = (PC_MG*)pc->data;
450: PC_ML *pc_ml = (PC_ML*)mg->innerctx;
451: PetscInt level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
454: if (dim) {
455: for (level=0; level<=fine_level; level++) {
456: VecDestroy(&pc_ml->gridctx[level].coords);
457: }
458: if (pc_ml->ml_object && pc_ml->ml_object->Grid) {
459: ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
460: grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
461: grid_info->y = 0;
462: grid_info->z = 0;
463: PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
464: }
465: }
466: PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
467: PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));
469: if (pc_ml->PetscMLdata) {
470: PetscFree(pc_ml->PetscMLdata->pwork);
471: MatDestroy(&pc_ml->PetscMLdata->Aloc);
472: VecDestroy(&pc_ml->PetscMLdata->x);
473: VecDestroy(&pc_ml->PetscMLdata->y);
474: }
475: PetscFree(pc_ml->PetscMLdata);
477: if (pc_ml->gridctx) {
478: for (level=0; level<fine_level; level++) {
479: if (pc_ml->gridctx[level].A) {MatDestroy(&pc_ml->gridctx[level].A);}
480: if (pc_ml->gridctx[level].P) {MatDestroy(&pc_ml->gridctx[level].P);}
481: if (pc_ml->gridctx[level].R) {MatDestroy(&pc_ml->gridctx[level].R);}
482: if (pc_ml->gridctx[level].x) {VecDestroy(&pc_ml->gridctx[level].x);}
483: if (pc_ml->gridctx[level].b) {VecDestroy(&pc_ml->gridctx[level].b);}
484: if (pc_ml->gridctx[level+1].r) {VecDestroy(&pc_ml->gridctx[level+1].r);}
485: }
486: }
487: PetscFree(pc_ml->gridctx);
488: PetscFree(pc_ml->coords);
490: pc_ml->dim = 0;
491: pc_ml->nloc = 0;
492: PCReset_MG(pc);
493: return(0);
494: }
495: /* -------------------------------------------------------------------------- */
496: /*
497: PCSetUp_ML - Prepares for the use of the ML preconditioner
498: by setting data structures and options.
500: Input Parameter:
501: . pc - the preconditioner context
503: Application Interface Routine: PCSetUp()
505: Notes:
506: The interface routine PCSetUp() is not usually called directly by
507: the user, but instead is called by PCApply() if necessary.
508: */
509: extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);
510: extern PetscErrorCode PCReset_MG(PC);
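/* Outline of PCSetUp_ML():
     1. convert pc->pmat into the merged local format ML works with (MatConvert_MPIAIJ_ML());
     2. hand the matrix to ML through the getrow/comm/matvec callbacks defined above;
     3. let ML build the aggregation hierarchy (ML_Gen_MultiLevelHierarchy_UsingAggregation(), or
        the old routine when -pc_ml_OldHierarchy is set);
     4. wrap the resulting A, P, R operators on each level as PETSc matrices (AIJ or shell);
     5. configure the underlying PCMG object (levels, smoothers, work vectors) and call PCSetUp_MG(). */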
512: PetscErrorCode PCSetUp_ML(PC pc)
513: {
514: PetscErrorCode ierr;
515: PetscMPIInt size;
516: FineGridCtx *PetscMLdata;
517: ML *ml_object;
518: ML_Aggregate *agg_object;
519: ML_Operator *mlmat;
520: PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
521: Mat A,Aloc;
522: GridCtx *gridctx;
523: PC_MG *mg = (PC_MG*)pc->data;
524: PC_ML *pc_ml = (PC_ML*)mg->innerctx;
525: PetscBool isSeq, isMPI;
526: KSP smoother;
527: PC subpc;
528: PetscInt mesh_level, old_mesh_level;
529: MatInfo info;
530: static PetscBool cite = PETSC_FALSE;
533: PetscCitationsRegister("@TechReport{ml_users_guide,\n author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n title = {{ML}3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n institution = {Sandia National Laboratories},\n number = {SAND2004-4821},\n year = 2004\n}\n",&cite);
534: A = pc->pmat;
535: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
537: if (pc->setupcalled) {
538: if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
539: /*
540: Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
541: multiple solves in which the matrix is not changing too quickly.
542: */
543: ml_object = pc_ml->ml_object;
544: gridctx = pc_ml->gridctx;
545: Nlevels = pc_ml->Nlevels;
546: fine_level = Nlevels - 1;
547: gridctx[fine_level].A = A;
549: PetscObjectBaseTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
550: PetscObjectBaseTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
551: if (isMPI) {
552: MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);
553: } else if (isSeq) {
554: Aloc = A;
555: PetscObjectReference((PetscObject)Aloc);
556: } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
558: MatGetSize(Aloc,&m,&nlocal_allcols);
559: PetscMLdata = pc_ml->PetscMLdata;
560: MatDestroy(&PetscMLdata->Aloc);
561: PetscMLdata->A = A;
562: PetscMLdata->Aloc = Aloc;
563: PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
564: PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
566: mesh_level = ml_object->ML_finest_level;
567: while (ml_object->SingleLevel[mesh_level].Rmat->to) {
568: old_mesh_level = mesh_level;
569: mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
571: /* clean and regenerate A */
572: mlmat = &(ml_object->Amat[mesh_level]);
573: PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
574: PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
575: PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
576: }
578: level = fine_level - 1;
579: if (size == 1) { /* convert ML P, R and A into seqaij format */
580: for (mllevel=1; mllevel<Nlevels; mllevel++) {
581: mlmat = &(ml_object->Amat[mllevel]);
582: MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);
583: level--;
584: }
585: } else { /* convert ML P and R into shell format, ML A into mpiaij format */
586: for (mllevel=1; mllevel<Nlevels; mllevel++) {
587: mlmat = &(ml_object->Amat[mllevel]);
588: MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);
589: level--;
590: }
591: }
593: for (level=0; level<fine_level; level++) {
594: if (level > 0) {
595: PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);
596: }
597: KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);
598: }
599: PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);
600: KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);
602: PCSetUp_MG(pc);
603: return(0);
604: } else {
605: /* since ML can change the size of vectors/matrices at any level we must destroy everything */
606: PCReset_ML(pc);
607: }
608: }
610: /* setup special features of PCML */
611: /*--------------------------------*/
612: /* convert A to Aloc to be used by ML at fine grid */
613: pc_ml->size = size;
614: PetscObjectBaseTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
615: PetscObjectBaseTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
616: if (isMPI) {
617: MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);
618: } else if (isSeq) {
619: Aloc = A;
620: PetscObjectReference((PetscObject)Aloc);
621: } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
623: /* create and initialize struct 'PetscMLdata' */
624: PetscNewLog(pc,&PetscMLdata);
625: pc_ml->PetscMLdata = PetscMLdata;
626: PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);
628: MatCreateVecs(Aloc,&PetscMLdata->x,&PetscMLdata->y);
630: PetscMLdata->A = A;
631: PetscMLdata->Aloc = Aloc;
632: if (pc_ml->dim) { /* create vecs around the coordinate data given */
633: PetscInt i,j,dim=pc_ml->dim;
634: PetscInt nloc = pc_ml->nloc,nlocghost;
635: PetscReal *ghostedcoords;
637: MatGetBlockSize(A,&bs);
638: nlocghost = Aloc->cmap->n / bs;
639: PetscMalloc1(dim*nlocghost,&ghostedcoords);
640: for (i = 0; i < dim; i++) {
641: /* copy coordinate values into first component of pwork */
642: for (j = 0; j < nloc; j++) {
643: PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
644: }
645: /* get the ghost values */
646: PetscML_comm(PetscMLdata->pwork,PetscMLdata);
647: /* write into the vector */
648: for (j = 0; j < nlocghost; j++) {
649: ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
650: }
651: }
652: /* replace the original coords with the ghosted coords, because these are
653: * what ML needs */
654: PetscFree(pc_ml->coords);
655: pc_ml->coords = ghostedcoords;
656: }
658: /* create ML discretization matrix at fine grid */
659: /* ML requires the fine-grid matrix as input; from it ML determines the number of levels (Nlevels). */
660: MatGetSize(Aloc,&m,&nlocal_allcols);
661: MatGetBlockSize(A,&bs);
662: PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
663: PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
664: pc_ml->ml_object = ml_object;
665: PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
666: PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
667: PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
669: PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
671: /* aggregation */
672: PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
673: pc_ml->agg_object = agg_object;
675: {
676: MatNullSpace mnull;
677: MatGetNearNullSpace(A,&mnull);
678: if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
679: if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
680: else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
681: else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
682: }
683: switch (pc_ml->nulltype) {
684: case PCML_NULLSPACE_USER: {
685: PetscScalar *nullvec;
686: const PetscScalar *v;
687: PetscBool has_const;
688: PetscInt i,j,mlocal,nvec,M;
689: const Vec *vecs;
691: if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
692: MatGetSize(A,&M,NULL);
693: MatGetLocalSize(Aloc,&mlocal,NULL);
694: MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);
695: PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);
696: if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
697: for (i=0; i<nvec; i++) {
698: VecGetArrayRead(vecs[i],&v);
699: for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
700: VecRestoreArrayRead(vecs[i],&v);
701: }
702: PetscStackCall("ML_Aggregate_Set_NullSpace",ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal));
703: PetscFree(nullvec);
704: } break;
705: case PCML_NULLSPACE_BLOCK:
706: PetscStackCall("ML_Aggregate_Set_NullSpace",ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0));
707: break;
708: case PCML_NULLSPACE_SCALAR:
709: break;
710: default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
711: }
712: }
713: PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
714: /* set options */
715: switch (pc_ml->CoarsenScheme) {
716: case 1:
717: PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
718: case 2:
719: PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
720: case 3:
721: PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
722: }
723: PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
724: PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
725: if (pc_ml->SpectralNormScheme_Anorm) {
726: PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
727: }
728: agg_object->keep_agg_information = (int)pc_ml->KeepAggInfo;
729: agg_object->keep_P_tentative = (int)pc_ml->Reusable;
730: agg_object->block_scaled_SA = (int)pc_ml->BlockScaling;
731: agg_object->minimizing_energy = (int)pc_ml->EnergyMinimization;
732: agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
733: agg_object->cheap_minimizing_energy = (int)pc_ml->EnergyMinimizationCheap;
735: if (pc_ml->Aux) {
736: if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
737: ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
738: ml_object->Amat[0].aux_data->enable = 1;
739: ml_object->Amat[0].aux_data->max_level = 10;
740: ml_object->Amat[0].num_PDEs = bs;
741: }
743: MatGetInfo(A,MAT_LOCAL,&info);
744: ml_object->Amat[0].N_nonzeros = (int) info.nz_used;
746: if (pc_ml->dim) {
747: PetscInt i,dim = pc_ml->dim;
748: ML_Aggregate_Viz_Stats *grid_info;
749: PetscInt nlocghost;
751: MatGetBlockSize(A,&bs);
752: nlocghost = Aloc->cmap->n / bs;
754: PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
755: grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
756: for (i = 0; i < dim; i++) {
757: /* set the finest level coordinates to point to the column-order array
758: * in pc_ml */
759: /* NOTE: these pointers must be reset (see PCReset_ML()) before ML_Aggregate_VizAndStats_Clean() so ML does not free the array */
760: switch (i) {
761: case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
762: case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
763: case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
764: default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
765: }
766: }
767: grid_info->Ndim = dim;
768: }
770: /* repartitioning */
771: if (pc_ml->Repartition) {
772: PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
773: PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
774: PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
775: PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
776: #if 0 /* Function not yet defined in ml-6.2 */
777: /* I'm not sure what compatibility issues might crop up if we partitioned
778: * on the finest level, so to be safe repartition starts on the next
779: * finest level (reflecting the default behavior in
780: * ml_MultiLevelPreconditioner) */
781: PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
782: #endif
784: if (!pc_ml->RepartitionType) {
785: PetscInt i;
787: if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
788: PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
789: PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
791: for (i = 0; i < ml_object->ML_num_levels; i++) {
792: ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
793: grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
794: /* defaults from ml_agg_info.c */
795: grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
796: grid_info->zoltan_timers = 0;
797: grid_info->smoothing_steps = 4; /* only relevant to hypergraph / fast hypergraph */
798: }
799: } else {
800: PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
801: }
802: }
804: if (pc_ml->OldHierarchy) {
805: PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
806: } else {
807: PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
808: }
809: if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
810: pc_ml->Nlevels = Nlevels;
811: fine_level = Nlevels - 1;
813: PCMGSetLevels(pc,Nlevels,NULL);
814: /* set default smoothers */
815: for (level=1; level<=fine_level; level++) {
816: PCMGGetSmoother(pc,level,&smoother);
817: KSPSetType(smoother,KSPRICHARDSON);
818: KSPGetPC(smoother,&subpc);
819: PCSetType(subpc,PCSOR);
820: }
821: PetscObjectOptionsBegin((PetscObject)pc);
822: PCSetFromOptions_MG(PetscOptionsObject,pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
823: PetscOptionsEnd();
825: PetscMalloc1(Nlevels,&gridctx);
827: pc_ml->gridctx = gridctx;
829: /* wrap ML matrices with PETSc matrices (AIJ or shell) on the coarsened grids.
830: Level 0 is the finest grid for ML, but coarsest for PETSc! */
831: gridctx[fine_level].A = A;
833: level = fine_level - 1;
834: /* TODO: support for GPUs */
835: if (size == 1) { /* convert ML P, R and A into seqaij format */
836: for (mllevel=1; mllevel<Nlevels; mllevel++) {
837: mlmat = &(ml_object->Pmat[mllevel]);
838: MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
839: mlmat = &(ml_object->Rmat[mllevel-1]);
840: MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);
842: mlmat = &(ml_object->Amat[mllevel]);
843: MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
844: level--;
845: }
846: } else { /* convert ML P and R into shell format, ML A into mpiaij format */
847: for (mllevel=1; mllevel<Nlevels; mllevel++) {
848: mlmat = &(ml_object->Pmat[mllevel]);
849: MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
850: mlmat = &(ml_object->Rmat[mllevel-1]);
851: MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);
853: mlmat = &(ml_object->Amat[mllevel]);
854: MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
855: level--;
856: }
857: }
859: /* create vectors and ksp at all levels */
860: for (level=0; level<fine_level; level++) {
861: level1 = level + 1;
863: MatCreateVecs(gridctx[level].A,&gridctx[level].x,&gridctx[level].b);
864: MatCreateVecs(gridctx[level1].A,NULL,&gridctx[level1].r);
865: PCMGSetX(pc,level,gridctx[level].x);
866: PCMGSetRhs(pc,level,gridctx[level].b);
867: PCMGSetR(pc,level1,gridctx[level1].r);
869: if (level == 0) {
870: PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
871: } else {
872: PCMGGetSmoother(pc,level,&gridctx[level].ksp);
873: }
874: }
875: PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);
877: /* create coarse level and the interpolation between the levels */
878: for (level=0; level<fine_level; level++) {
879: level1 = level + 1;
881: PCMGSetInterpolation(pc,level1,gridctx[level].P);
882: PCMGSetRestriction(pc,level1,gridctx[level].R);
883: if (level > 0) {
884: PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);
885: }
886: KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);
887: }
888: PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);
889: KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);
891: /* put coordinate info in levels */
892: if (pc_ml->dim) {
893: PetscInt i,j,dim = pc_ml->dim;
894: PetscInt bs, nloc;
895: PC subpc;
896: PetscReal *array;
898: level = fine_level;
899: for (mllevel = 0; mllevel < Nlevels; mllevel++) {
900: ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
901: MPI_Comm comm = ((PetscObject)gridctx[level].A)->comm;
903: MatGetBlockSize (gridctx[level].A, &bs);
904: MatGetLocalSize (gridctx[level].A, NULL, &nloc);
905: nloc /= bs; /* number of local nodes */
907: VecCreate(comm,&gridctx[level].coords);
908: VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);
909: VecSetType(gridctx[level].coords,VECMPI);
910: VecGetArray(gridctx[level].coords,&array);
911: for (j = 0; j < nloc; j++) {
912: for (i = 0; i < dim; i++) {
913: switch (i) {
914: case 0: array[dim * j + i] = grid_info->x[j]; break;
915: case 1: array[dim * j + i] = grid_info->y[j]; break;
916: case 2: array[dim * j + i] = grid_info->z[j]; break;
917: default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
918: }
919: }
920: }
922: /* passing coordinates to smoothers/coarse solver, should they need them */
923: KSPGetPC(gridctx[level].ksp,&subpc);
924: PCSetCoordinates(subpc,dim,nloc,array);
925: VecRestoreArray(gridctx[level].coords,&array);
926: level--;
927: }
928: }
930: /* setupcalled is set to 0 so that MG is set up from scratch */
931: pc->setupcalled = 0;
932: PCSetUp_MG(pc);
933: return(0);
934: }
936: /* -------------------------------------------------------------------------- */
937: /*
938: PCDestroy_ML - Destroys the private context for the ML preconditioner
939: that was created with PCCreate_ML().
941: Input Parameter:
942: . pc - the preconditioner context
944: Application Interface Routine: PCDestroy()
945: */
946: PetscErrorCode PCDestroy_ML(PC pc)
947: {
949: PC_MG *mg = (PC_MG*)pc->data;
950: PC_ML *pc_ml= (PC_ML*)mg->innerctx;
953: PCReset_ML(pc);
954: PetscFree(pc_ml);
955: PCDestroy_MG(pc);
956: PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);
957: return(0);
958: }
960: PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
961: {
963: PetscInt indx,PrintLevel,partindx;
964: const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
965: const char *part[] = {"Zoltan","ParMETIS"};
966: #if defined(HAVE_ML_ZOLTAN)
967: const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
968: #endif
969: PC_MG *mg = (PC_MG*)pc->data;
970: PC_ML *pc_ml = (PC_ML*)mg->innerctx;
971: PetscMPIInt size;
972: MPI_Comm comm;
975: PetscObjectGetComm((PetscObject)pc,&comm);
976: MPI_Comm_size(comm,&size);
977: PetscOptionsHead(PetscOptionsObject,"ML options");
979: PrintLevel = 0;
980: indx = 0;
981: partindx = 0;
983: PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);
984: PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
985: PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);
986: PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);
987: PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);
989: pc_ml->CoarsenScheme = indx;
991: PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);
992: PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);
993: PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);
994: PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);
995: PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);
996: PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);
997: PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);
998: PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);
999: /*
1000: The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take over.
1001: This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1003: We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1004: combination of options and ML's exit(1) explanations don't help matters.
1005: */
1006: if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1007: if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1008: if (pc_ml->EnergyMinimization == 4) {PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");}
1009: if (pc_ml->EnergyMinimization) {
1010: PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);
1011: }
1012: if (pc_ml->EnergyMinimization == 2) {
1013: /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1014: PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);
1015: }
1016: /* energy minimization sometimes breaks if this is turned off; the more classical stuff should be okay without it */
1017: if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1018: PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);
1019: /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1020: if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1021: PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);
1022: /*
1023: ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls
1024: ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1025: ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the
1026: functionality in the old function, so some users may still want to use it. Note that many options are ignored in
1027: this context, but ML doesn't provide a way to find out which ones.
1028: */
1029: PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);
1030: PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);
1031: if (pc_ml->Repartition) {
1032: PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);
1033: PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);
1034: PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);
1035: #if defined(HAVE_ML_ZOLTAN)
1036: partindx = 0;
1037: PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);
1039: pc_ml->RepartitionType = partindx;
1040: if (!partindx) {
1041: PetscInt zindx = 0;
1043: PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);
1045: pc_ml->ZoltanScheme = zindx;
1046: }
1047: #else
1048: partindx = 1;
1049: PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);
1050: pc_ml->RepartitionType = partindx;
1051: if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1052: #endif
1053: PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);
1054: PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);
1055: }
1056: PetscOptionsTail();
1057: return(0);
1058: }
1060: /* -------------------------------------------------------------------------- */
1061: /*
1062: PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1063: and sets this as the private data within the generic preconditioning
1064: context, PC, that was created within PCCreate().
1066: Input Parameter:
1067: . pc - the preconditioner context
1069: Application Interface Routine: PCCreate()
1070: */
1072: /*MC
1073: PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
1074: the fine-grid discretization matrix. The coarser grid matrices and restriction/interpolation
1075: operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
1076: and the restriction/interpolation operators wrapped as PETSc shell matrices.
1078: Options Database Key:
1079: Multigrid options(inherited):
1080: + -pc_mg_cycles <1> - 1 for V cycle, 2 for W-cycle (MGSetCycles)
1081: . -pc_mg_distinct_smoothup - Should one configure the up and down smoothers separately (PCMGSetDistinctSmoothUp)
1082: - -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
1084: ML options:
1085: + -pc_ml_PrintLevel <0> - Print level (ML_Set_PrintLevel)
1086: . -pc_ml_maxNlevels <10> - Maximum number of levels (None)
1087: . -pc_ml_maxCoarseSize <1> - Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1088: . -pc_ml_CoarsenScheme <Uncoupled> - (one of) Uncoupled Coupled MIS METIS
1089: . -pc_ml_DampingFactor <1.33333> - P damping factor (ML_Aggregate_Set_DampingFactor)
1090: . -pc_ml_Threshold <0> - Smoother drop tol (ML_Aggregate_Set_Threshold)
1091: . -pc_ml_SpectralNormScheme_Anorm <false> - Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
1092: . -pc_ml_repartition <false> - Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1093: . -pc_ml_repartitionMaxMinRatio <1.3> - Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1094: . -pc_ml_repartitionMinPerProc <512> - Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1095: . -pc_ml_repartitionPutOnSingleProc <5000> - Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1096: . -pc_ml_repartitionType <Zoltan> - Repartitioning library to use (ML_Repartition_Set_Partitioner)
1097: . -pc_ml_repartitionZoltanScheme <RCB> - Repartitioning scheme to use (None)
1098: . -pc_ml_Aux <false> - Aggregate using auxiliary coordinate-based laplacian (None)
1099: - -pc_ml_AuxThreshold <0.0> - Auxiliary smoother drop tol (None)
1101: Level: intermediate
1103: .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
1104: PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycleType(), PCMGSetDistinctSmoothUp(),
1105: PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1106: PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1107: PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1108: M*/
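/*
   Example usage (a minimal sketch; A, b, and x are an assembled AIJ matrix and vectors created
   elsewhere):

     KSP ksp;
     PC  pc;
     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCML);
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);
     KSPDestroy(&ksp);

   Equivalently, run an existing code with -pc_type ml together with any of the -pc_ml_* and
   -pc_mg_* options listed above.
*/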
1110: PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
1111: {
1113: PC_ML *pc_ml;
1114: PC_MG *mg;
1117: /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1118: PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */
1119: PetscObjectChangeTypeName((PetscObject)pc,PCML);
1120: /* Since PCMG tries to use the DM associated with the PC, we must delete it */
1121: DMDestroy(&pc->dm);
1122: PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);
1123: mg = (PC_MG*)pc->data;
1125: /* create a supporting struct and attach it to pc */
1126: PetscNewLog(pc,&pc_ml);
1127: mg->innerctx = pc_ml;
1129: pc_ml->ml_object = 0;
1130: pc_ml->agg_object = 0;
1131: pc_ml->gridctx = 0;
1132: pc_ml->PetscMLdata = 0;
1133: pc_ml->Nlevels = -1;
1134: pc_ml->MaxNlevels = 10;
1135: pc_ml->MaxCoarseSize = 1;
1136: pc_ml->CoarsenScheme = 1;
1137: pc_ml->Threshold = 0.0;
1138: pc_ml->DampingFactor = 4.0/3.0;
1139: pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1140: pc_ml->size = 0;
1141: pc_ml->dim = 0;
1142: pc_ml->nloc = 0;
1143: pc_ml->coords = 0;
1144: pc_ml->Repartition = PETSC_FALSE;
1145: pc_ml->MaxMinRatio = 1.3;
1146: pc_ml->MinPerProc = 512;
1147: pc_ml->PutOnSingleProc = 5000;
1148: pc_ml->RepartitionType = 0;
1149: pc_ml->ZoltanScheme = 0;
1150: pc_ml->Aux = PETSC_FALSE;
1151: pc_ml->AuxThreshold = 0.0;
1153: /* allow for coordinates to be passed */
1154: PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);
1156: /* overwrite the pointers of PCMG by the functions of PCML */
1157: pc->ops->setfromoptions = PCSetFromOptions_ML;
1158: pc->ops->setup = PCSetUp_ML;
1159: pc->ops->reset = PCReset_ML;
1160: pc->ops->destroy = PCDestroy_ML;
1161: return(0);
1162: }