Actual source code: ml.c

petsc-3.3-p7 2013-05-11
  2: /* 
  3:    Provides an interface to the ML smoothed aggregation preconditioner.
  4:    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
  5:                                     Jed Brown, see [PETSC #18321, #18449]. 
  6: */
  7: #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
  8: #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
  9: #include <../src/mat/impls/aij/seq/aij.h>
 10: #include <../src/mat/impls/aij/mpi/mpiaij.h>

 12: #include <math.h>
 13: EXTERN_C_BEGIN
 14: /* HAVE_CONFIG_H flag is required by ML include files */
 15: #if !defined(HAVE_CONFIG_H)
 16: #define HAVE_CONFIG_H
 17: #endif
 18: #include <ml_include.h>
 19: EXTERN_C_END

 21: typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
 22: static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

 24: /* The context (data structure) at each grid level */
 25: typedef struct {
 26:   Vec        x,b,r;           /* global vectors */
 27:   Mat        A,P,R;
 28:   KSP        ksp;
 29: } GridCtx;

 31: /* The context used to pass the PETSc matrix to ML at the fine grid */
 32: typedef struct {
 33:   Mat          A;      /* PETSc matrix in AIJ format */
 34:   Mat          Aloc;   /* local portion of A to be used by ML */
 35:   Vec          x,y;
 36:   ML_Operator  *mlmat;
 37:   PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
 38: } FineGridCtx;

 40: /* The context that associates an ML matrix with a PETSc shell matrix */
 41: typedef struct {
 42:   Mat          A;       /* PETSc shell matrix associated with mlmat */
 43:   ML_Operator  *mlmat;  /* ML matrix associated with A */
 44:   Vec          y, work;
 45: } Mat_MLShell;

 47: /* Private context for the ML preconditioner */
 48: typedef struct {
 49:   ML                *ml_object;
 50:   ML_Aggregate      *agg_object;
 51:   GridCtx           *gridctx;
 52:   FineGridCtx       *PetscMLdata;
 53:   PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
 54:   PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol;
 55:   PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
 56:   PetscBool         reuse_interpolation;
 57:   PCMLNullSpaceType nulltype;
 58:   PetscMPIInt       size; /* size of communicator for pc->pmat */
 59: } PC_ML;

 63: static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
 64: {
 65:   PetscErrorCode ierr;
 66:   PetscInt       m,i,j,k=0,row,*aj;
 67:   PetscScalar    *aa;
 68:   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
 69:   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

 71:   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
 72:   for (i = 0; i<N_requested_rows; i++) {
 73:     row   = requested_rows[i];
 74:     row_lengths[i] = a->ilen[row];
 75:     if (allocated_space < k+row_lengths[i]) return(0);
 76:     if ((row >= 0) && (row <= (m-1))) {
 77:       aj = a->j + a->i[row];
 78:       aa = a->a + a->i[row];
 79:       for (j=0; j<row_lengths[i]; j++){
 80:         columns[k]  = aj[j];
 81:         values[k++] = aa[j];
 82:       }
 83:     }
 84:   }
 85:   return(1);
 86: }
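
/*
   Editor's note -- a minimal sketch of the getrow contract ML expects from the
   callback above: ML requests a batch of rows into caller-allocated arrays and
   interprets a return of 0 as "allocated_space was too small, retry with larger
   buffers".  The ML_Operator `Aop' is a hypothetical name.

     int    rows[2] = {0,1}, cols[64], lens[2];
     double vals[64];
     if (!PetscML_getrow(Aop,2,rows,64,cols,vals,lens)) {
       ... ML reallocates larger cols/vals arrays and calls again ...
     }
*/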

 90: static PetscErrorCode PetscML_comm(double p[],void *ML_data)
 91: {
 93:   FineGridCtx    *ml=(FineGridCtx*)ML_data;
 94:   Mat            A=ml->A;
 95:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
 96:   PetscMPIInt    size;
 97:   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
 98:   PetscScalar    *array;

101:   MPI_Comm_size(((PetscObject)A)->comm,&size);
 102:   if (size == 1) return(0);
103: 
104:   VecPlaceArray(ml->y,p);
105:   VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
106:   VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
107:   VecResetArray(ml->y);
108:   VecGetArray(a->lvec,&array);
109:   for (i=in_length; i<out_length; i++){
110:     p[i] = array[i-in_length];
111:   }
112:   VecRestoreArray(a->lvec,&array);
113:   return(0);
114: }
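
/*
   Editor's note -- the update contract of PetscML_comm(), sketched.  ML hands in
   a vector p[] laid out like the columns of Aloc: the first A->rmap->n entries
   are locally owned, and the trailing Aloc->cmap->n - A->rmap->n entries are
   ghost slots.  This routine fills the ghost slots from a->lvec via the same
   scatter MatMult uses for MPIAIJ, so afterwards Aloc can be applied to p.
   A hypothetical caller (`n_local' and `n_ghost' name the two lengths):

     double *p;
     PetscMalloc((n_local+n_ghost)*sizeof(double),&p);
     ... fill p[0..n_local-1] with owned values ...
     PetscML_comm(p,ml);    ghost entries p[n_local..] are now valid
*/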

118: static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
119: {
121:   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
122:   Mat            A=ml->A, Aloc=ml->Aloc;
123:   PetscMPIInt    size;
124:   PetscScalar    *pwork=ml->pwork;
125:   PetscInt       i;

128:   MPI_Comm_size(((PetscObject)A)->comm,&size);
129:   if (size == 1){
130:     VecPlaceArray(ml->x,p);
131:   } else {
132:     for (i=0; i<in_length; i++) pwork[i] = p[i];
133:     PetscML_comm(pwork,ml);
134:     VecPlaceArray(ml->x,pwork);
135:   }
136:   VecPlaceArray(ml->y,ap);
137:   MatMult(Aloc,ml->x,ml->y);
138:   VecResetArray(ml->x);
139:   VecResetArray(ml->y);
140:   return(0);
141: }
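
/*
   Editor's note -- the three callbacks above are everything ML sees of the
   PETSc matrix; they are registered in PCSetUp_ML() below with exactly this
   calling sequence (ctx is the FineGridCtx, m the local row count, and
   nlocal_allcols the local column count of Aloc):

     ML_Init_Amatrix(ml_object,0,m,m,ctx);
     ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
     ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
*/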

145: static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
146: {
147:   PetscErrorCode   ierr;
148:   Mat_MLShell      *shell;
149:   PetscScalar      *xarray,*yarray;
150:   PetscInt         x_length,y_length;
151: 
153:   MatShellGetContext(A,(void **)&shell);
154:   VecGetArray(x,&xarray);
155:   VecGetArray(y,&yarray);
156:   x_length = shell->mlmat->invec_leng;
157:   y_length = shell->mlmat->outvec_leng;
158:   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
159:   VecRestoreArray(x,&xarray);
160:   VecRestoreArray(y,&yarray);
161:   return(0);
162: }

166: /* Computes y = w + A * x
167:    It is possible that w == y, but not x == y
168: */
169: static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
170: {
171:   Mat_MLShell   *shell;
172:   PetscScalar   *xarray,*yarray;
173:   PetscInt       x_length,y_length;
175: 
177:   MatShellGetContext(A, (void **) &shell);
178:   if (y == w) {
179:     if (!shell->work) {
180:       VecDuplicate(y, &shell->work);
181:     }
182:     VecGetArray(x,           &xarray);
183:     VecGetArray(shell->work, &yarray);
184:     x_length = shell->mlmat->invec_leng;
185:     y_length = shell->mlmat->outvec_leng;
186:     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
187:     VecRestoreArray(x,           &xarray);
188:     VecRestoreArray(shell->work, &yarray);
189:     VecAXPY(y, 1.0, shell->work);
190:   } else {
191:     VecGetArray(x, &xarray);
192:     VecGetArray(y, &yarray);
193:     x_length = shell->mlmat->invec_leng;
194:     y_length = shell->mlmat->outvec_leng;
195:     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
196:     VecRestoreArray(x, &xarray);
197:     VecRestoreArray(y, &yarray);
198:     VecAXPY(y, 1.0, w);
199:   }
200:   return(0);
201: }
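
/*
   Editor's note -- the y == w branch above exists because PETSc permits the
   aliased call MatMultAdd(A,x,y,y).  A minimal check, assuming S is a shell
   matrix produced by MatWrapML_SHELL() below and x,y are conforming vectors:

     VecSet(y,1.0);
     MatMultAdd(S,x,y,y);    aliased: y <- y + S*x, routed through shell->work
*/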

 203: /* newtype is ignored since this routine handles only one case */
206: static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
207: {
208:   PetscErrorCode  ierr;
209:   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
210:   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
211:   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
212:   PetscScalar     *aa=a->a,*ba=b->a,*ca;
213:   PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
214:   PetscInt        *ci,*cj,ncols;

217:   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

219:   if (scall == MAT_INITIAL_MATRIX){
220:     PetscMalloc((1+am)*sizeof(PetscInt),&ci);
221:     ci[0] = 0;
222:     for (i=0; i<am; i++){
223:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
224:     }
225:     PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
226:     PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);

228:     k = 0;
229:     for (i=0; i<am; i++){
230:       /* diagonal portion of A */
231:       ncols = ai[i+1] - ai[i];
232:       for (j=0; j<ncols; j++) {
233:         cj[k]   = *aj++;
234:         ca[k++] = *aa++;
235:       }
236:       /* off-diagonal portion of A */
237:       ncols = bi[i+1] - bi[i];
238:       for (j=0; j<ncols; j++) {
239:         cj[k]   = an + (*bj); bj++;
240:         ca[k++] = *ba++;
241:       }
242:     }
243:     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

245:     /* put together the new matrix */
246:     an = mpimat->A->cmap->n+mpimat->B->cmap->n;
247:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);

249:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
250:     /* Since these are PETSc arrays, change flags to free them as necessary. */
251:     mat = (Mat_SeqAIJ*)(*Aloc)->data;
252:     mat->free_a       = PETSC_TRUE;
253:     mat->free_ij      = PETSC_TRUE;

255:     mat->nonew    = 0;
256:   } else if (scall == MAT_REUSE_MATRIX){
257:     mat=(Mat_SeqAIJ*)(*Aloc)->data;
258:     ci = mat->i; cj = mat->j; ca = mat->a;
259:     for (i=0; i<am; i++) {
260:       /* diagonal portion of A */
261:       ncols = ai[i+1] - ai[i];
262:       for (j=0; j<ncols; j++) *ca++ = *aa++;
263:       /* off-diagonal portion of A */
264:       ncols = bi[i+1] - bi[i];
265:       for (j=0; j<ncols; j++) *ca++ = *ba++;
266:     }
267:   } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
268:   return(0);
269: }
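
/*
   Editor's note -- the column convention used above, sketched.  The merged
   local matrix Aloc numbers the diagonal block's columns 0..an-1 and shifts
   the off-diagonal (ghost) columns to an..an+Bn-1, matching the order in
   which PetscML_comm() appends ghost values behind the owned entries:

     Aloc(i,:) = [ A_diag(i,0..an-1) | A_offdiag(i,j) remapped to column an+j ]
*/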

271: extern PetscErrorCode MatDestroy_Shell(Mat);
274: static PetscErrorCode MatDestroy_ML(Mat A)
275: {
277:   Mat_MLShell    *shell;

280:   MatShellGetContext(A,(void **)&shell);
281:   VecDestroy(&shell->y);
282:   if (shell->work) {VecDestroy(&shell->work);}
283:   PetscFree(shell);
284:   MatDestroy_Shell(A);
285:   PetscObjectChangeTypeName((PetscObject)A,0);
286:   return(0);
287: }

291: static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
292: {
293:   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
294:   PetscErrorCode        ierr;
295:   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
296:   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
297:   PetscScalar           *ml_vals=matdata->values,*aa;
298: 
300:   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
301:   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
302:     if (reuse){
303:       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
304:       aij->i = ml_rowptr;
305:       aij->j = ml_cols;
306:       aij->a = ml_vals;
307:     } else {
308:       /* sort ml_cols and ml_vals */
309:       PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
310:       for (i=0; i<m; i++) {
311:         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
312:       }
313:       aj = ml_cols; aa = ml_vals;
314:       for (i=0; i<m; i++){
315:         PetscSortIntWithScalarArray(nnz[i],aj,aa);
316:         aj += nnz[i]; aa += nnz[i];
317:       }
318:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);
319:       PetscFree(nnz);
320:     }
321:     return(0);
322:   }

324:   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
325:   if (reuse) {
326:     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
327:   } else {
328:     MatCreate(PETSC_COMM_SELF,newmat);
329:     MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
330:     MatSetType(*newmat,MATSEQAIJ);

332:     PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
333:     nz_max = 1;
334:     for (i=0; i<m; i++) {
335:       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
336:       if (nnz[i] > nz_max) nz_max = nnz[i];
337:     }
338:     MatSeqAIJSetPreallocation(*newmat,0,nnz);
339:   }
340:   PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);
341:   for (i=0; i<m; i++) {
342:     PetscInt ncols;
343:     k = 0;
344:     /* diagonal entry */
345:     aj[k] = i; aa[k++] = ml_vals[i];
346:     /* off diagonal entries */
347:     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
348:       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
349:     }
350:     ncols = ml_cols[i+1] - ml_cols[i] + 1;
351:     /* sort aj and aa */
352:     PetscSortIntWithScalarArray(ncols,aj,aa);
353:     MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);
354:   }
355:   MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
356:   MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);

358:   PetscFree2(aa,aj);
359:   PetscFree(nnz);
360:   return(0);
361: }
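
/*
   Editor's note -- the MSR decoding performed above, sketched.  For an ML Amat
   in MSR format, row i stores its diagonal at ml_vals[i] and its off-diagonal
   entries at positions ml_cols[i]..ml_cols[i+1]-1 of ml_cols/ml_vals:

     double diag = ml_vals[i];
     for (j=ml_cols[i]; j<ml_cols[i+1]; j++) {
       col = ml_cols[j];  val = ml_vals[j];    one off-diagonal entry of row i
     }
*/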

365: static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
366: {
368:   PetscInt       m,n;
369:   ML_Comm        *MLcomm;
370:   Mat_MLShell    *shellctx;

373:   m = mlmat->outvec_leng;
374:   n = mlmat->invec_leng;

376:   if (reuse){
377:     MatShellGetContext(*newmat,(void **)&shellctx);
378:     shellctx->mlmat = mlmat;
379:     return(0);
380:   }

382:   MLcomm = mlmat->comm;
383:   PetscNew(Mat_MLShell,&shellctx);
384:   MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
385:   MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
386:   MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
387:   shellctx->A         = *newmat;
388:   shellctx->mlmat     = mlmat;
389:   shellctx->work      = PETSC_NULL;
390:   VecCreate(MLcomm->USR_comm,&shellctx->y);
391:   VecSetSizes(shellctx->y,m,PETSC_DECIDE);
392:   VecSetFromOptions(shellctx->y);
393:   (*newmat)->ops->destroy = MatDestroy_ML;
394:   return(0);
395: }
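
/*
   Editor's note -- in parallel the ML prolongation/restriction operators are
   exposed to PETSc only through this shell wrapper, so PCMG applies them with
   ordinary MatMult()/MatMultAdd() calls that dispatch to ML_Operator_Apply().
   A hypothetical use (mlPmat an ML prolongator, xc coarse, xf fine):

     MatWrapML_SHELL(mlPmat,MAT_INITIAL_MATRIX,&P);
     MatMult(P,xc,xf);    interpolate coarse xc to fine xf through ML
*/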

399: static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
400: {
401:   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
402:   PetscInt              *ml_cols=matdata->columns,*aj;
403:   PetscScalar           *ml_vals=matdata->values,*aa;
404:   PetscErrorCode        ierr;
405:   PetscInt              i,j,k,*gordering;
406:   PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
407:   Mat                   A;

410:   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
411:   n = mlmat->invec_leng;
412:   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);

414:   if (reuse) {
415:     A = *newmat;
416:     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
417:   } else {
418:     PetscInt *nnzA,*nnzB,*nnz;
419:     MatCreate(mlmat->comm->USR_comm,&A);
420:     MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
421:     MatSetType(A,MATMPIAIJ);
422:     PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);

424:     nz_max = 0;
425:     for (i=0; i<m; i++){
426:       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
427:       if (nz_max < nnz[i]) nz_max = nnz[i];
428:       nnzA[i] = 1; /* diag */
429:       for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
430:         if (ml_cols[j] < m) nnzA[i]++;
431:       }
432:       nnzB[i] = nnz[i] - nnzA[i];
433:     }
434:     MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);
435:     PetscFree3(nnzA,nnzB,nnz);
436:   }

438:   /* insert mat values -- remap row and column indices */
439:   nz_max++;
440:   PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);
441:   /* create global row numbering for a ML_Operator */
442:   ML_build_global_numbering(mlmat,&gordering,"rows");
443:   for (i=0; i<m; i++) {
444:     PetscInt ncols;
445:     row = gordering[i];
446:     k = 0;
447:     /* diagonal entry */
448:     aj[k] = row; aa[k++] = ml_vals[i];
449:     /* off diagonal entries */
450:     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
451:       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
452:     }
453:     ncols = ml_cols[i+1] - ml_cols[i] + 1;
454:     MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);
455:   }
456:   ML_free(gordering);
457:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
458:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
459:   *newmat = A;

461:   PetscFree2(aa,aj);
462:   return(0);
463: }

465: /* -----------------------------------------------------------------------------*/
468: PetscErrorCode PCReset_ML(PC pc)
469: {
470:   PetscErrorCode  ierr;
471:   PC_MG           *mg = (PC_MG*)pc->data;
472:   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
473:   PetscInt        level,fine_level=pc_ml->Nlevels-1;

476:   ML_Aggregate_Destroy(&pc_ml->agg_object);
477:   ML_Destroy(&pc_ml->ml_object);

479:   if (pc_ml->PetscMLdata) {
480:     PetscFree(pc_ml->PetscMLdata->pwork);
481:     MatDestroy(&pc_ml->PetscMLdata->Aloc);
482:     VecDestroy(&pc_ml->PetscMLdata->x);
483:     VecDestroy(&pc_ml->PetscMLdata->y);
484:   }
485:   PetscFree(pc_ml->PetscMLdata);

487:   if (pc_ml->gridctx) {
488:     for (level=0; level<fine_level; level++){
489:       if (pc_ml->gridctx[level].A){MatDestroy(&pc_ml->gridctx[level].A);}
490:       if (pc_ml->gridctx[level].P){MatDestroy(&pc_ml->gridctx[level].P);}
491:       if (pc_ml->gridctx[level].R){MatDestroy(&pc_ml->gridctx[level].R);}
492:       if (pc_ml->gridctx[level].x){VecDestroy(&pc_ml->gridctx[level].x);}
493:       if (pc_ml->gridctx[level].b){VecDestroy(&pc_ml->gridctx[level].b);}
494:       if (pc_ml->gridctx[level+1].r){VecDestroy(&pc_ml->gridctx[level+1].r);}
495:     }
496:   }
497:   PetscFree(pc_ml->gridctx);
498:   return(0);
499: }
500: /* -------------------------------------------------------------------------- */
501: /*
502:    PCSetUp_ML - Prepares for the use of the ML preconditioner
503:                     by setting data structures and options.   

505:    Input Parameter:
506: .  pc - the preconditioner context

508:    Application Interface Routine: PCSetUp()

510:    Notes:
511:    The interface routine PCSetUp() is not usually called directly by
512:    the user, but instead is called by PCApply() if necessary.
513: */
514: extern PetscErrorCode PCSetFromOptions_MG(PC);
515: extern PetscErrorCode PCReset_MG(PC);

519: PetscErrorCode PCSetUp_ML(PC pc)
520: {
521:   PetscErrorCode  ierr;
522:   PetscMPIInt     size;
523:   FineGridCtx     *PetscMLdata;
524:   ML              *ml_object;
525:   ML_Aggregate    *agg_object;
526:   ML_Operator     *mlmat;
527:   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
528:   Mat             A,Aloc;
529:   GridCtx         *gridctx;
530:   PC_MG           *mg = (PC_MG*)pc->data;
531:   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
532:   PetscBool       isSeq, isMPI;
533:   KSP             smoother;
534:   PC              subpc;
535:   PetscInt        mesh_level, old_mesh_level;

538:   A = pc->pmat;
539:   MPI_Comm_size(((PetscObject)A)->comm,&size);

541:   if (pc->setupcalled) {
542:     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
543:       /*
 544:        Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
545:        multiple solves in which the matrix is not changing too quickly.
546:        */
547:       ml_object = pc_ml->ml_object;
548:       gridctx = pc_ml->gridctx;
549:       Nlevels = pc_ml->Nlevels;
550:       fine_level = Nlevels - 1;
551:       gridctx[fine_level].A = A;

553:       PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
554:       PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
555:       if (isMPI){
556:         MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);
557:       } else if (isSeq) {
558:         Aloc = A;
559:         PetscObjectReference((PetscObject)Aloc);
560:       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

562:       MatGetSize(Aloc,&m,&nlocal_allcols);
563:       PetscMLdata = pc_ml->PetscMLdata;
564:       MatDestroy(&PetscMLdata->Aloc);
565:       PetscMLdata->A    = A;
566:       PetscMLdata->Aloc = Aloc;
567:       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
568:       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

570:       mesh_level = ml_object->ML_finest_level;
571:       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
572:         old_mesh_level = mesh_level;
573:         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

575:         /* clean and regenerate A */
576:         mlmat = &(ml_object->Amat[mesh_level]);
577:         ML_Operator_Clean(mlmat);
578:         ML_Operator_Init(mlmat,ml_object->comm);
579:         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
580:       }

582:       level = fine_level - 1;
 583:       if (size == 1) { /* convert ML A into seqaij format */
584:         for (mllevel=1; mllevel<Nlevels; mllevel++){
585:           mlmat = &(ml_object->Amat[mllevel]);
586:           MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);
587:           level--;
588:         }
 589:       } else { /* convert ML A into mpiaij format */
590:         for (mllevel=1; mllevel<Nlevels; mllevel++){
591:           mlmat  = &(ml_object->Amat[mllevel]);
592:           MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);
593:           level--;
594:         }
595:       }

597:       for (level=0; level<fine_level; level++) {
598:         if (level > 0){
599:           PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
600:         }
601:         KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);
602:       }
603:       PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
 604:       KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);

606:       PCSetUp_MG(pc);
607:       return(0);
608:     } else {
609:       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
610:       PCReset_ML(pc);
611:       PCReset_MG(pc);
612:     }
613:   }

615:   /* setup special features of PCML */
616:   /*--------------------------------*/
 617:   /* convert A to Aloc, to be used by ML at the fine grid */
618:   pc_ml->size = size;
619:   PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
620:   PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
621:   if (isMPI){
622:     MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);
623:   } else if (isSeq) {
624:     Aloc = A;
625:     PetscObjectReference((PetscObject)Aloc);
626:   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

628:   /* create and initialize struct 'PetscMLdata' */
629:   PetscNewLog(pc,FineGridCtx,&PetscMLdata);
630:   pc_ml->PetscMLdata = PetscMLdata;
631:   PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);

633:   VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);
634:   VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);
635:   VecSetType(PetscMLdata->x,VECSEQ);

637:   VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);
638:   VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);
639:   VecSetType(PetscMLdata->y,VECSEQ);
640:   PetscMLdata->A    = A;
641:   PetscMLdata->Aloc = Aloc;
642: 
 643:   /* create the ML discretization matrix at the fine grid */
 644:   /* ML requires the fine-grid matrix as input; from it ML determines the number of levels */
645:   MatGetSize(Aloc,&m,&nlocal_allcols);
646:   MatGetBlockSize(A,&bs);
647:   ML_Create(&ml_object,pc_ml->MaxNlevels);
648:   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
649:   pc_ml->ml_object = ml_object;
650:   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
651:   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
652:   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

654:   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

656:   /* aggregation */
657:   ML_Aggregate_Create(&agg_object);
658:   pc_ml->agg_object = agg_object;

660:   {
661:     MatNullSpace mnull;
662:     MatGetNearNullSpace(A,&mnull);
663:     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
664:       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
665:       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
666:       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
667:     }
668:     switch (pc_ml->nulltype) {
669:     case PCML_NULLSPACE_USER: {
670:       PetscScalar *nullvec;
671:       const PetscScalar *v;
672:       PetscBool has_const;
673:       PetscInt i,j,mlocal,nvec,M;
674:       const Vec *vecs;
675:       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
676:       MatGetSize(A,&M,PETSC_NULL);
677:       MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);
678:       MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);
679:       PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);
680:       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
681:       for (i=0; i<nvec; i++) {
682:         VecGetArrayRead(vecs[i],&v);
683:         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
684:         VecRestoreArrayRead(vecs[i],&v);
685:       }
686:       ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);
687:       PetscFree(nullvec);
688:     } break;
689:     case PCML_NULLSPACE_BLOCK:
690:       ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);
691:       break;
692:     case PCML_NULLSPACE_SCALAR:
693:       break;
694:     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
695:     }
696:   }
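  /*
     Editor's note -- to reach the PCML_NULLSPACE_USER branch above, attach a
     near null space to the operator before PCSetUp(); a minimal sketch using
     just the constant vector:

       MatNullSpace mnull;
       MatNullSpaceCreate(comm,PETSC_TRUE,0,PETSC_NULL,&mnull);
       MatSetNearNullSpace(A,mnull);
       MatNullSpaceDestroy(&mnull);
  */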
697:   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
698:   /* set options */
699:   switch (pc_ml->CoarsenScheme) {
700:   case 1:
701:     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
702:   case 2:
703:     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
704:   case 3:
705:     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
706:   }
707:   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
708:   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
709:   if (pc_ml->SpectralNormScheme_Anorm){
710:     ML_Set_SpectralNormScheme_Anorm(ml_object);
711:   }
712:   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
713:   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
714:   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
715:   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
716:   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
717:   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

719:   if (pc_ml->OldHierarchy) {
720:     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
721:   } else {
722:     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
723:   }
 724:   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
725:   pc_ml->Nlevels = Nlevels;
726:   fine_level = Nlevels - 1;

728:   PCMGSetLevels(pc,Nlevels,PETSC_NULL);
729:   /* set default smoothers */
 730:   for (level=1; level<=fine_level; level++){
 731:     /* the same default smoother, Richardson + SOR, is used in serial and parallel */
 732:     PCMGGetSmoother(pc,level,&smoother);
 733:     KSPSetType(smoother,KSPRICHARDSON);
 734:     KSPGetPC(smoother,&subpc);
 735:     PCSetType(subpc,PCSOR);
 736:   }
743:   PetscObjectOptionsBegin((PetscObject)pc);
744:   PCSetFromOptions_MG(pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
745:   PetscOptionsEnd();

747:   PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);
748:   pc_ml->gridctx = gridctx;

750:   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
751:      Level 0 is the finest grid for ML, but coarsest for PETSc! */
752:   gridctx[fine_level].A = A;

754:   level = fine_level - 1;
755:   if (size == 1){ /* convert ML P, R and A into seqaij format */
756:     for (mllevel=1; mllevel<Nlevels; mllevel++){
757:       mlmat = &(ml_object->Pmat[mllevel]);
758:       MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
759:       mlmat = &(ml_object->Rmat[mllevel-1]);
760:       MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);
761: 
762:       mlmat = &(ml_object->Amat[mllevel]);
763:       MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
764:       level--;
765:     }
766:   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
767:     for (mllevel=1; mllevel<Nlevels; mllevel++){
768:       mlmat  = &(ml_object->Pmat[mllevel]);
769:       MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
770:       mlmat  = &(ml_object->Rmat[mllevel-1]);
771:       MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);

773:       mlmat  = &(ml_object->Amat[mllevel]);
774:       MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
775:       level--;
776:     }
777:   }

779:   /* create vectors and ksp at all levels */
780:   for (level=0; level<fine_level; level++){
781:     level1 = level + 1;
782:     VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);
783:     VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);
784:     VecSetType(gridctx[level].x,VECMPI);
785:     PCMGSetX(pc,level,gridctx[level].x);
786: 
787:     VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);
788:     VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);
789:     VecSetType(gridctx[level].b,VECMPI);
790:     PCMGSetRhs(pc,level,gridctx[level].b);
791: 
792:     VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);
793:     VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);
794:     VecSetType(gridctx[level1].r,VECMPI);
795:     PCMGSetR(pc,level1,gridctx[level1].r);

797:     if (level == 0){
798:       PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
799:     } else {
800:       PCMGGetSmoother(pc,level,&gridctx[level].ksp);
801:     }
802:   }
803:   PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);

805:   /* create coarse level and the interpolation between the levels */
806:   for (level=0; level<fine_level; level++){
807:     level1 = level + 1;
808:     PCMGSetInterpolation(pc,level1,gridctx[level].P);
809:     PCMGSetRestriction(pc,level1,gridctx[level].R);
810:     if (level > 0){
811:       PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
812:     }
813:     KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);
814:   }
815:   PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
 816:   KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);

 818:   /* setupcalled is set to 0 so that MG is set up from scratch */
819:   pc->setupcalled = 0;
820:   PCSetUp_MG(pc);
821:   return(0);
822: }
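
/*
   Editor's note -- the reuse path at the top of PCSetUp_ML() is taken only when
   -pc_ml_reuse_interpolation is set and the application declares an unchanged
   sparsity pattern between solves, e.g. (sketch; ksp, A, b, x are the caller's):

     KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);
     KSPSolve(ksp,b,x);    later solves rebuild only the Galerkin coarse
                           operators, keeping the old aggregates and P/R
*/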

824: /* -------------------------------------------------------------------------- */
825: /*
826:    PCDestroy_ML - Destroys the private context for the ML preconditioner
827:    that was created with PCCreate_ML().

829:    Input Parameter:
830: .  pc - the preconditioner context

832:    Application Interface Routine: PCDestroy()
833: */
836: PetscErrorCode PCDestroy_ML(PC pc)
837: {
838:   PetscErrorCode  ierr;
839:   PC_MG           *mg = (PC_MG*)pc->data;
840:   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;

843:   PCReset_ML(pc);
844:   PetscFree(pc_ml);
845:   PCDestroy_MG(pc);
846:   return(0);
847: }

851: PetscErrorCode PCSetFromOptions_ML(PC pc)
852: {
853:   PetscErrorCode  ierr;
854:   PetscInt        indx,PrintLevel;
855:   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
856:   PC_MG           *mg = (PC_MG*)pc->data;
857:   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
858:   PetscMPIInt     size;
859:   MPI_Comm        comm = ((PetscObject)pc)->comm;

862:   MPI_Comm_size(comm,&size);
863:   PetscOptionsHead("ML options");
864:   PrintLevel    = 0;
865:   indx          = 0;
866:   PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);
867:   ML_Set_PrintLevel(PrintLevel);
868:   PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);
869:   PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);
870:   PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);
871:   pc_ml->CoarsenScheme = indx;
872:   PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);
873:   PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);
874:   PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);
875:   PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);
876:   PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);
877:   PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);
878:   PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);
879:   PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);
880:   /*
881:     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
882:     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.

884:     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
885:     combination of options and ML's exit(1) explanations don't help matters.
886:   */
887:   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
888:   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
889:   if (pc_ml->EnergyMinimization == 4) {PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");}
890:   if (pc_ml->EnergyMinimization) {
891:     PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);
892:   }
893:   if (pc_ml->EnergyMinimization == 2) {
894:     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
895:     PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);
896:   }
897:   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
898:   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
 899:   PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);
900:   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
901:   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
 902:   PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);
903:   /*
904:     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
905:     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
906:     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
907:     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
908:     this context, but ML doesn't provide a way to find out which ones.
909:    */
910:   PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);
911:   PetscOptionsTail();
912:   return(0);
913: }
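
/*
   Editor's note -- a representative command line exercising the options parsed
   above (the values are illustrative, not recommendations):

     ./app -pc_type ml -pc_ml_maxNlevels 4 -pc_ml_maxCoarseSize 50 \
           -pc_ml_CoarsenScheme MIS -pc_ml_Threshold 0.02 -pc_ml_PrintLevel 1
*/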

915: /* -------------------------------------------------------------------------- */
916: /*
 917:    PCCreate_ML - Creates an ML preconditioner context, PC_ML,
918:    and sets this as the private data within the generic preconditioning 
919:    context, PC, that was created within PCCreate().

921:    Input Parameter:
922: .  pc - the preconditioner context

924:    Application Interface Routine: PCCreate()
925: */

927: /*MC
 928:      PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
 929:        the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
 930:        operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
 931:        and the restriction/interpolation operators wrapped as PETSc shell matrices.

 933:    Options Database Keys:
 934:    Multigrid options (inherited):
 935: +  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycleType)
 936: .  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
 937: .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
 938: .  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
 939:    ML options:
940: .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
941: .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
942: .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
943: .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
944: .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
945: .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
946: -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

948:    Level: intermediate

950:   Concepts: multigrid
951:  
952: .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType, 
 953:            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycleType(), PCMGSetNumberSmoothDown(),
954:            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
955:            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
956:            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()      
957: M*/
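
/*
   Editor's note -- a minimal calling sequence for PCML from user code, assuming
   A is an assembled AIJ matrix and b, x are conforming vectors:

     KSP ksp;
     PC  pc;
     KSPCreate(comm,&ksp);
     KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCML);
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);
*/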

959: EXTERN_C_BEGIN
962: PetscErrorCode  PCCreate_ML(PC pc)
963: {
964:   PetscErrorCode  ierr;
965:   PC_ML           *pc_ml;
966:   PC_MG           *mg;

969:   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
970:   PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */
971:   PetscObjectChangeTypeName((PetscObject)pc,PCML);
 972:   /* Since PCMG tries to use the DM associated with the PC, we must delete it */
973:   DMDestroy(&pc->dm);
974:   mg = (PC_MG*)pc->data;
975:   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */

977:   /* create a supporting struct and attach it to pc */
978:   PetscNewLog(pc,PC_ML,&pc_ml);
979:   mg->innerctx = pc_ml;

981:   pc_ml->ml_object     = 0;
982:   pc_ml->agg_object    = 0;
983:   pc_ml->gridctx       = 0;
984:   pc_ml->PetscMLdata   = 0;
985:   pc_ml->Nlevels       = -1;
986:   pc_ml->MaxNlevels    = 10;
987:   pc_ml->MaxCoarseSize = 1;
988:   pc_ml->CoarsenScheme = 1;
989:   pc_ml->Threshold     = 0.0;
990:   pc_ml->DampingFactor = 4.0/3.0;
991:   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
992:   pc_ml->size          = 0;

994:   /* overwrite the pointers of PCMG by the functions of PCML */
995:   pc->ops->setfromoptions = PCSetFromOptions_ML;
996:   pc->ops->setup          = PCSetUp_ML;
997:   pc->ops->reset          = PCReset_ML;
998:   pc->ops->destroy        = PCDestroy_ML;
999:   return(0);
1000: }
1001: EXTERN_C_END