Actual source code: gamg.c
petsc-3.12.5 2020-03-29
1: /*
2: GAMG geometric-algebraic multigrid PC - Mark Adams 2011
3: */
4: #include <petsc/private/matimpl.h>
5: #include <../src/ksp/pc/impls/gamg/gamg.h>
6: #include <../src/ksp/pc/impls/bjacobi/bjacobi.h> /* Hack to access same_local_solves */
/* File-scope logging handles and registry state for the GAMG PC implementation. */
8: #if defined PETSC_GAMG_USE_LOG
9: PetscLogEvent petsc_gamg_setup_events[NUM_SET]; /* fine-grained setup phase events (SET1..SET15 etc.) used throughout this file */
10: #endif
12: #if defined PETSC_USE_LOG
13: PetscLogEvent PC_GAMGGraph_AGG;
14: PetscLogEvent PC_GAMGGraph_GEO;
15: PetscLogEvent PC_GAMGCoarsen_AGG;
16: PetscLogEvent PC_GAMGCoarsen_GEO;
17: PetscLogEvent PC_GAMGProlongator_AGG;
18: PetscLogEvent PC_GAMGProlongator_GEO;
19: PetscLogEvent PC_GAMGOptProlongator_AGG; /* per-phase events for the AGG and GEO subtypes */
20: #endif
22: /* #define GAMG_STAGES */
23: #if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
24: static PetscLogStage gamg_stages[PETSC_GAMG_MAXLEVELS]; /* optional per-level log stages, off by default (GAMG_STAGES undefined) */
25: #endif
27: static PetscFunctionList GAMGList = 0; /* registry of GAMG subtypes (presumably agg/geo/classical — populated elsewhere in this file) */
28: static PetscBool PCGAMGPackageInitialized; /* guards one-time package registration */
30: /* ----------------------------------------------------------------------------- */
/*
  PCReset_GAMG - free the per-setup coarse-space data held in the GAMG context
  ('data' and the cached 'orig_data') so a subsequent PCSetUp() rebuilds from scratch.
  The PC_GAMG context itself is not destroyed here (see PCDestroy_GAMG).
*/
31: PetscErrorCode PCReset_GAMG(PC pc)
32: {
34: PC_MG *mg = (PC_MG*)pc->data;
35: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx; /* GAMG state is nested inside the MG context */
38: PetscFree(pc_gamg->data); /* per-setup auxiliary data used to build prolongators */
39: pc_gamg->data_sz = 0;
40: PetscFree(pc_gamg->orig_data); /* cached copy made in PCSetUp_GAMG for prolongator reuse */
41: return(0);
42: }
44: /* -------------------------------------------------------------------------- */
45: /*
46: PCGAMGCreateLevel_GAMG: create coarse op with RAP. repartition and/or reduce number
47: of active processors.
49: Input Parameter:
50: . pc - parameters + side effect: coarse data in 'pc_gamg->data' and
51: 'pc_gamg->data_sz' are changed via repartitioning/reduction.
52: . Amat_fine - matrix on this fine (k) level
53: . cr_bs - coarse block size
54: In/Output Parameter:
55: . a_P_inout - prolongation operator to the next level (k-->k-1)
56: . a_nactive_proc - number of active procs
57: Output Parameter:
58: . a_Amat_crs - coarse matrix that is created (k-1)
59: */
/*
  PCGAMGCreateLevel_GAMG - form the Galerkin coarse operator Cmat = P^T * Amat_fine * P,
  then (optionally) repartition it and/or reduce the number of active MPI processes when
  the coarse problem is small. The coarse matrix rows/columns, the prolongator columns,
  and pc_gamg->data are all permuted consistently. Parameters are described in the
  comment block above; note the multiple early-return "no change" paths that output
  Cmat unmodified.
*/
61: static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc,Mat Amat_fine,PetscInt cr_bs,Mat *a_P_inout,Mat *a_Amat_crs,PetscMPIInt *a_nactive_proc,IS * Pcolumnperm, PetscBool is_last)
62: {
63: PetscErrorCode ierr;
64: PC_MG *mg = (PC_MG*)pc->data;
65: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
66: Mat Cmat,Pold=*a_P_inout;
67: MPI_Comm comm;
68: PetscMPIInt rank,size,new_size,nactive=*a_nactive_proc;
69: PetscInt ncrs_eq,ncrs,f_bs;
72: PetscObjectGetComm((PetscObject)Amat_fine,&comm);
73: MPI_Comm_rank(comm, &rank);
74: MPI_Comm_size(comm, &size);
75: MatGetBlockSize(Amat_fine, &f_bs);
76: MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat); /* Galerkin RAP product: the coarse operator */
78: if (Pcolumnperm) *Pcolumnperm = NULL;
80: /* set 'ncrs' (nodes), 'ncrs_eq' (equations)*/
81: MatGetLocalSize(Cmat, &ncrs_eq, NULL);
82: if (pc_gamg->data_cell_rows>0) {
83: ncrs = pc_gamg->data_sz/pc_gamg->data_cell_cols/pc_gamg->data_cell_rows; /* node count implied by the auxiliary data layout */
84: } else {
85: PetscInt bs;
86: MatGetBlockSize(Cmat, &bs);
87: ncrs = ncrs_eq/bs; /* no data: fall back to equations per block */
88: }
89: /* get number of PEs to make active 'new_size', reduce, can be any integer 1-P */
90: if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) new_size = 1; /* coarsest grid goes to one process unless a parallel coarse solve was requested */
91: else {
92: PetscInt ncrs_eq_glob;
93: MatGetSize(Cmat, &ncrs_eq_glob, NULL);
94: new_size = (PetscMPIInt)((float)ncrs_eq_glob/(float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
95: if (!new_size) new_size = 1; /* not likely, possible? */
96: else if (new_size >= nactive) new_size = nactive; /* no change, rare */
97: }
99: if (new_size==nactive) {
100: *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
101: if (new_size < size) {
102: /* odd case where multiple coarse grids are on one processor or no coarsening ... */
103: PetscInfo1(pc,"reduced grid using same number of processors (%d) as last grid (use larger coarse grid)\n",nactive);
104: if (pc_gamg->cpu_pin_coarse_grids) {
105: MatPinToCPU(*a_Amat_crs,PETSC_TRUE);
106: MatPinToCPU(*a_P_inout,PETSC_TRUE);
107: }
108: }
109: /* we know that the grid structure can be reused in MatPtAP */
110: } else { /* reduce active processors - we know that the grid structure can NOT be reused in MatPtAP */
111: PetscInt *counts,*newproc_idx,ii,jj,kk,strideNew,*tidx,ncrs_new,ncrs_eq_new,nloc_old,expand_factor=1,rfactor=1;
112: IS is_eq_newproc,is_eq_num,is_eq_num_prim,new_eq_indices;
113: nloc_old = ncrs_eq/cr_bs;
114: if (ncrs_eq % cr_bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ncrs_eq %D not divisible by cr_bs %D",ncrs_eq,cr_bs);
115: #if defined PETSC_GAMG_USE_LOG
116: PetscLogEventBegin(petsc_gamg_setup_events[SET12],0,0,0,0);
117: #endif
118: /* get new_size and rfactor */
119: if (pc_gamg->layout_type==PCGAMG_LAYOUT_SPREAD || !pc_gamg->repart) {
120: /* find factor: pick the divisor of 'size' whose quotient best matches the requested new_size */
121: if (new_size == 1) rfactor = size; /* don't modify */
122: else {
123: PetscReal best_fact = 0.;
124: jj = -1;
125: for (kk = 1 ; kk <= size ; kk++) {
126: if (!(size%kk)) { /* a candidate */
127: PetscReal nactpe = (PetscReal)size/(PetscReal)kk, fact = nactpe/(PetscReal)new_size;
128: if (fact > 1.0) fact = 1./fact; /* keep fact < 1 */
129: if (fact > best_fact) {
130: best_fact = fact; jj = kk;
131: }
132: }
133: }
134: if (jj != -1) rfactor = jj;
135: else rfactor = 1; /* a prime */
136: if (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) expand_factor = 1; /* pack ranks contiguously */
137: else expand_factor = rfactor; /* spread ranks with stride rfactor */
138: }
139: new_size = size/rfactor; /* make new size one that is factor */
140: if (new_size==nactive) {
141: *a_Amat_crs = Cmat; /* output - no repartitioning or reduction, bail out because nested here */
142: PetscInfo2(pc,"Finding factorable processor set stopped reduction: new_size=%d, neq(loc)=%D\n",new_size,ncrs_eq);
143: #if defined PETSC_GAMG_USE_LOG
144: PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);
145: #endif
146: return(0);
147: }
148: }
149: /* make 'is_eq_newproc' */
150: PetscMalloc1(size, &counts);
151: if (pc_gamg->repart) {
152: /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
153: Mat adj;
154: PetscInfo4(pc,"Repartition: size (active): %d --> %d, %D local equations, using %s process layout\n",*a_nactive_proc, new_size, ncrs_eq, (pc_gamg->layout_type==PCGAMG_LAYOUT_COMPACT) ? "compact" : "spread");
155: /* get 'adj' */
156: if (cr_bs == 1) {
157: MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);
158: } else {
159: /* make a scalar matrix to partition (no Stokes here) */
160: Mat tMat;
161: PetscInt Istart_crs,Iend_crs,ncols,jj,Ii;
162: const PetscScalar *vals;
163: const PetscInt *idx;
164: PetscInt *d_nnz, *o_nnz, M, N;
165: static PetscInt llev = 0; /* ugly but just used for debugging */
166: MatType mtype;
168: PetscMalloc2(ncrs, &d_nnz,ncrs, &o_nnz);
169: MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs);
170: MatGetSize(Cmat, &M, &N);
171: for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
172: MatGetRow(Cmat,Ii,&ncols,0,0);
173: d_nnz[jj] = ncols/cr_bs; /* overestimate: split block row's nnz evenly; clamped below */
174: o_nnz[jj] = ncols/cr_bs;
175: MatRestoreRow(Cmat,Ii,&ncols,0,0);
176: if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
177: if (o_nnz[jj] > (M/cr_bs-ncrs)) o_nnz[jj] = M/cr_bs-ncrs;
178: }
180: MatGetType(Amat_fine,&mtype);
181: MatCreate(comm, &tMat);
182: MatSetSizes(tMat, ncrs, ncrs,PETSC_DETERMINE, PETSC_DETERMINE);
183: MatSetType(tMat,mtype);
184: MatSeqAIJSetPreallocation(tMat,0,d_nnz);
185: MatMPIAIJSetPreallocation(tMat,0,d_nnz,0,o_nnz);
186: PetscFree2(d_nnz,o_nnz);
188: for (ii = Istart_crs; ii < Iend_crs; ii++) {
189: PetscInt dest_row = ii/cr_bs; /* collapse each cr_bs x cr_bs block to one scalar entry */
190: MatGetRow(Cmat,ii,&ncols,&idx,&vals);
191: for (jj = 0; jj < ncols; jj++) {
192: PetscInt dest_col = idx[jj]/cr_bs;
193: PetscScalar v = 1.0;
194: MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES);
195: }
196: MatRestoreRow(Cmat,ii,&ncols,&idx,&vals);
197: }
198: MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);
199: MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);
201: if (llev++ == -1) { /* dead debug hook: flip the comparison by hand to dump the partitioning matrix */
202: PetscViewer viewer; char fname[32];
203: PetscSNPrintf(fname,sizeof(fname),"part_mat_%D.mat",llev);
204: PetscViewerBinaryOpen(comm,fname,FILE_MODE_WRITE,&viewer);
205: MatView(tMat, viewer);
206: PetscViewerDestroy(&viewer);
207: }
208: MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);
209: MatDestroy(&tMat);
210: } /* create 'adj' */
212: { /* partition: get newproc_idx */
213: char prefix[256];
214: const char *pcpre;
215: const PetscInt *is_idx;
216: MatPartitioning mpart;
217: IS proc_is;
219: MatPartitioningCreate(comm, &mpart);
220: MatPartitioningSetAdjacency(mpart, adj);
221: PCGetOptionsPrefix(pc, &pcpre);
222: PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");
223: PetscObjectSetOptionsPrefix((PetscObject)mpart,prefix); /* lets users configure via -<pre>pc_gamg_mat_partitioning_... */
224: MatPartitioningSetFromOptions(mpart);
225: MatPartitioningSetNParts(mpart, new_size);
226: MatPartitioningApply(mpart, &proc_is);
227: MatPartitioningDestroy(&mpart);
229: /* collect IS info: expand the per-node partition to equation space */
230: PetscMalloc1(ncrs_eq, &newproc_idx);
231: ISGetIndices(proc_is, &is_idx);
232: for (kk = jj = 0 ; kk < nloc_old ; kk++) {
233: for (ii = 0 ; ii < cr_bs ; ii++, jj++) {
234: newproc_idx[jj] = is_idx[kk] * expand_factor; /* distribution */
235: }
236: }
237: ISRestoreIndices(proc_is, &is_idx);
238: ISDestroy(&proc_is);
239: }
240: MatDestroy(&adj);
242: ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc);
243: PetscFree(newproc_idx);
244: } else { /* simple aggregation of parts -- 'is_eq_newproc' */
245: PetscInt targetPE;
246: if (new_size==nactive) {
247: *a_Amat_crs = Cmat; /* output - no repartitioning or reduction, bail out because nested here */
248: PetscFree(counts);
249: PetscInfo2(pc,"Aggregate processors noop: new_size=%d, neq(loc)=%D\n",new_size,ncrs_eq);
250: #if defined PETSC_GAMG_USE_LOG
251: PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);
252: #endif
253: return(0);
254: }
255: PetscInfo1(pc,"Number of equations (loc) %D with simple aggregation\n",ncrs_eq);
256: targetPE = (rank/rfactor)*expand_factor; /* fold groups of rfactor ranks onto one target rank */
257: ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc); /* stride 0: all local equations go to targetPE */
258: } /* end simple 'is_eq_newproc' */
260: /*
261: Create an index set from the is_eq_newproc index set to indicate the mapping TO
262: */
263: ISPartitioningToNumbering(is_eq_newproc, &is_eq_num);
264: is_eq_num_prim = is_eq_num;
265: /*
266: Determine how many equations/vertices are assigned to each processor
267: */
268: ISPartitioningCount(is_eq_newproc, size, counts);
269: ncrs_eq_new = counts[rank];
270: ISDestroy(&is_eq_newproc);
271: ncrs_new = ncrs_eq_new/cr_bs;
273: PetscFree(counts);
274: #if defined PETSC_GAMG_USE_LOG
275: PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);
276: #endif
277: /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into some complex abstracted thing */
278: {
279: Vec src_crd, dest_crd;
280: const PetscInt *idx,ndata_rows=pc_gamg->data_cell_rows,ndata_cols=pc_gamg->data_cell_cols,node_data_sz=ndata_rows*ndata_cols;
281: VecScatter vecscat;
282: PetscScalar *array;
283: IS isscat;
284: /* move data (for primal equations only) */
285: /* Create a vector to contain the newly ordered element information */
286: VecCreate(comm, &dest_crd);
287: VecSetSizes(dest_crd, node_data_sz*ncrs_new, PETSC_DECIDE);
288: VecSetType(dest_crd,VECSTANDARD); /* this is needed! */
289: /*
290: There are 'ndata_rows*ndata_cols' data items per node, (one can think of the vectors of having
291: a block size of ...). Note, ISs are expanded into equation space by 'cr_bs'.
292: */
293: PetscMalloc1(ncrs*node_data_sz, &tidx);
294: ISGetIndices(is_eq_num_prim, &idx);
295: for (ii=0,jj=0; ii<ncrs; ii++) {
296: PetscInt id = idx[ii*cr_bs]/cr_bs; /* get node back */
297: for (kk=0; kk<node_data_sz; kk++, jj++) tidx[jj] = id*node_data_sz + kk;
298: }
299: ISRestoreIndices(is_eq_num_prim, &idx);
300: ISCreateGeneral(comm, node_data_sz*ncrs, tidx, PETSC_COPY_VALUES, &isscat);
301: PetscFree(tidx);
302: /*
303: Create a vector to contain the original vertex information for each element
304: */
305: VecCreateSeq(PETSC_COMM_SELF, node_data_sz*ncrs, &src_crd);
306: for (jj=0; jj<ndata_cols; jj++) {
307: const PetscInt stride0=ncrs*pc_gamg->data_cell_rows; /* column stride of the old column-major data layout */
308: for (ii=0; ii<ncrs; ii++) {
309: for (kk=0; kk<ndata_rows; kk++) {
310: PetscInt ix = ii*ndata_rows + kk + jj*stride0, jx = ii*node_data_sz + kk*ndata_cols + jj;
311: PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
312: VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES);
313: }
314: }
315: }
316: VecAssemblyBegin(src_crd);
317: VecAssemblyEnd(src_crd);
318: /*
319: Scatter the element vertex information (still in the original vertex ordering)
320: to the correct processor
321: */
322: VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat);
323: ISDestroy(&isscat);
324: VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);
325: VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);
326: VecScatterDestroy(&vecscat);
327: VecDestroy(&src_crd);
328: /*
329: Put the element vertex data into a new allocation of the gdata->ele
330: */
331: PetscFree(pc_gamg->data);
332: PetscMalloc1(node_data_sz*ncrs_new, &pc_gamg->data);
334: pc_gamg->data_sz = node_data_sz*ncrs_new;
335: strideNew = ncrs_new*ndata_rows;
337: VecGetArray(dest_crd, &array);
338: for (jj=0; jj<ndata_cols; jj++) {
339: for (ii=0; ii<ncrs_new; ii++) {
340: for (kk=0; kk<ndata_rows; kk++) {
341: PetscInt ix = ii*ndata_rows + kk + jj*strideNew, jx = ii*node_data_sz + kk*ndata_cols + jj;
342: pc_gamg->data[ix] = PetscRealPart(array[jx]); /* convert back to real-valued storage */
343: }
344: }
345: }
346: VecRestoreArray(dest_crd, &array);
347: VecDestroy(&dest_crd);
348: }
349: /* move A and P (columns) with new layout */
350: #if defined PETSC_GAMG_USE_LOG
351: PetscLogEventBegin(petsc_gamg_setup_events[SET13],0,0,0,0);
352: #endif
353: /*
354: Invert for MatCreateSubMatrix
355: */
356: ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices);
357: ISSort(new_eq_indices); /* is this needed? */
358: ISSetBlockSize(new_eq_indices, cr_bs);
359: if (is_eq_num != is_eq_num_prim) {
360: ISDestroy(&is_eq_num_prim); /* could be same as 'is_eq_num' */
361: }
362: if (Pcolumnperm) {
363: PetscObjectReference((PetscObject)new_eq_indices); /* caller takes its own reference to the column permutation */
364: *Pcolumnperm = new_eq_indices;
365: }
366: ISDestroy(&is_eq_num);
367: #if defined PETSC_GAMG_USE_LOG
368: PetscLogEventEnd(petsc_gamg_setup_events[SET13],0,0,0,0);
369: PetscLogEventBegin(petsc_gamg_setup_events[SET14],0,0,0,0);
370: #endif
371: /* 'a_Amat_crs' output */
372: {
373: Mat mat;
374: MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat); /* permuted coarse operator on the new layout */
375: *a_Amat_crs = mat;
376: }
377: MatDestroy(&Cmat);
379: #if defined PETSC_GAMG_USE_LOG
380: PetscLogEventEnd(petsc_gamg_setup_events[SET14],0,0,0,0);
381: #endif
382: /* prolongator */
383: {
384: IS findices;
385: PetscInt Istart,Iend;
386: Mat Pnew;
388: MatGetOwnershipRange(Pold, &Istart, &Iend);
389: #if defined PETSC_GAMG_USE_LOG
390: PetscLogEventBegin(petsc_gamg_setup_events[SET15],0,0,0,0);
391: #endif
392: ISCreateStride(comm,Iend-Istart,Istart,1,&findices); /* keep fine rows in place; only columns are permuted */
393: ISSetBlockSize(findices,f_bs);
394: MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew);
395: ISDestroy(&findices);
397: #if defined PETSC_GAMG_USE_LOG
398: PetscLogEventEnd(petsc_gamg_setup_events[SET15],0,0,0,0);
399: #endif
400: MatDestroy(a_P_inout);
402: /* output - repartitioned */
403: *a_P_inout = Pnew;
404: }
405: ISDestroy(&new_eq_indices);
407: *a_nactive_proc = new_size; /* output */
409: /* pinning on reduced grids, not a bad heuristic and optimization gets folded into process reduction optimization */
410: if (pc_gamg->cpu_pin_coarse_grids) {
411: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
412: static PetscInt llev = 2;
413: PetscInfo1(pc,"Pinning level %D to the CPU\n",llev++);
414: #endif
415: MatPinToCPU(*a_Amat_crs,PETSC_TRUE);
416: MatPinToCPU(*a_P_inout,PETSC_TRUE);
417: if (1) { /* lvec is created, need to pin it, this is done in MatSetUpMultiply_MPIAIJ. Hack */
418: Mat A = *a_Amat_crs, P = *a_P_inout;
419: PetscMPIInt size;
420: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
421: if (size > 1) {
422: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data, *p = (Mat_MPIAIJ*)P->data; /* NOTE(review): assumes MPIAIJ storage — verify for other Mat types */
423: VecPinToCPU(a->lvec,PETSC_TRUE);
424: VecPinToCPU(p->lvec,PETSC_TRUE);
425: }
426: }
427: }
428: }
429: return(0);
430: }
432: /* -------------------------------------------------------------------------- */
433: /*
434: PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
435: by setting data structures and options.
437: Input Parameter:
438: . pc - the preconditioner context
440: */
/*
  PCSetUp_GAMG - builds the multigrid hierarchy: per level, subtype callbacks create the
  strength graph, coarsen it, and build (and optionally smooth) the prolongator; then
  createlevel forms the Galerkin coarse operator with optional process reduction.
  Finally the PCMG smoothers, interpolation operators, and coarse-grid solver are
  configured. On repeated setups either everything is rebuilt, or (with reuse_prol)
  only the Galerkin products are redone.
*/
441: PetscErrorCode PCSetUp_GAMG(PC pc)
442: {
444: PC_MG *mg = (PC_MG*)pc->data;
445: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
446: Mat Pmat = pc->pmat;
447: PetscInt fine_level,level,level1,bs,M,N,qq,lidx,nASMBlocksArr[PETSC_GAMG_MAXLEVELS];
448: MPI_Comm comm;
449: PetscMPIInt rank,size,nactivepe;
450: Mat Aarr[PETSC_GAMG_MAXLEVELS],Parr[PETSC_GAMG_MAXLEVELS];
451: IS *ASMLocalIDsArr[PETSC_GAMG_MAXLEVELS];
452: PetscLogDouble nnz0=0.,nnztot=0.;
453: MatInfo info;
454: PetscBool is_last = PETSC_FALSE;
457: PetscObjectGetComm((PetscObject)pc,&comm);
458: MPI_Comm_rank(comm,&rank);
459: MPI_Comm_size(comm,&size);
461: if (pc_gamg->setup_count++ > 0) { /* re-setup path */
462: if ((PetscBool)(!pc_gamg->reuse_prol)) {
463: /* reset everything */
464: PCReset_MG(pc);
465: pc->setupcalled = 0;
466: } else {
467: PC_MG_Levels **mglevels = mg->levels;
468: /* just do Galerkin grids */
469: Mat B,dA,dB;
471: if (!pc->setupcalled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCSetUp() has not been called yet");
472: if (pc_gamg->Nlevels > 1) {
473: /* currently only handle case where mat and pmat are the same on coarser levels */
474: KSPGetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,&dA,&dB);
475: /* (re)set to get dirty flag */
476: KSPSetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,dA,dB);
478: for (level=pc_gamg->Nlevels-2; level>=0; level--) { /* fine-to-coarse index: re-form each coarse operator from the level above */
479: /* 2nd solve, matrix structure can change from repartitioning or process reduction but don't know if we have process reduction here. Should fix */
480: if (pc_gamg->setup_count==2 /* && pc_gamg->repart||reduction */) {
481: PetscInfo2(pc,"new RAP after first solve level %D, %D setup\n",level,pc_gamg->setup_count);
482: MatPtAP(dB,mglevels[level+1]->interpolate,MAT_INITIAL_MATRIX,2.0,&B);
483: MatDestroy(&mglevels[level]->A);
484: mglevels[level]->A = B;
485: } else {
486: PetscInfo2(pc,"RAP after first solve reusing matrix level %D, %D setup\n",level,pc_gamg->setup_count);
487: KSPGetOperators(mglevels[level]->smoothd,NULL,&B);
488: MatPtAP(dB,mglevels[level+1]->interpolate,MAT_REUSE_MATRIX,1.0,&B); /* reuse sparsity of the existing coarse matrix */
489: }
490: KSPSetOperators(mglevels[level]->smoothd,B,B);
491: dB = B; /* this level's operator is the next PtAP input */
492: }
493: }
495: PCSetUp_MG(pc);
496: return(0);
497: }
498: }
500: if (!pc_gamg->data) {
501: if (pc_gamg->orig_data) { /* restore the cached auxiliary data */
502: MatGetBlockSize(Pmat, &bs);
503: MatGetLocalSize(Pmat, &qq, NULL);
505: pc_gamg->data_sz = (qq/bs)*pc_gamg->orig_data_cell_rows*pc_gamg->orig_data_cell_cols;
506: pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
507: pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;
509: PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data);
510: for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
511: } else {
512: if (!pc_gamg->ops->createdefaultdata) SETERRQ(comm,PETSC_ERR_PLIB,"'createdefaultdata' not set(?) need to support NULL data");
513: pc_gamg->ops->createdefaultdata(pc,Pmat); /* subtype supplies default data (e.g. constant near-null space) */
514: }
515: }
517: /* cache original data for reuse */
518: if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
519: PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data);
520: for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
521: pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
522: pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
523: }
525: /* get basic dims */
526: MatGetBlockSize(Pmat, &bs);
527: MatGetSize(Pmat, &M, &N);
529: MatGetInfo(Pmat,MAT_GLOBAL_SUM,&info); /* global reduction */
530: nnz0 = info.nz_used;
531: nnztot = info.nz_used;
532: PetscInfo6(pc,"level %d) N=%D, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",0,M,pc_gamg->data_cell_rows,pc_gamg->data_cell_cols,(int)(nnz0/(PetscReal)M+0.5),size);
534: /* Get A_i and R_i */
535: for (level=0, Aarr[0]=Pmat, nactivepe = size; level < (pc_gamg->Nlevels-1) && (!level || M>pc_gamg->coarse_eq_limit); level++) {
536: pc_gamg->current_level = level;
537: level1 = level + 1;
538: #if defined PETSC_GAMG_USE_LOG
539: PetscLogEventBegin(petsc_gamg_setup_events[SET1],0,0,0,0);
540: #if (defined GAMG_STAGES)
541: PetscLogStagePush(gamg_stages[level]);
542: #endif
543: #endif
544: { /* construct prolongator */
545: Mat Gmat;
546: PetscCoarsenData *agg_lists;
547: Mat Prol11;
549: pc_gamg->ops->graph(pc,Aarr[level], &Gmat); /* strength-of-connection graph */
550: pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists); /* aggregate/coarsen the graph */
551: pc_gamg->ops->prolongator(pc,Aarr[level],Gmat,agg_lists,&Prol11);
553: /* could have failed to create new level */
554: if (Prol11) {
555: /* get new block size of coarse matrices */
556: MatGetBlockSizes(Prol11, NULL, &bs);
558: if (pc_gamg->ops->optprolongator) {
559: /* smooth */
560: pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11);
561: }
563: if (pc_gamg->use_aggs_in_asm) {
564: PetscInt bs;
565: MatGetBlockSizes(Prol11, &bs, NULL);
566: PetscCDGetASMBlocks(agg_lists, bs, Gmat, &nASMBlocksArr[level], &ASMLocalIDsArr[level]); /* save aggregates as ASM subdomains */
567: }
569: Parr[level1] = Prol11;
570: } else Parr[level1] = NULL; /* failed to coarsen */
572: MatDestroy(&Gmat);
573: PetscCDDestroy(agg_lists);
574: } /* construct prolongator scope */
575: #if defined PETSC_GAMG_USE_LOG
576: PetscLogEventEnd(petsc_gamg_setup_events[SET1],0,0,0,0);
577: #endif
578: if (!level) Aarr[0] = Pmat; /* use Pmat for finest level setup */
579: if (!Parr[level1]) { /* failed to coarsen */
580: PetscInfo1(pc,"Stop gridding, level %D\n",level);
581: #if defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES
582: PetscLogStagePop();
583: #endif
584: break;
585: }
586: #if defined PETSC_GAMG_USE_LOG
587: PetscLogEventBegin(petsc_gamg_setup_events[SET2],0,0,0,0);
588: #endif
589: MatGetSize(Parr[level1], &M, &N); /* N is next M, a loop test variable */
590: if (is_last) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Is last ????????");
591: if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
592: if (level1 == pc_gamg->Nlevels-1) is_last = PETSC_TRUE;
593: pc_gamg->ops->createlevel(pc, Aarr[level], bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last); /* RAP + optional process reduction (see PCGAMGCreateLevel_GAMG) */
595: #if defined PETSC_GAMG_USE_LOG
596: PetscLogEventEnd(petsc_gamg_setup_events[SET2],0,0,0,0);
597: #endif
598: MatGetSize(Aarr[level1], &M, &N); /* M is loop test variable */
599: MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info);
600: nnztot += info.nz_used;
601: PetscInfo5(pc,"%d) N=%D, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",level1,M,pc_gamg->data_cell_cols,(int)(info.nz_used/(PetscReal)M),nactivepe);
603: #if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
604: PetscLogStagePop();
605: #endif
606: /* stop if one node or one proc -- could pull back for singular problems */
607: if ( (pc_gamg->data_cell_cols && M/pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M/bs < 2) ) {
608: PetscInfo2(pc,"HARD stop of coarsening on level %D. Grid too small: %D block nodes\n",level,M/bs);
609: level++;
610: break;
611: }
612: } /* levels */
613: PetscFree(pc_gamg->data); /* per-setup data consumed; orig_data (if cached) survives for reuse */
615: PetscInfo2(pc,"%D levels, grid complexity = %g\n",level+1,nnztot/nnz0);
616: pc_gamg->Nlevels = level + 1;
617: fine_level = level;
618: PCMGSetLevels(pc,pc_gamg->Nlevels,NULL);
620: if (pc_gamg->Nlevels > 1) { /* don't setup MG if one level */
621: /* set default smoothers & set operators */
622: for (lidx = 1, level = pc_gamg->Nlevels-2; lidx <= fine_level; lidx++, level--) { /* note: PCMG level index runs opposite to GAMG's */
623: KSP smoother;
624: PC subpc;
626: PCMGGetSmoother(pc, lidx, &smoother);
627: KSPGetPC(smoother, &subpc);
629: KSPSetNormType(smoother, KSP_NORM_NONE); /* smoothers need no convergence test */
630: /* set ops */
631: KSPSetOperators(smoother, Aarr[level], Aarr[level]);
632: PCMGSetInterpolation(pc, lidx, Parr[level+1]);
634: /* set defaults */
635: KSPSetType(smoother, KSPCHEBYSHEV);
637: /* set blocks for ASM smoother that uses the 'aggregates' */
638: if (pc_gamg->use_aggs_in_asm) {
639: PetscInt sz;
640: IS *iss;
642: sz = nASMBlocksArr[level];
643: iss = ASMLocalIDsArr[level];
644: PCSetType(subpc, PCASM);
645: PCASMSetOverlap(subpc, 0);
646: PCASMSetType(subpc,PC_ASM_BASIC);
647: if (!sz) {
648: IS is;
649: ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is); /* empty rank still needs one (empty) subdomain */
650: PCASMSetLocalSubdomains(subpc, 1, NULL, &is);
651: ISDestroy(&is);
652: } else {
653: PetscInt kk;
654: PCASMSetLocalSubdomains(subpc, sz, NULL, iss);
655: for (kk=0; kk<sz; kk++) {
656: ISDestroy(&iss[kk]);
657: }
658: PetscFree(iss);
659: }
660: ASMLocalIDsArr[level] = NULL;
661: nASMBlocksArr[level] = 0;
662: } else {
663: PCSetType(subpc, PCSOR);
664: }
665: }
666: {
667: /* coarse grid */
668: KSP smoother,*k2; PC subpc,pc2; PetscInt ii,first;
669: Mat Lmat = Aarr[(level=pc_gamg->Nlevels-1)]; lidx = 0; /* PCMG level 0 is the coarsest */
670: PCMGGetSmoother(pc, lidx, &smoother);
671: KSPSetOperators(smoother, Lmat, Lmat);
672: if (!pc_gamg->use_parallel_coarse_grid_solver) {
673: KSPSetNormType(smoother, KSP_NORM_NONE);
674: KSPGetPC(smoother, &subpc);
675: PCSetType(subpc, PCBJACOBI); /* block Jacobi + local LU: direct solve on the one rank that owns the coarse grid */
676: PCSetUp(subpc);
677: PCBJacobiGetSubKSP(subpc,&ii,&first,&k2);
678: if (ii != 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ii %D is not one",ii);
679: KSPGetPC(k2[0],&pc2);
680: PCSetType(pc2, PCLU);
681: PCFactorSetShiftType(pc2,MAT_SHIFT_INBLOCKS); /* guard LU against zero pivots */
682: KSPSetTolerances(k2[0],PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,1);
683: KSPSetType(k2[0], KSPPREONLY);
684: /* This flag gets reset by PCBJacobiGetSubKSP(), but our BJacobi really does the same algorithm everywhere (and in
685: * fact, all but one process will have zero dofs), so we reset the flag to avoid having PCView_BJacobi attempt to
686: * view every subdomain as though they were different. */
687: ((PC_BJacobi*)subpc->data)->same_local_solves = PETSC_TRUE;
688: }
689: }
691: /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
692: PetscObjectOptionsBegin((PetscObject)pc);
693: PCSetFromOptions_MG(PetscOptionsObject,pc);
694: PetscOptionsEnd();
695: PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL); /* GAMG formed the coarse operators itself */
697: /* clean up */
698: for (level=1; level<pc_gamg->Nlevels; level++) {
699: MatDestroy(&Parr[level]); /* PCMG holds its own references now */
700: MatDestroy(&Aarr[level]);
701: }
702: PCSetUp_MG(pc);
703: } else {
704: KSP smoother;
705: PetscInfo(pc,"One level solver used (system is seen as DD). Using default solver.\n");
706: PCMGGetSmoother(pc, 0, &smoother);
707: KSPSetOperators(smoother, Aarr[0], Aarr[0]);
708: KSPSetType(smoother, KSPPREONLY);
709: PCSetUp_MG(pc);
710: }
711: return(0);
712: }
714: /* ------------------------------------------------------------------------- */
715: /*
716: PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
717: that was created with PCCreate_GAMG().
719: Input Parameter:
720: . pc - the preconditioner context
722: Application Interface Routine: PCDestroy()
723: */
/*
  PCDestroy_GAMG - free all GAMG state: per-setup data (via PCReset_GAMG), the
  subtype's private context (via ops->destroy), the ops table, the type name, and
  the PC_GAMG context itself, then delegate to PCDestroy_MG for the MG layer.
*/
724: PetscErrorCode PCDestroy_GAMG(PC pc)
725: {
727: PC_MG *mg = (PC_MG*)pc->data;
728: PC_GAMG *pc_gamg= (PC_GAMG*)mg->innerctx;
731: PCReset_GAMG(pc); /* frees data/orig_data */
732: if (pc_gamg->ops->destroy) {
733: (*pc_gamg->ops->destroy)(pc); /* subtype-specific teardown, if any */
734: }
735: PetscFree(pc_gamg->ops);
736: PetscFree(pc_gamg->gamg_type_name);
737: PetscFree(pc_gamg);
738: PCDestroy_MG(pc); /* must come last: pc->data (the MG context) is freed here */
739: return(0);
740: }
742: /*@
743: PCGAMGSetProcEqLim - Set number of equations to aim for per process on the coarse grids via processor reduction.
745: Logically Collective on PC
747: Input Parameters:
748: + pc - the preconditioner context
749: - n - the number of equations
752: Options Database Key:
753: . -pc_gamg_process_eq_limit <limit>
755: Notes:
756: GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit> equations on each process
757: that has degrees of freedom
759: Level: intermediate
761: .seealso: PCGAMGSetCoarseEqLim()
762: @*/
/* Public wrapper: dispatches to the implementation via the composed method (no-op for non-GAMG PCs). See manual page above. */
763: PetscErrorCode PCGAMGSetProcEqLim(PC pc, PetscInt n)
764: {
769: PetscTryMethod(pc,"PCGAMGSetProcEqLim_C",(PC,PetscInt),(pc,n));
770: return(0);
771: }
/* Implementation of PCGAMGSetProcEqLim(): store the per-process equation target used by process reduction. Non-positive n is silently ignored. */
773: static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
774: {
775: PC_MG *mg = (PC_MG*)pc->data;
776: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
779: if (n>0) pc_gamg->min_eq_proc = n; /* consumed in PCGAMGCreateLevel_GAMG when computing new_size */
780: return(0);
781: }
783: /*@
784: PCGAMGSetCoarseEqLim - Set maximum number of equations on coarsest grid.
786: Collective on PC
788: Input Parameters:
789: + pc - the preconditioner context
790: - n - maximum number of equations to aim for
792: Options Database Key:
793: . -pc_gamg_coarse_eq_limit <limit>
795: Notes: For example -pc_gamg_coarse_eq_limit 1000 will stop coarsening once the coarse grid
796: has fewer than 1000 unknowns.
798: Level: intermediate
800: .seealso: PCGAMGSetProcEqLim()
801: @*/
/* Public wrapper: dispatches to the implementation via the composed method (no-op for non-GAMG PCs). See manual page above. */
802: PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
803: {
808: PetscTryMethod(pc,"PCGAMGSetCoarseEqLim_C",(PC,PetscInt),(pc,n));
809: return(0);
810: }
/* Implementation of PCGAMGSetCoarseEqLim(): store the coarsest-grid size limit. Non-positive n is silently ignored. */
812: static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
813: {
814: PC_MG *mg = (PC_MG*)pc->data;
815: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
818: if (n>0) pc_gamg->coarse_eq_limit = n; /* loop-termination threshold in PCSetUp_GAMG */
819: return(0);
820: }
822: /*@
823: PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids
825: Collective on PC
827: Input Parameters:
828: + pc - the preconditioner context
829: - n - PETSC_TRUE or PETSC_FALSE
831: Options Database Key:
832: . -pc_gamg_repartition <true,false>
834: Notes:
835: this will generally improve the load balancing of the work on each level
837: Level: intermediate
839: .seealso: ()
840: @*/
/* Public wrapper: dispatches to the implementation via the composed method (no-op for non-GAMG PCs). See manual page above. */
841: PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
842: {
847: PetscTryMethod(pc,"PCGAMGSetRepartition_C",(PC,PetscBool),(pc,n));
848: return(0);
849: }
/* Implementation of PCGAMGSetRepartition(): toggle graph-partitioner-based repartitioning of coarse grids. */
851: static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
852: {
853: PC_MG *mg = (PC_MG*)pc->data;
854: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
857: pc_gamg->repart = n; /* selects the MatPartitioning path in PCGAMGCreateLevel_GAMG */
858: return(0);
859: }
861: /*@
862: PCGAMGSetReuseInterpolation - Reuse prolongation when rebuilding algebraic multigrid preconditioner
864: Collective on PC
866: Input Parameters:
867: + pc - the preconditioner context
868: - n - PETSC_TRUE or PETSC_FALSE
870: Options Database Key:
871: . -pc_gamg_reuse_interpolation <true,false>
873: Level: intermediate
875: Notes:
876: this may negatively affect the convergence rate of the method on new matrices if the matrix entries change a great deal, but allows
877: rebuilding the preconditioner quicker.
879: .seealso: ()
880: @*/
/* Public wrapper: dispatches to the implementation via the composed method (no-op for non-GAMG PCs). See manual page above. */
881: PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
882: {
887: PetscTryMethod(pc,"PCGAMGSetReuseInterpolation_C",(PC,PetscBool),(pc,n));
888: return(0);
889: }
/* Implementation of PCGAMGSetReuseInterpolation(): keep prolongators across setups (only Galerkin products are redone in PCSetUp_GAMG). */
891: static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
892: {
893: PC_MG *mg = (PC_MG*)pc->data;
894: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
897: pc_gamg->reuse_prol = n; /* checked in the re-setup branch of PCSetUp_GAMG */
898: return(0);
899: }
901: /*@
902: PCGAMGASMSetUseAggs - Have the PCGAMG smoother on each level use the aggregates defined by the coarsening process as the subdomains for the additive Schwarz preconditioner.
904: Collective on PC
906: Input Parameters:
907: + pc - the preconditioner context
908: - flg - PETSC_TRUE to use aggregates, PETSC_FALSE to not
910: Options Database Key:
911: . -pc_gamg_asm_use_agg
913: Level: intermediate
915: .seealso: ()
916: @*/
/* Public wrapper: dispatches to the implementation via the composed method (no-op for non-GAMG PCs). See manual page above. */
917: PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
918: {
923: PetscTryMethod(pc,"PCGAMGASMSetUseAggs_C",(PC,PetscBool),(pc,flg));
924: return(0);
925: }
/* Implementation of PCGAMGASMSetUseAggs(): use coarsening aggregates as ASM subdomains for the level smoothers. */
927: static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
928: {
929: PC_MG *mg = (PC_MG*)pc->data;
930: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
933: pc_gamg->use_aggs_in_asm = flg; /* consumed in PCSetUp_GAMG when configuring smoothers */
934: return(0);
935: }
937: /*@
938: PCGAMGSetUseParallelCoarseGridSolve - allow a parallel coarse grid solver
940: Collective on PC
942: Input Parameters:
943: + pc - the preconditioner context
944: - flg - PETSC_TRUE to not force coarse grid onto one processor
946: Options Database Key:
947: . -pc_gamg_use_parallel_coarse_grid_solver
949: Level: intermediate
951: .seealso: PCGAMGSetCoarseGridLayoutType(), PCGAMGSetCpuPinCoarseGrids()
952: @*/
953: PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve(PC pc, PetscBool flg)
954: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
959: PetscTryMethod(pc,"PCGAMGSetUseParallelCoarseGridSolve_C",(PC,PetscBool),(pc,flg));
960: return(0);
961: }
/* Type-specific setter: record whether the coarsest grid may stay distributed across processes */
963: static PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
964: {
965: PC_MG *mg = (PC_MG*)pc->data;
966: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
969: pc_gamg->use_parallel_coarse_grid_solver = flg;
970: return(0);
971: }
973: /*@
974: PCGAMGSetCpuPinCoarseGrids - pin reduced grids to CPU
976: Collective on PC
978: Input Parameters:
979: + pc - the preconditioner context
980: - flg - PETSC_TRUE to pin coarse grids to CPU
982: Options Database Key:
983: . -pc_gamg_cpu_pin_coarse_grids
985: Level: intermediate
987: .seealso: PCGAMGSetCoarseGridLayoutType(), PCGAMGSetUseParallelCoarseGridSolve()
988: @*/
989: PetscErrorCode PCGAMGSetCpuPinCoarseGrids(PC pc, PetscBool flg)
990: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
995: PetscTryMethod(pc,"PCGAMGSetCpuPinCoarseGrids_C",(PC,PetscBool),(pc,flg));
996: return(0);
997: }
/* Type-specific setter: record the CPU-pinning flag (only consulted in GPU builds; see PCView_GAMG) */
999: static PetscErrorCode PCGAMGSetCpuPinCoarseGrids_GAMG(PC pc, PetscBool flg)
1000: {
1001: PC_MG *mg = (PC_MG*)pc->data;
1002: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1005: pc_gamg->cpu_pin_coarse_grids = flg;
1006: return(0);
1007: }
1009: /*@
1010: PCGAMGSetCoarseGridLayoutType - place reduced grids on processors with natural order (compact type)
1012: Collective on PC
1014: Input Parameters:
1015: + pc - the preconditioner context
1016: - flg - Layout type
1018: Options Database Key:
1019: . -pc_gamg_coarse_grid_layout_type
1021: Level: intermediate
1023: .seealso: PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetCpuPinCoarseGrids()
1024: @*/
1025: PetscErrorCode PCGAMGSetCoarseGridLayoutType(PC pc, PCGAMGLayoutType flg)
1026: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
1031: PetscTryMethod(pc,"PCGAMGSetCoarseGridLayoutType_C",(PC,PCGAMGLayoutType),(pc,flg));
1032: return(0);
1033: }
/* Type-specific setter: record the coarse-grid layout (compact vs spread; see PCSetFromOptions_GAMG) */
1035: static PetscErrorCode PCGAMGSetCoarseGridLayoutType_GAMG(PC pc, PCGAMGLayoutType flg)
1036: {
1037: PC_MG *mg = (PC_MG*)pc->data;
1038: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1041: pc_gamg->layout_type = flg;
1042: return(0);
1043: }
1045: /*@
1046: PCGAMGSetNlevels - Sets the maximum number of levels PCGAMG will use
1048: Not collective on PC
1050: Input Parameters:
1051: + pc - the preconditioner
1052: - n - the maximum number of levels to use
1054: Options Database Key:
1055: . -pc_mg_levels
1057: Level: intermediate
1059: .seealso: ()
1060: @*/
1061: PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
1062: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
1067: PetscTryMethod(pc,"PCGAMGSetNlevels_C",(PC,PetscInt),(pc,n));
1068: return(0);
1069: }
/* Type-specific setter: record the maximum number of multigrid levels to build */
1071: static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
1072: {
1073: PC_MG *mg = (PC_MG*)pc->data;
1074: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1077: pc_gamg->Nlevels = n;
1078: return(0);
1079: }
1081: /*@
1082: PCGAMGSetThreshold - Relative threshold to use for dropping edges in aggregation graph
1084: Not collective on PC
1086: Input Parameters:
1087: + pc - the preconditioner context
1088: - threshold - the threshold value, 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
.  n - the number of threshold values supplied (one per level, finest first)
1090: Options Database Key:
1091: . -pc_gamg_threshold <threshold>
1093: Notes:
1094: Increasing the threshold decreases the rate of coarsening. Conversely reducing the threshold increases the rate of coarsening (aggressive coarsening) and thereby reduces the complexity of the coarse grids, and generally results in slower solver convergence rates. Reducing coarse grid complexity reduces the complexity of Galerkin coarse grid construction considerably.
1095: Before coarsening or aggregating the graph, GAMG removes small values from the graph with this threshold, thus reducing the coupling in the graph and giving a different (perhaps better) coarser set of points.
1097: Level: intermediate
1099: .seealso: PCGAMGFilterGraph(), PCGAMGSetSquareGraph()
1100: @*/
1101: PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
1102: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
1107: PetscTryMethod(pc,"PCGAMGSetThreshold_C",(PC,PetscReal[],PetscInt),(pc,v,n));
1108: return(0);
1109: }
1111: static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
1112: {
1113: PC_MG *mg = (PC_MG*)pc->data;
1114: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1115: PetscInt i;
1117: for (i=0;i<n;i++) pc_gamg->threshold[i] = v[i];
1118: do {pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;} while (++i<PETSC_GAMG_MAXLEVELS);
1119: return(0);
1120: }
1122: /*@
1123: PCGAMGSetThresholdScale - Relative threshold reduction at each level
1125: Not collective on PC
1127: Input Parameters:
1128: + pc - the preconditioner context
1129: - scale - the threshold value reduction, usually < 1.0
1131: Options Database Key:
1132: . -pc_gamg_threshold_scale <v>
1134: Level: advanced
1136: .seealso: ()
1137: @*/
1138: PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
1139: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
1144: PetscTryMethod(pc,"PCGAMGSetThresholdScale_C",(PC,PetscReal),(pc,v));
1145: return(0);
1146: }
/* Type-specific setter: record the per-level threshold scaling factor (applied to levels without explicit thresholds) */
1148: static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
1149: {
1150: PC_MG *mg = (PC_MG*)pc->data;
1151: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1153: pc_gamg->threshold_scale = v;
1154: return(0);
1155: }
1157: /*@C
1158: PCGAMGSetType - Set solution method
1160: Collective on PC
1162: Input Parameters:
1163: + pc - the preconditioner context
1164: - type - PCGAMGAGG, PCGAMGGEO, or PCGAMGCLASSICAL
1166: Options Database Key:
1167: . -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply
1169: Level: intermediate
1171: .seealso: PCGAMGGetType(), PCGAMG, PCGAMGType
1172: @*/
1173: PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
1174: {
/* forward to the type-specific implementation if one is composed on pc; silently a no-op otherwise */
1179: PetscTryMethod(pc,"PCGAMGSetType_C",(PC,PCGAMGType),(pc,type));
1180: return(0);
1181: }
1183: /*@C
1184: PCGAMGGetType - Get solution method
1186: Collective on PC
1188: Input Parameter:
1189: . pc - the preconditioner context
1191: Output Parameter:
1192: . type - the type of algorithm used
1194: Level: intermediate
1196: .seealso: PCGAMGSetType(), PCGAMGType
1197: @*/
1198: PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
1199: {
/* PetscUseMethod (unlike PetscTryMethod) errors if the implementation is missing */
1204: PetscUseMethod(pc,"PCGAMGGetType_C",(PC,PCGAMGType*),(pc,type));
1205: return(0);
1206: }
/* Type-specific getter: return the stored GAMG sub-type */
1208: static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
1209: {
1210: PC_MG *mg = (PC_MG*)pc->data;
1211: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1214: *type = pc_gamg->type;
1215: return(0);
1216: }
/*
   Type-specific setter: look up the constructor for 'type' in GAMGList, tear down
   any previous sub-type (destroy callback, shared coarse-grid data), then invoke
   the new sub-type's constructor on pc.
*/
1218: static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
1219: {
1220: PetscErrorCode ierr,(*r)(PC);
1221: PC_MG *mg = (PC_MG*)pc->data;
1222: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1225: pc_gamg->type = type;
/* validate the type name before destroying the current sub-type */
1226: PetscFunctionListFind(GAMGList,type,&r);
1227: if (!r) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown GAMG type %s given",type);
1228: if (pc_gamg->ops->destroy) {
1229: (*pc_gamg->ops->destroy)(pc);
1230: PetscMemzero(pc_gamg->ops,sizeof(struct _PCGAMGOps));
/* createlevel was zeroed with the rest of the ops table; restore the default */
1231: pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1232: /* cleaning up common data in pc_gamg - this should disappear someday */
1233: pc_gamg->data_cell_cols = 0;
1234: pc_gamg->data_cell_rows = 0;
1235: pc_gamg->orig_data_cell_cols = 0;
1236: pc_gamg->orig_data_cell_rows = 0;
1237: PetscFree(pc_gamg->data);
1238: pc_gamg->data_sz = 0;
1239: }
1240: PetscFree(pc_gamg->gamg_type_name);
1241: PetscStrallocpy(type,&pc_gamg->gamg_type_name);
/* run the new sub-type's constructor (fills pc_gamg->ops) */
1242: (*r)(pc);
1243: return(0);
1244: }
1246: /* -------------------------------------------------------------------------- */
1247: /*
1248: PCMGGetGridComplexity - compute coarse grid complexity of MG hierarchy
1250: Input Parameter:
1251: . pc - the preconditioner context
1253: Output Parameter:
1254: . gc - grid complexity = sum_i(nnz_i) / nnz_0
1256: Level: advanced
1257: */
1258: static PetscErrorCode PCMGGetGridComplexity(PC pc, PetscReal *gc)
1259: {
1261: PC_MG *mg = (PC_MG*)pc->data;
1262: PC_MG_Levels **mglevels = mg->levels;
1263: PetscInt lev;
1264: PetscLogDouble nnz0 = 0, sgc = 0;
1265: MatInfo info;
/* before setup there is no hierarchy: report 0 rather than erroring */
1268: if (!pc->setupcalled) {
1269: *gc = 0;
1270: return(0);
1271: }
1272: if (!mg->nlevels) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MG has no levels");
/* accumulate the global nonzero count of the smoother operator on every level */
1273: for (lev=0; lev<mg->nlevels; lev++) {
1274: Mat dB;
1275: KSPGetOperators(mglevels[lev]->smoothd,NULL,&dB);
1276: MatGetInfo(dB,MAT_GLOBAL_SUM,&info); /* global reduction */
1277: sgc += info.nz_used;
/* nnz0 is the finest level's nonzeros (level nlevels-1 here; matches nnz_0 in the formula above) */
1278: if (lev==mg->nlevels-1) nnz0 = info.nz_used;
1279: }
1280: if (nnz0 > 0) *gc = (PetscReal)(sgc/nnz0);
/* NOTE(review): message says "grid points" but nnz0 counts matrix nonzeros - wording looks off; confirm intent */
1281: else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number for grid points on finest level is not available");
1282: return(0);
1283: }
1285: static PetscErrorCode PCView_GAMG(PC pc,PetscViewer viewer)
1286: {
1287: PetscErrorCode ierr,i;
1288: PC_MG *mg = (PC_MG*)pc->data;
1289: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1290: PetscReal gc=0;
1292: PetscViewerASCIIPrintf(viewer," GAMG specific options\n");
1293: PetscViewerASCIIPrintf(viewer," Threshold for dropping small values in graph on each level =");
1294: for (i=0;i<pc_gamg->current_level;i++) {
1295: PetscViewerASCIIPrintf(viewer," %g",(double)pc_gamg->threshold[i]);
1296: }
1297: PetscViewerASCIIPrintf(viewer,"\n");
1298: PetscViewerASCIIPrintf(viewer," Threshold scaling factor for each level not specified = %g\n",(double)pc_gamg->threshold_scale);
1299: if (pc_gamg->use_aggs_in_asm) {
1300: PetscViewerASCIIPrintf(viewer," Using aggregates from coarsening process to define subdomains for PCASM\n");
1301: }
1302: if (pc_gamg->use_parallel_coarse_grid_solver) {
1303: PetscViewerASCIIPrintf(viewer," Using parallel coarse grid solver (all coarse grid equations not put on one process)\n");
1304: }
1305: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
1306: if (pc_gamg->cpu_pin_coarse_grids) {
1307: /* PetscViewerASCIIPrintf(viewer," Pinning coarse grids to the CPU)\n"); */
1308: }
1309: #endif
1310: /* if (pc_gamg->layout_type==PCGAMG_LAYOUT_COMPACT) { */
1311: /* PetscViewerASCIIPrintf(viewer," Put reduced grids on processes in natural order (ie, 0,1,2...)\n"); */
1312: /* } else { */
1313: /* PetscViewerASCIIPrintf(viewer," Put reduced grids on whole machine (ie, 0,1*f,2*f...,np-f)\n"); */
1314: /* } */
1315: if (pc_gamg->ops->view) {
1316: (*pc_gamg->ops->view)(pc,viewer);
1317: }
1318: PCMGGetGridComplexity(pc,&gc);
1319: PetscViewerASCIIPrintf(viewer," Complexity: grid = %g\n",gc);
1320: return(0);
1321: }
/*
   PCSetFromOptions_GAMG - process the -pc_gamg_* options database entries and
   store the results in the GAMG context; then delegate to the sub-type's
   setfromoptions callback.
*/
1323: PetscErrorCode PCSetFromOptions_GAMG(PetscOptionItems *PetscOptionsObject,PC pc)
1324: {
1326: PC_MG *mg = (PC_MG*)pc->data;
1327: PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1328: PetscBool flag;
1329: MPI_Comm comm;
1330: char prefix[256];
1331: PetscInt i,n;
1332: const char *pcpre;
1333: static const char *LayoutTypes[] = {"compact","spread","PCGAMGLayoutType","PC_GAMG_LAYOUT",0};
1335: PetscObjectGetComm((PetscObject)pc,&comm);
1336: PetscOptionsHead(PetscOptionsObject,"GAMG options");
1337: {
1338: char tname[256];
1339: PetscOptionsFList("-pc_gamg_type","Type of AMG method","PCGAMGSetType",GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag);
1340: if (flag) {
1341: PCGAMGSetType(pc,tname);
1342: }
1343: PetscOptionsBool("-pc_gamg_repartition","Repartion coarse grids","PCGAMGSetRepartition",pc_gamg->repart,&pc_gamg->repart,NULL);
1344: PetscOptionsBool("-pc_gamg_reuse_interpolation","Reuse prolongation operator","PCGAMGReuseInterpolation",pc_gamg->reuse_prol,&pc_gamg->reuse_prol,NULL);
1345: PetscOptionsBool("-pc_gamg_asm_use_agg","Use aggregation aggregates for ASM smoother","PCGAMGASMSetUseAggs",pc_gamg->use_aggs_in_asm,&pc_gamg->use_aggs_in_asm,NULL);
1346: PetscOptionsBool("-pc_gamg_use_parallel_coarse_grid_solver","Use parallel coarse grid solver (otherwise put last grid on one process)","PCGAMGSetUseParallelCoarseGridSolve",pc_gamg->use_parallel_coarse_grid_solver,&pc_gamg->use_parallel_coarse_grid_solver,NULL);
1347: PetscOptionsBool("-pc_gamg_cpu_pin_coarse_grids","Pin coarse grids to the CPU","PCGAMGSetCpuPinCoarseGrids",pc_gamg->cpu_pin_coarse_grids,&pc_gamg->cpu_pin_coarse_grids,NULL);
1348: PetscOptionsEnum("-pc_gamg_coarse_grid_layout_type","compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth","PCGAMGSetCoarseGridLayoutType",LayoutTypes,(PetscEnum)pc_gamg->layout_type,(PetscEnum*)&pc_gamg->layout_type,NULL);
1349: PetscOptionsInt("-pc_gamg_process_eq_limit","Limit (goal) on number of equations per process on coarse grids","PCGAMGSetProcEqLim",pc_gamg->min_eq_proc,&pc_gamg->min_eq_proc,NULL);
1350: PetscOptionsInt("-pc_gamg_coarse_eq_limit","Limit on number of equations for the coarse grid","PCGAMGSetCoarseEqLim",pc_gamg->coarse_eq_limit,&pc_gamg->coarse_eq_limit,NULL);
1351: PetscOptionsReal("-pc_gamg_threshold_scale","Scaling of threshold for each level not specified","PCGAMGSetThresholdScale",pc_gamg->threshold_scale,&pc_gamg->threshold_scale,NULL);
1352: n = PETSC_GAMG_MAXLEVELS;
1353: PetscOptionsRealArray("-pc_gamg_threshold","Relative threshold to use for dropping edges in aggregation graph","PCGAMGSetThreshold",pc_gamg->threshold,&n,&flag);
/* extrapolate unspecified per-level thresholds with threshold_scale; the guard
   (n >= 1 after the !flag default, n < PETSC_GAMG_MAXLEVELS) keeps the do-while
   indices in bounds */
1354: if (!flag || n < PETSC_GAMG_MAXLEVELS) {
1355: if (!flag) n = 1;
1356: i = n;
1357: do {pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;} while (++i<PETSC_GAMG_MAXLEVELS);
1358: }
1359: PetscOptionsInt("-pc_mg_levels","Set number of MG levels","PCGAMGSetNlevels",pc_gamg->Nlevels,&pc_gamg->Nlevels,NULL);
1361: /* set options for subtype */
1362: if (pc_gamg->ops->setfromoptions) {(*pc_gamg->ops->setfromoptions)(PetscOptionsObject,pc);}
1363: }
1364: PCGetOptionsPrefix(pc, &pcpre);
/* NOTE(review): 'prefix' is built but not used in the code visible here - presumably consumed by code elided from this view; confirm */
1365: PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");
1366: PetscOptionsTail();
1367: return(0);
1368: }
1370: /* -------------------------------------------------------------------------- */
1371: /*MC
1372: PCGAMG - Geometric algebraic multigrid (AMG) preconditioner
1374: Options Database Keys:
1375: + -pc_gamg_type <type> - one of agg, geo, or classical
1376: . -pc_gamg_repartition <true,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
1377: . -pc_gamg_reuse_interpolation <true,default=false> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations
1378: . -pc_gamg_asm_use_agg <true,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the PCASM smoother
1379: . -pc_gamg_process_eq_limit <limit, default=50> - GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit>
1380: equations on each process that has degrees of freedom
1381: . -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
1382: . -pc_gamg_threshold[] <thresh,default=0> - Before aggregating the graph GAMG will remove small values from the graph on each level
1383: - -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified
1385: Options Database Keys for default Aggregation:
1386: + -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smooth aggregation
1387: . -pc_gamg_sym_graph <true,default=false> - symmetrize the graph before computing the aggregation
1388: - -pc_gamg_square_graph <n,default=1> - number of levels to square the graph before aggregating it
1390: Multigrid options:
1391: + -pc_mg_cycles <v> - v or w, see PCMGSetCycleType()
1392: . -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see PCMGSetDistinctSmoothUp()
1393: . -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
1394: - -pc_mg_levels <levels> - Number of levels of multigrid to use.
1397: Notes:
1398: In order to obtain good performance for PCGAMG for vector valued problems you must
1399: Call MatSetBlockSize() to indicate the number of degrees of freedom per grid point
1400: Call MatSetNearNullSpace() (or PCSetCoordinates() if solving the equations of elasticity) to indicate the near null space of the operator
1401: See the Users Manual Chapter 4 for more details
1403: Level: intermediate
1405: .seealso: PCCreate(), PCSetType(), MatSetBlockSize(), PCMGType, PCSetCoordinates(), MatSetNearNullSpace(), PCGAMGSetType(), PCGAMGAGG, PCGAMGGEO, PCGAMGCLASSICAL, PCGAMGSetProcEqLim(),
1406: PCGAMGSetCoarseEqLim(), PCGAMGSetRepartition(), PCGAMGRegister(), PCGAMGSetReuseInterpolation(), PCGAMGASMSetUseAggs(), PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetNlevels(), PCGAMGSetThreshold(), PCGAMGGetType(), PCGAMGSetReuseInterpolation()
1407: M*/
1409: PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
1410: {
1411: PetscErrorCode ierr,i;
1412: PC_GAMG *pc_gamg;
1413: PC_MG *mg;
1416: /* register AMG type */
1417: PCGAMGInitializePackage();
1419: /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
1420: PCSetType(pc, PCMG);
1421: PetscObjectChangeTypeName((PetscObject)pc, PCGAMG);
1423: /* create a supporting struct and attach it to pc */
1424: PetscNewLog(pc,&pc_gamg);
1425: PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);
1426: mg = (PC_MG*)pc->data;
1427: mg->innerctx = pc_gamg;
1429: PetscNewLog(pc,&pc_gamg->ops);
1431: pc_gamg->setup_count = 0;
1432: /* these should be in subctx but repartitioning needs simple arrays */
1433: pc_gamg->data_sz = 0;
1434: pc_gamg->data = 0;
1436: /* overwrite the pointers of PCMG by the functions of base class PCGAMG */
1437: pc->ops->setfromoptions = PCSetFromOptions_GAMG;
1438: pc->ops->setup = PCSetUp_GAMG;
1439: pc->ops->reset = PCReset_GAMG;
1440: pc->ops->destroy = PCDestroy_GAMG;
1441: mg->view = PCView_GAMG;
1443: PetscObjectComposeFunction((PetscObject)pc,"PCMGGetLevels_C",PCMGGetLevels_MG);
1444: PetscObjectComposeFunction((PetscObject)pc,"PCMGSetLevels_C",PCMGSetLevels_MG);
1445: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetProcEqLim_C",PCGAMGSetProcEqLim_GAMG);
1446: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseEqLim_C",PCGAMGSetCoarseEqLim_GAMG);
1447: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetRepartition_C",PCGAMGSetRepartition_GAMG);
1448: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetReuseInterpolation_C",PCGAMGSetReuseInterpolation_GAMG);
1449: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGASMSetUseAggs_C",PCGAMGASMSetUseAggs_GAMG);
1450: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetUseParallelCoarseGridSolve_C",PCGAMGSetUseParallelCoarseGridSolve_GAMG);
1451: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCpuPinCoarseGrids_C",PCGAMGSetCpuPinCoarseGrids_GAMG);
1452: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseGridLayoutType_C",PCGAMGSetCoarseGridLayoutType_GAMG);
1453: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThreshold_C",PCGAMGSetThreshold_GAMG);
1454: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThresholdScale_C",PCGAMGSetThresholdScale_GAMG);
1455: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetType_C",PCGAMGSetType_GAMG);
1456: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGGetType_C",PCGAMGGetType_GAMG);
1457: PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetNlevels_C",PCGAMGSetNlevels_GAMG);
1458: pc_gamg->repart = PETSC_FALSE;
1459: pc_gamg->reuse_prol = PETSC_FALSE;
1460: pc_gamg->use_aggs_in_asm = PETSC_FALSE;
1461: pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
1462: pc_gamg->cpu_pin_coarse_grids = PETSC_FALSE;
1463: pc_gamg->layout_type = PCGAMG_LAYOUT_SPREAD;
1464: pc_gamg->min_eq_proc = 50;
1465: pc_gamg->coarse_eq_limit = 50;
1466: for (i=0;i<PETSC_GAMG_MAXLEVELS;i++) pc_gamg->threshold[i] = 0.;
1467: pc_gamg->threshold_scale = 1.;
1468: pc_gamg->Nlevels = PETSC_GAMG_MAXLEVELS;
1469: pc_gamg->current_level = 0; /* don't need to init really */
1470: pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1472: /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
1473: PCGAMGSetType(pc,PCGAMGAGG);
1474: return(0);
1475: }
1477: /*@C
1478: PCGAMGInitializePackage - This function initializes everything in the PCGAMG package. It is called
1479: from PCInitializePackage().
1481: Level: developer
1483: .seealso: PetscInitialize()
1484: @*/
1485: PetscErrorCode PCGAMGInitializePackage(void)
1486: {
1490: if (PCGAMGPackageInitialized) return(0);
1491: PCGAMGPackageInitialized = PETSC_TRUE;
1492: PetscFunctionListAdd(&GAMGList,PCGAMGGEO,PCCreateGAMG_GEO);
1493: PetscFunctionListAdd(&GAMGList,PCGAMGAGG,PCCreateGAMG_AGG);
1494: PetscFunctionListAdd(&GAMGList,PCGAMGCLASSICAL,PCCreateGAMG_Classical);
1495: PetscRegisterFinalize(PCGAMGFinalizePackage);
1497: /* general events */
1498: PetscLogEventRegister("PCGAMGGraph_AGG", 0, &PC_GAMGGraph_AGG);
1499: PetscLogEventRegister("PCGAMGGraph_GEO", PC_CLASSID, &PC_GAMGGraph_GEO);
1500: PetscLogEventRegister("PCGAMGCoarse_AGG", PC_CLASSID, &PC_GAMGCoarsen_AGG);
1501: PetscLogEventRegister("PCGAMGCoarse_GEO", PC_CLASSID, &PC_GAMGCoarsen_GEO);
1502: PetscLogEventRegister("PCGAMGProl_AGG", PC_CLASSID, &PC_GAMGProlongator_AGG);
1503: PetscLogEventRegister("PCGAMGProl_GEO", PC_CLASSID, &PC_GAMGProlongator_GEO);
1504: PetscLogEventRegister("PCGAMGPOpt_AGG", PC_CLASSID, &PC_GAMGOptProlongator_AGG);
1506: #if defined PETSC_GAMG_USE_LOG
1507: PetscLogEventRegister("GAMG: createProl", PC_CLASSID, &petsc_gamg_setup_events[SET1]);
1508: PetscLogEventRegister(" Graph", PC_CLASSID, &petsc_gamg_setup_events[GRAPH]);
1509: /* PetscLogEventRegister(" G.Mat", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_MAT]); */
1510: /* PetscLogEventRegister(" G.Filter", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_FILTER]); */
1511: /* PetscLogEventRegister(" G.Square", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_SQR]); */
1512: PetscLogEventRegister(" MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[SET4]);
1513: PetscLogEventRegister(" geo: growSupp", PC_CLASSID, &petsc_gamg_setup_events[SET5]);
1514: PetscLogEventRegister(" geo: triangle", PC_CLASSID, &petsc_gamg_setup_events[SET6]);
1515: PetscLogEventRegister(" search-set", PC_CLASSID, &petsc_gamg_setup_events[FIND_V]);
1516: PetscLogEventRegister(" SA: col data", PC_CLASSID, &petsc_gamg_setup_events[SET7]);
1517: PetscLogEventRegister(" SA: frmProl0", PC_CLASSID, &petsc_gamg_setup_events[SET8]);
1518: PetscLogEventRegister(" SA: smooth", PC_CLASSID, &petsc_gamg_setup_events[SET9]);
1519: PetscLogEventRegister("GAMG: partLevel", PC_CLASSID, &petsc_gamg_setup_events[SET2]);
1520: PetscLogEventRegister(" repartition", PC_CLASSID, &petsc_gamg_setup_events[SET12]);
1521: PetscLogEventRegister(" Invert-Sort", PC_CLASSID, &petsc_gamg_setup_events[SET13]);
1522: PetscLogEventRegister(" Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14]);
1523: PetscLogEventRegister(" Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15]);
1525: /* PetscLogEventRegister(" PL move data", PC_CLASSID, &petsc_gamg_setup_events[SET13]); */
1526: /* PetscLogEventRegister("GAMG: fix", PC_CLASSID, &petsc_gamg_setup_events[SET10]); */
1527: /* PetscLogEventRegister("GAMG: set levels", PC_CLASSID, &petsc_gamg_setup_events[SET11]); */
1528: /* create timer stages */
1529: #if defined GAMG_STAGES
1530: {
1531: char str[32];
1532: PetscInt lidx;
1533: sprintf(str,"MG Level %d (finest)",0);
1534: PetscLogStageRegister(str, &gamg_stages[0]);
1535: for (lidx=1; lidx<9; lidx++) {
1536: sprintf(str,"MG Level %d",lidx);
1537: PetscLogStageRegister(str, &gamg_stages[lidx]);
1538: }
1539: }
1540: #endif
1541: #endif
1542: return(0);
1543: }
1545: /*@C
1546: PCGAMGFinalizePackage - This function frees everything from the PCGAMG package. It is
1547: called from PetscFinalize() automatically.
1549: Level: developer
1551: .seealso: PetscFinalize()
1552: @*/
1553: PetscErrorCode PCGAMGFinalizePackage(void)
1554: {
/* clear the initialized flag so PCGAMGInitializePackage() re-registers on a subsequent PetscInitialize() */
1558: PCGAMGPackageInitialized = PETSC_FALSE;
1559: PetscFunctionListDestroy(&GAMGList);
1560: return(0);
1561: }
1563: /*@C
1564: PCGAMGRegister - Register a PCGAMG implementation.
1566: Input Parameters:
1567: + type - string that will be used as the name of the GAMG type.
1568: - create - function for creating the gamg context.
1570: Level: advanced
1572: .seealso: PCGAMGType, PCGAMG, PCGAMGSetType()
1573: @*/
1574: PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
1575: {
/* ensure the package (and GAMGList) exists before adding to it */
1579: PCGAMGInitializePackage();
1580: PetscFunctionListAdd(&GAMGList,type,create);
1581: return(0);
1582: }