Actual source code: ex62.c (PETSc 3.14.6, 2021-03-30)
static char help[] = "Stokes Problem discretized with finite elements,\n\
using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";

/*
For the isoviscous Stokes problem, which we discretize using the finite
element method on an unstructured mesh, the weak form equations are

  < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > - < v, f > = 0
  < q, -\nabla\cdot u > = 0

Viewing:

To produce nice output, use

  -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append

You can get a LaTeX view of the mesh, with point numbering, using

  -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0

The data layout can be viewed using

  -dm_petscsection_view

Lots of information about the FEM assembly can be printed using

  -dm_plex_print_fem 3
*/
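
/* A typical verification run, mirroring the tests at the bottom of this file, might be

     ./ex62 -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

   which solves with P2-P1 elements and checks the discretization against the quadratic MMS solution. */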

#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscds.h>
#include <petscbag.h>

// TODO: Plot residual by fields after each smoother iterate

typedef enum {SOL_QUADRATIC, SOL_TRIG, SOL_UNKNOWN} SolType;
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
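/* The trailing "SolType", "SOL_", 0 entries follow the PetscOptionsEnum list convention;
   ProcessOptions() subtracts them from the choice count passed to PetscOptionsEList() */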

typedef struct {
  PetscScalar mu; /* dynamic shear viscosity */
} Parameter;

typedef struct {
  /* Domain and mesh definition */
  char     filename[PETSC_MAX_PATH_LEN];
  /* Problem definition */
  PetscBag bag; /* Problem parameters */
  SolType  sol; /* MMS solution */
} AppCtx;

static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1]-uOff[0];
  PetscInt        c, d;
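  /* Momentum flux: f1[c*dim+d] = mu (u_x[c*dim+d] + u_x[d*dim+c]), with the pressure u[uOff[1]] subtracted on the diagonal */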

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      f1[c*dim+d] = mu * (u_x[c*dim+d] + u_x[d*dim+c]);
    }
    f1[c*dim+c] -= u[uOff[1]];
  }
}

static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt d;
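  /* Incompressibility residual < q, -\nabla\cdot u > */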
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d*dim+d];
}

static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d*dim+d] = -1.0; /* < q, -\nabla\cdot u > */
}

static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* -< \nabla\cdot v, p > */
}

static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1]-uOff[0];
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      g3[((c*Nc+c)*dim+d)*dim+d] += mu; /* < \nabla v, \nabla u > */
      g3[((c*Nc+d)*dim+d)*dim+c] += mu; /* < \nabla v, {\nabla u}^T > */
    }
  }
}
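
/* The (1,1) block used only in the preconditioner: a pressure mass matrix weighted by 1/mu,
   a standard approximation to the Schur complement for the isoviscous Stokes problem */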
static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);

  g0[0] = 1.0/mu;
}

/* Quadratic MMS Solution
   2D:

     u = x^2 + y^2
     v = 2 x^2 - 2xy
     p = x + y - 1
     f = <1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 4x  4x \
                                  \ 4x -4x /
     div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 2x - 2x = 0

   3D:

     u = 2 x^2 + y^2 + z^2
     v = 2 x^2 - 2xy
     w = 2 x^2 - 2xz
     p = x + y + z - 3/2
     f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 8x  4x  4x \
                                  | 4x -4x   0 |
                                  \ 4x   0 -4x /
     div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 4x - 2x - 2x = 0
*/
static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;

  u[0] = (dim-1)*PetscSqr(x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSqr(x[c]);
    u[c]  = 2.0*PetscSqr(x[0]) - 2.0*x[0]*x[c];
  }
  return 0;
}

static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt d;

  u[0] = -0.5*dim;
  for (d = 0; d < dim; ++d) u[0] += x[d];
  return 0;
}

static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                           PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = (dim-1)*4.0*mu - 1.0;
  for (d = 1; d < dim; ++d) f0[d] = 4.0*mu - 1.0;
}

/* Trigonometric MMS Solution
   2D:

     u = sin(pi x) + sin(pi y)
     v = -pi cos(pi x) y
     p = sin(2 pi x) + sin(2 pi y)
     f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>

   so that

     e(u) = (grad u + grad u^T) = / 2pi cos(pi x)                   pi cos(pi y) + pi^2 sin(pi x) y \
                                  \ pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x)                  /
     div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
     \nabla \cdot u             = pi cos(pi x) - pi cos(pi x) = 0

   3D:

     u = 2 sin(pi x) + sin(pi y) + sin(pi z)
     v = -pi cos(pi x) y
     w = -pi cos(pi x) z
     p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
     f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>

   so that

     e(u) = (grad u + grad u^T) = / 4pi cos(pi x)                   pi cos(pi y) + pi^2 sin(pi x) y pi cos(pi z) + pi^2 sin(pi x) z \
                                  | pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x)                  0                               |
                                  \ pi cos(pi z) + pi^2 sin(pi x) z 0                               -2pi cos(pi x)                  /
     div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
     \nabla \cdot u             = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
*/
static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;

  u[0] = (dim-1)*PetscSinReal(PETSC_PI*x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSinReal(PETSC_PI*x[c]);
    u[c]  = -PETSC_PI*PetscCosReal(PETSC_PI*x[0]) * x[c];
  }
  return 0;
}

static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt d;

  for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0*PETSC_PI*x[d]);
  return 0;
}

static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[0]) - (dim-1)*mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[0]);
  for (d = 1; d < dim; ++d) {
    f0[0] -= mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[d]);
    f0[d]  = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[d]) + mu*PetscPowRealInt(PETSC_PI, 3)*PetscCosReal(PETSC_PI*x[0])*x[d];
  }
}

static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscInt sol;

  options->filename[0] = '\0';
  options->sol         = SOL_QUADRATIC;

  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  sol = options->sol;
  PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, (sizeof(SolTypes)/sizeof(SolTypes[0]))-3, SolTypes[options->sol], &sol, NULL);
  options->sol = (SolType) sol;
  PetscOptionsEnd();
  return(0);
}

static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  size_t len;

  PetscStrlen(user->filename, &len);
  if (len) {DMPlexCreateFromFile(comm, user->filename, PETSC_TRUE, dm);}
  else     {DMPlexCreateBoxMesh(comm, 2, PETSC_TRUE, NULL, NULL, NULL, NULL, PETSC_TRUE, dm);}
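  /* Mesh options such as -dm_refine, -dm_distribute, and the -dm_plex_box_* flags used in the tests below take effect here */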
  DMSetFromOptions(*dm);
  DMViewFromOptions(*dm, NULL, "-dm_view");
  return(0);
}

static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
{
  Parameter *p;

  /* setup PETSc parameter bag */
  PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag);
  PetscBagGetData(ctx->bag, (void **) &p);
  PetscBagSetName(ctx->bag, "par", "Stokes Parameters");
  PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s");
  PetscBagSetFromOptions(ctx->bag);
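  /* The parameter values can be echoed at runtime with -param_view */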
  {
    PetscViewer       viewer;
    PetscViewerFormat format;
    PetscBool         flg;

    PetscOptionsGetViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg);
    if (flg) {
      PetscViewerPushFormat(viewer, format);
      PetscBagView(ctx->bag, viewer);
      PetscViewerFlush(viewer);
      PetscViewerPopFormat(viewer);
      PetscViewerDestroy(&viewer);
    }
  }
  return(0);
}

static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
{
  PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
  PetscDS        ds;
  const PetscInt id = 1;

  DMGetDS(dm, &ds);
  switch (user->sol) {
  case SOL_QUADRATIC:
    PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u);
    exactFuncs[0] = quadratic_u;
    exactFuncs[1] = quadratic_p;
    break;
  case SOL_TRIG:
    PetscDSSetResidual(ds, 0, f0_trig_u, f1_u);
    exactFuncs[0] = trig_u;
    exactFuncs[1] = trig_p;
    break;
  default: SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%D)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
  }
  PetscDSSetResidual(ds, 1, f0_p, NULL);
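  /* Saddle-point Jacobian blocks: (0,0) viscous term (g3_uu), (0,1) pressure gradient (g2_up), (1,0) divergence (g1_pu).
     The preconditioner keeps the viscous block and fills the zero (1,1) block with the scaled pressure mass matrix (g0_pp). */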
  PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu);
  PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL);
  PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL);
  PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu);
  PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL);

  PetscDSSetExactSolution(ds, 0, exactFuncs[0], user);
  PetscDSSetExactSolution(ds, 1, exactFuncs[1], user);

  DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", "marker", 0, 0, NULL, (void (*)(void)) exactFuncs[0], NULL, 1, &id, user);

  /* Make constant values available to pointwise functions */
  {
    Parameter   *param;
    PetscScalar  constants[1];

    PetscBagGetData(user->bag, (void **) &param);
    constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
    PetscDSSetConstants(ds, 1, constants);
  }
  return(0);
}

static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 0.0;
  return 0;
}
static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 1.0;
  return 0;
}
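
/* With essential (Dirichlet) conditions on the whole velocity boundary, the pressure is determined
   only up to a constant, so we construct that constant null space explicitly */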
static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
{
  Vec vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero, zero};

  if (origField != 1) SETERRQ1(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Field %D should be 1 for pressure", origField);
  funcs[field] = one;
  {
    PetscDS ds;
    DMGetDS(dm, &ds);
    PetscObjectViewFromOptions((PetscObject) ds, NULL, "-ds_view");
  }
  DMCreateGlobalVector(dm, &vec);
  DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec);
  VecNormalize(vec, NULL);
  MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace);
  VecDestroy(&vec);
  /* New style for field null spaces */
  {
    PetscObject  pressure;
    MatNullSpace nullspacePres;

    DMGetField(dm, field, NULL, &pressure);
    MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres);
    PetscObjectCompose(pressure, "nullspace", (PetscObject) nullspacePres);
    MatNullSpaceDestroy(&nullspacePres);
  }
  return(0);
}

static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
{
  DM              cdm = dm;
  PetscQuadrature q   = NULL;
  DMPolytopeType  ct;
  PetscBool       simplex;
  PetscInt        dim, Nf = 2, f, Nc[2], cStart;
  const char     *name[2]   = {"velocity", "pressure"};
  const char     *prefix[2] = {"vel_", "pres_"};

  DMGetDimension(dm, &dim);
  DMPlexGetHeightStratum(dm, 0, &cStart, NULL);
  DMPlexGetCellType(dm, cStart, &ct);
  simplex = DMPolytopeTypeGetNumVertices(ct) == DMPolytopeTypeGetDim(ct)+1 ? PETSC_TRUE : PETSC_FALSE;
  Nc[0] = dim;
  Nc[1] = 1;
  for (f = 0; f < Nf; ++f) {
    PetscFE fe;

    PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe);
    PetscObjectSetName((PetscObject) fe, name[f]);
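    /* Reuse the quadrature of the first field (velocity) for all fields so mixed-field integrals are evaluated consistently */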
    if (!q) {PetscFEGetQuadrature(fe, &q);}
    PetscFESetQuadrature(fe, q);
    DMSetField(dm, f, NULL, (PetscObject) fe);
    PetscFEDestroy(&fe);
  }
  DMCreateDS(dm);
  (*setupEqn)(dm, user);
  while (cdm) {
    DMCopyDisc(dm, cdm);
    DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace);
    DMGetCoarseDM(cdm, &cdm);
  }
  return(0);
}

int main(int argc, char **argv)
{
  SNES           snes;
  DM             dm;
  Vec            u;
  AppCtx         user;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, help);if (ierr) return ierr;
  ProcessOptions(PETSC_COMM_WORLD, &user);
  CreateMesh(PETSC_COMM_WORLD, &user, &dm);
  SNESCreate(PetscObjectComm((PetscObject) dm), &snes);
  SNESSetDM(snes, dm);
  DMSetApplicationContext(dm, &user);

  SetupParameters(PETSC_COMM_WORLD, &user);
  SetupProblem(dm, SetupEqn, &user);
  DMPlexCreateClosureIndex(dm, NULL);

  DMCreateGlobalVector(dm, &u);
  DMPlexSetSNESLocalFEM(dm, &user, &user, &user);
  SNESSetFromOptions(snes);
  DMSNESCheckFromOptions(snes, u);
  PetscObjectSetName((PetscObject) u, "Solution");
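  /* Attach the constant-pressure null space to the Jacobian so the Krylov solver handles the singular saddle-point system consistently */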
  {
    Mat          J;
    MatNullSpace sp;

    SNESSetUp(snes);
    CreatePressureNullSpace(dm, 1, 1, &sp);
    SNESGetJacobian(snes, &J, NULL, NULL, NULL);
    MatSetNullSpace(J, sp);
    MatNullSpaceDestroy(&sp);
    PetscObjectSetName((PetscObject) J, "Jacobian");
    MatViewFromOptions(J, NULL, "-J_view");
  }
  SNESSolve(snes, NULL, u);

  VecDestroy(&u);
  SNESDestroy(&snes);
  DMDestroy(&dm);
  PetscBagDestroy(&user.bag);
  ierr = PetscFinalize();
  return ierr;
}
/*TEST

  test:
    suffix: 2d_p2_p1_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: triangle
    args: -sol quadratic -dm_refine 2 -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check
    requires: ctetgen
    args: -sol quadratic -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: ctetgen
    args: -sol quadratic -dm_refine 2 -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_conv
    requires: triangle
    # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p2_p1_conv_gamg
    requires: triangle
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_p2_p1_conv
    requires: ctetgen !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
    args: -sol trig -dm_plex_box_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q2_q1_check
    args: -sol quadratic -dm_plex_box_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_q2_q1_check
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_q2_q1_conv
    # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -dm_plex_box_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_q2_q1_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
    args: -sol trig -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p3_p2_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 3d_p3_p2_check
    requires: ctetgen !single
    args: -sol quadratic -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 2d_p3_p2_conv
    requires: triangle
    # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
    args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_p3_p2_conv
    requires: ctetgen long_runtime
    # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
    args: -sol trig -dm_plex_box_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q1_p0_conv
    requires: !single
    # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
    args: -sol quadratic -dm_plex_box_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_q1_p0_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd

  # Stokes preconditioners
  # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
  # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_triangular
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
  # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_upper
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_lower
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_full
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Full Schur + Velocity GMG
  test:
    suffix: 2d_p2_p1_gmg_vcycle
    requires: triangle
    args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_atol 1e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
  # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_simple
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
      -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
  # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_fetidp
    requires: triangle mumps
    nsize: 5
    args: -sol quadratic -dm_refine 2 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 2d_q2_q1_fetidp
    requires: mumps
    nsize: 5
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine 2 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 3d_p2_p1_fetidp
    requires: ctetgen mumps suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
      -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
      -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
      -fetidp_bddelta_pc_factor_mat_ordering_type external \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  test:
    suffix: 3d_q2_q1_fetidp
    requires: suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
      -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  # BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_bddc
    nsize: 2
    requires: triangle !single
    args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
  # Vanka
  test:
    suffix: 2d_q1_p0_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
      -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_q1_p0_vanka_denseinv
    requires: double !complex
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
      -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  # Vanka smoother
  test:
    suffix: 2d_q1_p0_gmg_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type mg \
      -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
      -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
      -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
      -mg_coarse_pc_type svd

TEST*/