Actual source code: ex62.c

static char help[] = "Stokes Problem discretized with finite elements,\n\
using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";

/*
For the isoviscous Stokes problem, which we discretize using the finite
element method on an unstructured mesh, the weak form equations are

  < \nabla v, \mu (\nabla u + {\nabla u}^T) > - < \nabla\cdot v, p > - < v, f > = 0
  < q, -\nabla\cdot u >                                                         = 0

Viewing:

To produce nice output, use

  -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append

You can get a LaTeX view of the mesh, with point numbering, using

  -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0

The data layout can be viewed using

  -dm_petscsection_view

Lots of information about the FEM assembly can be printed using

  -dm_plex_print_fem 3
*/
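
/* A minimal run, mirroring the 2d_p2_p1_check test below (it assumes the executable
   is named ex62 and PETSc was configured with triangle for simplicial mesh generation):

     ./ex62 -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
*/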

#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscds.h>
#include <petscbag.h>

// TODO: Plot residual by fields after each smoother iterate

typedef enum {SOL_QUADRATIC, SOL_TRIG, SOL_UNKNOWN} SolType;
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
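/* The trailing entries (enum name, option prefix, null) follow the PetscOptionsEnum() list
   convention; ProcessOptions() subtracts 3 from the length to get the number of solutions */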

typedef struct {
  PetscScalar mu; /* dynamic shear viscosity */
} Parameter;

typedef struct {
  PetscBag bag; /* Problem parameters */
  SolType  sol; /* MMS solution */
} AppCtx;

static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
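  /* Momentum flux tested against \nabla v: f1[c*dim+d] = mu (grad u + grad u^T)[c][d] - p delta_{cd} */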
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1]-uOff[0];
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      f1[c*dim+d] = mu * (u_x[c*dim+d] + u_x[d*dim+c]);
    }
    f1[c*dim+c] -= u[uOff[1]];
  }
}

static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
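  /* Continuity residual tested against q: f0 = -\nabla\cdot u */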
  PetscInt d;
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d*dim+d];
}

static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d*dim+d] = -1.0; /* < q, -\nabla\cdot u > */
}

static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* -< \nabla\cdot v, p > */
}

static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
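  /* g3[((c*Nc + c')*dim + d)*dim + d'] = d f1[c*dim+d] / d u_x[c'*dim+d'] */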
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1]-uOff[0];
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      g3[((c*Nc+c)*dim+d)*dim+d] += mu; /* < \nabla v, \nabla u > */
      g3[((c*Nc+d)*dim+d)*dim+c] += mu; /* < \nabla v, {\nabla u}^T > */
    }
  }
}

static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
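  /* Pressure mass matrix weighted by 1/mu, used below to precondition the Schur complement */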
  const PetscReal mu = PetscRealPart(constants[0]);

  g0[0] = 1.0/mu;
}

/* Quadratic MMS Solution
   2D:

     u = x^2 + y^2
     v = 2 x^2 - 2xy
     p = x + y - 1
     f = <1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 4x  4x \
                                  \ 4x -4x /
     div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 2x - 2x = 0

   3D:

     u = 2 x^2 + y^2 + z^2
     v = 2 x^2 - 2xy
     w = 2 x^2 - 2xz
     p = x + y + z - 3/2
     f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 8x  4x  4x \
                                  | 4x -4x  0  |
                                  \ 4x  0  -4x /
     div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 4x - 2x - 2x = 0
*/
static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
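  /* Dimension-independent form of the quadratic MMS velocity above, covering both the 2D and 3D cases */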
  PetscInt c;

  u[0] = (dim-1)*PetscSqr(x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSqr(x[c]);
    u[c]  = 2.0*PetscSqr(x[0]) - 2.0*x[0]*x[c];
  }
  return 0;
}

static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt d;

  u[0] = -0.5*dim;
  for (d = 0; d < dim; ++d) u[0] += x[d];
  return 0;
}

static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                           PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = (dim-1)*4.0*mu - 1.0;
  for (d = 1; d < dim; ++d) f0[d] = 4.0*mu - 1.0;
}

/* Trigonometric MMS Solution
   2D:

     u = sin(pi x) + sin(pi y)
     v = -pi cos(pi x) y
     p = sin(2 pi x) + sin(2 pi y)
     f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>

   so that

     e(u) = (grad u + grad u^T) = /        2pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y \
                                  \ pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)          /
     div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
     \nabla \cdot u             = pi cos(pi x) - pi cos(pi x) = 0

   3D:

     u = 2 sin(pi x) + sin(pi y) + sin(pi z)
     v = -pi cos(pi x) y
     w = -pi cos(pi x) z
     p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
     f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>

   so that

     e(u) = (grad u + grad u^T) = /        4pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y  pi cos(pi z) + pi^2 sin(pi x) z \
                                  | pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)                        0                  |
                                  \ pi cos(pi z) + pi^2 sin(pi x) z               0                         -2pi cos(pi x)            /
     div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
     \nabla \cdot u             = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
*/
static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;

  u[0] = (dim-1)*PetscSinReal(PETSC_PI*x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSinReal(PETSC_PI*x[c]);
    u[c]  = -PETSC_PI*PetscCosReal(PETSC_PI*x[0]) * x[c];
  }
  return 0;
}

static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt d;

  for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0*PETSC_PI*x[d]);
  return 0;
}

static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[0]) - (dim-1)*mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[0]);
  for (d = 1; d < dim; ++d) {
    f0[0] -= mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[d]);
    f0[d]  = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[d]) + mu*PetscPowRealInt(PETSC_PI, 3)*PetscCosReal(PETSC_PI*x[0])*x[d];
  }
}

static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscInt sol;

  options->sol = SOL_QUADRATIC;

  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  sol  = options->sol;
  PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, (sizeof(SolTypes)/sizeof(SolTypes[0]))-3, SolTypes[options->sol], &sol, NULL);
  options->sol = (SolType) sol;
  PetscOptionsEnd();
  return 0;
}

static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  DMCreate(comm, dm);
  DMSetType(*dm, DMPLEX);
  DMSetFromOptions(*dm);
  DMViewFromOptions(*dm, NULL, "-dm_view");
  return 0;
}

static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
{
  Parameter *p;

  /* setup PETSc parameter bag */
  PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag);
  PetscBagGetData(ctx->bag, (void **) &p);
  PetscBagSetName(ctx->bag, "par", "Stokes Parameters");
  PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s");
  PetscBagSetFromOptions(ctx->bag);
  {
    PetscViewer       viewer;
    PetscViewerFormat format;
    PetscBool         flg;

    PetscOptionsGetViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg);
    if (flg) {
      PetscViewerPushFormat(viewer, format);
      PetscBagView(ctx->bag, viewer);
      PetscViewerFlush(viewer);
      PetscViewerPopFormat(viewer);
      PetscViewerDestroy(&viewer);
    }
  }
  return 0;
}

static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
{
  PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
  PetscDS        ds;
  DMLabel        label;
  const PetscInt id = 1;

  DMGetDS(dm, &ds);
  switch (user->sol) {
    case SOL_QUADRATIC:
      PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u);
      exactFuncs[0] = quadratic_u;
      exactFuncs[1] = quadratic_p;
      break;
    case SOL_TRIG:
      PetscDSSetResidual(ds, 0, f0_trig_u, f1_u);
      exactFuncs[0] = trig_u;
      exactFuncs[1] = trig_p;
      break;
    default: SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%D)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
  }
  PetscDSSetResidual(ds, 1, f0_p, NULL);
  PetscDSSetJacobian(ds, 0, 0, NULL, NULL,  NULL,  g3_uu);
  PetscDSSetJacobian(ds, 0, 1, NULL, NULL,  g2_up, NULL);
  PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL,  NULL);
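  /* Precondition with the velocity operator and the (1/mu) pressure mass matrix standing in for the Schur complement */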
  PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu);
  PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL);

  PetscDSSetExactSolution(ds, 0, exactFuncs[0], user);
  PetscDSSetExactSolution(ds, 1, exactFuncs[1], user);

  DMGetLabel(dm, "marker", &label);
  DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (void (*)(void)) exactFuncs[0], NULL, user, NULL);

  /* Make constant values available to pointwise functions */
  {
    Parameter  *param;
    PetscScalar constants[1];

    PetscBagGetData(user->bag, (void **) &param);
    constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
    PetscDSSetConstants(ds, 1, constants);
  }
  return 0;
}

static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 0.0;
  return 0;
}
static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 1.0;
  return 0;
}

static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
{
  Vec vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void* ctx) = {zero, zero};

  if (origField != 1) SETERRQ1(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Field %D should be 1 for pressure", origField);
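  /* Project the constant function into the pressure space: with essential conditions on the
     velocity over the whole boundary, the pressure is only determined up to a constant */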
  funcs[field] = one;
  {
    PetscDS ds;
    DMGetDS(dm, &ds);
    PetscObjectViewFromOptions((PetscObject) ds, NULL, "-ds_view");
  }
  DMCreateGlobalVector(dm, &vec);
  DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec);
  VecNormalize(vec, NULL);
  MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace);
  VecDestroy(&vec);
  /* New style for field null spaces */
  {
    PetscObject  pressure;
    MatNullSpace nullspacePres;

    DMGetField(dm, field, NULL, &pressure);
    MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres);
    PetscObjectCompose(pressure, "nullspace", (PetscObject) nullspacePres);
    MatNullSpaceDestroy(&nullspacePres);
  }
  return 0;
}

static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
{
  DM              cdm = dm;
  PetscQuadrature q   = NULL;
  PetscBool       simplex;
  PetscInt        dim, Nf = 2, f, Nc[2];
  const char     *name[2]   = {"velocity", "pressure"};
  const char     *prefix[2] = {"vel_",     "pres_"};

  DMGetDimension(dm, &dim);
  DMPlexIsSimplex(dm, &simplex);
  Nc[0] = dim;
  Nc[1] = 1;
  for (f = 0; f < Nf; ++f) {
    PetscFE fe;

    PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe);
    PetscObjectSetName((PetscObject) fe, name[f]);
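    /* All fields must use the same quadrature so that pointwise values line up during assembly */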
    if (!q) {PetscFEGetQuadrature(fe, &q);}
    PetscFESetQuadrature(fe, q);
    DMSetField(dm, f, NULL, (PetscObject) fe);
    PetscFEDestroy(&fe);
  }
  DMCreateDS(dm);
  (*setupEqn)(dm, user);
  while (cdm) {
    DMCopyDisc(dm, cdm);
    DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace);
    DMGetCoarseDM(cdm, &cdm);
  }
  return 0;
}

int main(int argc, char **argv)
{
  SNES           snes;
  DM             dm;
  Vec            u;
  AppCtx         user;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, help);if (ierr) return ierr;
  ProcessOptions(PETSC_COMM_WORLD, &user);
  CreateMesh(PETSC_COMM_WORLD, &user, &dm);
  SNESCreate(PetscObjectComm((PetscObject) dm), &snes);
  SNESSetDM(snes, dm);
  DMSetApplicationContext(dm, &user);

  SetupParameters(PETSC_COMM_WORLD, &user);
  SetupProblem(dm, SetupEqn, &user);
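  /* Cache closure indices to speed up element vector and matrix assembly */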
  DMPlexCreateClosureIndex(dm, NULL);

  DMCreateGlobalVector(dm, &u);
  DMPlexSetSNESLocalFEM(dm, &user, &user, &user);
  SNESSetFromOptions(snes);
  DMSNESCheckFromOptions(snes, u);
  PetscObjectSetName((PetscObject) u, "Solution");
  {
    Mat          J;
    MatNullSpace sp;

    SNESSetUp(snes);
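    /* The Jacobian exists after SNESSetUp(), so we can attach the constant-pressure null space to it */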
    CreatePressureNullSpace(dm, 1, 1, &sp);
    SNESGetJacobian(snes, &J, NULL, NULL, NULL);
    MatSetNullSpace(J, sp);
    MatNullSpaceDestroy(&sp);
    PetscObjectSetName((PetscObject) J, "Jacobian");
    MatViewFromOptions(J, NULL, "-J_view");
  }
  SNESSolve(snes, NULL, u);

  VecDestroy(&u);
  SNESDestroy(&snes);
  DMDestroy(&dm);
  PetscBagDestroy(&user.bag);
  ierr = PetscFinalize();
  return ierr;
}
/*TEST

  test:
    suffix: 2d_p2_p1_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: triangle
    args: -sol quadratic -dm_refine 2 -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check
    requires: ctetgen
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: ctetgen
    args: -sol quadratic -dm_refine 2 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_conv
    requires: triangle
    # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p2_p1_conv_gamg
    requires: triangle
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_p2_p1_conv
    requires: ctetgen !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
    args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q2_q1_check
    args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_q2_q1_check
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_q2_q1_conv
    # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_q2_q1_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
    args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p3_p2_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 3d_p3_p2_check
    requires: ctetgen !single
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 2d_p3_p2_conv
    requires: triangle
    # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
    args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_p3_p2_conv
    requires: ctetgen long_runtime
    # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
    args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q1_p0_conv
    requires: !single
    # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
    args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_q1_p0_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd

  # Stokes preconditioners
  #   Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
  #   Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_triangular
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
  #   Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_upper
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_lower
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_full
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Full Schur + Velocity GMG
  test:
    suffix: 2d_p2_p1_gmg_vcycle
    requires: triangle
    args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_atol 1e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
  #   SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_simple
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
        -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
  #   FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_fetidp
    requires: triangle mumps
    nsize: 5
    args: -sol quadratic -dm_refine 2 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 2d_q2_q1_fetidp
    requires: mumps
    nsize: 5
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 3d_p2_p1_fetidp
    requires: ctetgen mumps suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
        -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
        -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
        -fetidp_bddelta_pc_factor_mat_ordering_type external \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  test:
    suffix: 3d_q2_q1_fetidp
    requires: suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
        -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  #   BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_bddc
    nsize: 2
    requires: triangle !single
    args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
        -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
  #   Vanka
  test:
    suffix: 2d_q1_p0_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
        -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_q1_p0_vanka_denseinv
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
        -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  #   Vanka smoother
  test:
    suffix: 2d_q1_p0_gmg_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type mg \
        -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
        -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
          -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
        -mg_coarse_pc_type svd

TEST*/