Actual source code: ex46.c (petsc-3.14.6, 2021-03-30)
static char help[] = "Time dependent Navier-Stokes problem in 2d and 3d with finite elements.\n\
We solve the Navier-Stokes equations in a rectangular\n\
domain, using a parallel unstructured mesh (DMPLEX) to discretize it.\n\
This example supports discretized auxiliary fields (Re) as well as\n\
multilevel nonlinear solvers.\n\
Contributed by: Julian Andrej <juan@tf.uni-kiel.de>\n\n\n";
#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscts.h>
#include <petscds.h>
/*
  Navier-Stokes equations:

    du/dt + u . grad u - 1/Re \Delta u + grad p = f
    div u = 0
*/
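
/*
  PetscDS expects each weak-form equation as a pair of pointwise functions: f0 is tested
  against the basis functions and f1 against their gradients,

    \int_\Omega \phi . f0(u, u_t, \nabla u, x, t) + \nabla\phi : f1(u, u_t, \nabla u, x, t) = 0

  so below f0 carries the time derivative, convection, and MMS forcing, while f1 carries
  the viscous stress and pressure contributions.
*/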
typedef struct {
  PetscInt  dim;
  PetscBool simplex;
  PetscInt  mms;
  PetscErrorCode (**exactFuncs)(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx);
} AppCtx;
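
/* The Reynolds number is hard-coded here (not a command-line option) and is used directly by the residual and Jacobian kernels below */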
#define REYN 400.0
/* MMS1

   u = t + x^2 + y^2
   v = t + 2*x^2 - 2*x*y
   p = x + y - 1

   f_x = -2*t*(x + y) + 2*x*y^2 - 4*x^2*y - 2*x^3 + 4.0/Re - 1.0
   f_y = -2*t*x + 2*y^3 - 4*x*y^2 - 2*x^2*y + 4.0/Re - 1.0

   so that

     u_t + u \cdot \nabla u - 1/Re \Delta u + \nabla p + f
       = <1, 1> + <t (2x + 2y) + 2x^3 + 4x^2 y - 2xy^2, 2tx + 2x^2 y + 4xy^2 - 2y^3> - 1/Re <4, 4> + <1, 1>
         + <-t (2x + 2y) + 2xy^2 - 4x^2 y - 2x^3 + 4/Re - 1, -2tx + 2y^3 - 4xy^2 - 2x^2 y + 4/Re - 1>
       = 0
     \nabla \cdot u = 2x - 2x = 0

   where

     u \cdot \nabla u = <u, v> . <<u_x, v_x>, <u_y, v_y>> = <u u_x + v u_y, u v_x + v v_y>
*/
PetscErrorCode mms1_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = time + x[0]*x[0] + x[1]*x[1];
  u[1] = time + 2.0*x[0]*x[0] - 2.0*x[0]*x[1];
  return 0;
}

PetscErrorCode mms1_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = x[0] + x[1] - 1.0;
  return 0;
}
/* MMS2 */

static PetscErrorCode mms2_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = PetscSinReal(time + x[0])*PetscSinReal(time + x[1]);
  u[1] = PetscCosReal(time + x[0])*PetscCosReal(time + x[1]);
  return 0;
}

static PetscErrorCode mms2_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = PetscSinReal(time + x[0] - x[1]);
  return 0;
}
static void f0_mms1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal Re    = REYN;
  const PetscInt  Ncomp = dim;
  PetscInt        c, d;
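
  /* Convection term u . grad u; the FE integrator zeroes f0 before invoking this kernel, so accumulating with += is safe */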
  for (c = 0; c < Ncomp; ++c) {
    for (d = 0; d < dim; ++d) {
      f0[c] += u[d] * u_x[c*dim+d];
    }
  }
  f0[0] += u_t[0];
  f0[1] += u_t[1];

  f0[0] += -2.0*t*(x[0] + x[1]) + 2.0*x[0]*x[1]*x[1] - 4.0*x[0]*x[0]*x[1] - 2.0*x[0]*x[0]*x[0] + 4.0/Re - 1.0;
  f0[1] += -2.0*t*x[0] + 2.0*x[1]*x[1]*x[1] - 4.0*x[0]*x[1]*x[1] - 2.0*x[0]*x[0]*x[1] + 4.0/Re - 1.0;
}
static void f0_mms2_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal Re    = REYN;
  const PetscInt  Ncomp = dim;
  PetscInt        c, d;

  for (c = 0; c < Ncomp; ++c) {
    for (d = 0; d < dim; ++d) {
      f0[c] += u[d] * u_x[c*dim+d];
    }
  }
  f0[0] += u_t[0];
  f0[1] += u_t[1];

  f0[0] -= ( Re*((1.0L/2.0L)*PetscSinReal(2*t + 2*x[0]) + PetscSinReal(2*t + x[0] + x[1]) + PetscCosReal(t + x[0] - x[1])) + 2.0*PetscSinReal(t + x[0])*PetscSinReal(t + x[1]))/Re;
  f0[1] -= (-Re*((1.0L/2.0L)*PetscSinReal(2*t + 2*x[1]) + PetscSinReal(2*t + x[0] + x[1]) + PetscCosReal(t + x[0] - x[1])) + 2.0*PetscCosReal(t + x[0])*PetscCosReal(t + x[1]))/Re;
}
static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal Re    = REYN;
  const PetscInt  Ncomp = dim;
  PetscInt        comp, d;
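
  /* Flux f1[comp][d] = (1/Re) du_comp/dx_d - p delta_{comp,d}; the fields are packed with the dim velocity components first, so u[Ncomp] is the pressure */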
  for (comp = 0; comp < Ncomp; ++comp) {
    for (d = 0; d < dim; ++d) {
      f1[comp*dim+d] = 1.0/Re * u_x[comp*dim+d];
    }
    f1[comp*dim+comp] -= u[Ncomp];
  }
}
static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt d;
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] += u_x[d*dim+d];
}

static void f1_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) f1[d] = 0.0;
}
/*
  (psi_i, u_j grad_j u_i) ==> (\psi_i, \phi_j grad_j u_i)
*/
static void g0_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
  PetscInt NcI = dim, NcJ = dim;
  PetscInt fc, gc;
  PetscInt d;
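
  /* u_tShift is the dF/du_t weight supplied by the implicit integrator (the shift "a" in the IJacobian J = dF/du + a dF/du_t), so it lands on the velocity diagonal for the u_t term */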
  for (d = 0; d < dim; ++d) {
    g0[d*dim+d] = u_tShift;
  }

  for (fc = 0; fc < NcI; ++fc) {
    for (gc = 0; gc < NcJ; ++gc) {
      g0[fc*NcJ+gc] += u_x[fc*NcJ+gc];
    }
  }
}
/*
  (psi_i, u_j grad_j u_i) ==> (\psi_i, u_j grad_j \phi_i)
*/
static void g1_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt NcI = dim;
  PetscInt NcJ = dim;
  PetscInt fc, gc, dg;

  for (fc = 0; fc < NcI; ++fc) {
    for (gc = 0; gc < NcJ; ++gc) {
      for (dg = 0; dg < dim; ++dg) {
        /* Kronecker delta in the velocity components */
        if (fc == gc) {
          g1[(fc*NcJ+gc)*dim+dg] += u[dg];
        }
      }
    }
  }
}
/* < q, \nabla\cdot u >
   NcompI = 1, NcompJ = dim */
static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d*dim+d] = 1.0; /* \frac{\partial\phi^{u_d}}{\partial x_d} */
}
/* -< \nabla\cdot v, p >
   NcompI = dim, NcompJ = 1 */
static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* \frac{\partial\psi^{u_d}}{\partial x_d} */
}
/* < \nabla v, \nabla u + {\nabla u}^T >
   This just gives \nabla u; the transpose would contribute the off-diagonal (cross-component) blocks as well */
static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscReal Re    = REYN;
  const PetscInt  Ncomp = dim;
  PetscInt        compI, d;

  for (compI = 0; compI < Ncomp; ++compI) {
    for (d = 0; d < dim; ++d) {
      g3[((compI*Ncomp+compI)*dim+d)*dim+d] = 1.0/Re;
    }
  }
}
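
/*
  Together these kernels assemble the saddle-point Jacobian

      J = [ A  B ]    A = g0_uu + g1_uu + g3_uu (momentum block)
          [ C  0 ]    B = g2_up (pressure gradient), C = g1_pu (divergence)

  whose zero pressure-pressure block is why the tests below solve with
  PCFIELDSPLIT and a full Schur complement factorization.
*/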
static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  options->dim     = 2;
  options->simplex = PETSC_TRUE;
  options->mms     = 1;

  ierr = PetscOptionsBegin(comm, "", "Navier-Stokes Equation Options", "DMPLEX");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-dim", "The topological mesh dimension", "ex46.c", options->dim, &options->dim, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-simplex", "Simplicial (true) or tensor (false) mesh", "ex46.c", options->simplex, &options->simplex, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-mms", "The manufactured solution to use", "ex46.c", options->mms, &options->mms, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode CreateBCLabel(DM dm, const char name[])
{
  DM             plex;
  DMLabel        label;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMCreateLabel(dm, name);CHKERRQ(ierr);
  ierr = DMGetLabel(dm, name, &label);CHKERRQ(ierr);
  ierr = DMConvert(dm, DMPLEX, &plex);CHKERRQ(ierr);
  ierr = DMPlexMarkBoundaryFaces(plex, 1, label);CHKERRQ(ierr);
  ierr = DMDestroy(&plex);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode CreateMesh(MPI_Comm comm, DM *dm, AppCtx *ctx)
{
  DM             pdm = NULL;
  const PetscInt dim = ctx->dim;
  PetscBool      hasLabel;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexCreateBoxMesh(comm, dim, ctx->simplex, NULL, NULL, NULL, NULL, PETSC_TRUE, dm);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dm, "Mesh");CHKERRQ(ierr);
  /* If no boundary marker exists, mark the whole boundary */
  ierr = DMHasLabel(*dm, "marker", &hasLabel);CHKERRQ(ierr);
  if (!hasLabel) {ierr = CreateBCLabel(*dm, "marker");CHKERRQ(ierr);}
  /* Distribute mesh over processes */
  ierr = DMPlexDistribute(*dm, 0, NULL, &pdm);CHKERRQ(ierr);
  if (pdm) {
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = pdm;
  }
  ierr = DMSetFromOptions(*dm);CHKERRQ(ierr);
  ierr = DMViewFromOptions(*dm, NULL, "-dm_view");CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode SetupProblem(DM dm, AppCtx *ctx)
{
  PetscDS        prob;
  const PetscInt id = 1;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMGetDS(dm, &prob);CHKERRQ(ierr);
  switch (ctx->mms) {
  case 1:
    ierr = PetscDSSetResidual(prob, 0, f0_mms1_u, f1_u);CHKERRQ(ierr);break;
  case 2:
    ierr = PetscDSSetResidual(prob, 0, f0_mms2_u, f1_u);CHKERRQ(ierr);break;
  }
  ierr = PetscDSSetResidual(prob, 1, f0_p, f1_p);CHKERRQ(ierr);
  ierr = PetscDSSetJacobian(prob, 0, 0, g0_uu, g1_uu, NULL, g3_uu);CHKERRQ(ierr);
  ierr = PetscDSSetJacobian(prob, 0, 1, NULL, NULL, g2_up, NULL);CHKERRQ(ierr);
  ierr = PetscDSSetJacobian(prob, 1, 0, NULL, g1_pu, NULL, NULL);CHKERRQ(ierr);
  switch (ctx->dim) {
  case 2:
    switch (ctx->mms) {
    case 1:
      ctx->exactFuncs[0] = mms1_u_2d;
      ctx->exactFuncs[1] = mms1_p_2d;
      break;
    case 2:
      ctx->exactFuncs[0] = mms2_u_2d;
      ctx->exactFuncs[1] = mms2_p_2d;
      break;
    default:
      SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Invalid MMS %D", ctx->mms);
    }
    break;
  default:
    SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Invalid dimension %D", ctx->dim);
  }
  ierr = DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", "marker", 0, 0, NULL, (void (*)(void)) ctx->exactFuncs[0], NULL, 1, &id, ctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode SetupDiscretization(DM dm, AppCtx *ctx)
{
  DM             cdm = dm;
  const PetscInt dim = ctx->dim;
  PetscFE        fe[2];
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* Create finite element */
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = PetscFECreateDefault(comm, dim, dim, ctx->simplex, "vel_", PETSC_DEFAULT, &fe[0]);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) fe[0], "velocity");CHKERRQ(ierr);
  ierr = PetscFECreateDefault(comm, dim, 1, ctx->simplex, "pres_", PETSC_DEFAULT, &fe[1]);CHKERRQ(ierr);
  ierr = PetscFECopyQuadrature(fe[0], fe[1]);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) fe[1], "pressure");CHKERRQ(ierr);
  /* Set discretization and boundary conditions for each mesh */
  ierr = DMSetField(dm, 0, NULL, (PetscObject) fe[0]);CHKERRQ(ierr);
  ierr = DMSetField(dm, 1, NULL, (PetscObject) fe[1]);CHKERRQ(ierr);
  ierr = DMCreateDS(dm);CHKERRQ(ierr);
  ierr = SetupProblem(dm, ctx);CHKERRQ(ierr);
  while (cdm) {
    PetscObject  pressure;
    MatNullSpace nsp;
    PetscBool    hasLabel;
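
    /* Velocity Dirichlet conditions cover the whole boundary, so the pressure is only determined up to a constant;
       compose the constant null space with the pressure field so the solver can project it out */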
    ierr = DMGetField(cdm, 1, NULL, &pressure);CHKERRQ(ierr);
    ierr = MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nsp);CHKERRQ(ierr);
    ierr = PetscObjectCompose(pressure, "nullspace", (PetscObject) nsp);CHKERRQ(ierr);
    ierr = MatNullSpaceDestroy(&nsp);CHKERRQ(ierr);

    ierr = DMHasLabel(cdm, "marker", &hasLabel);CHKERRQ(ierr);
    if (!hasLabel) {ierr = CreateBCLabel(cdm, "marker");CHKERRQ(ierr);}
    ierr = DMCopyDisc(dm, cdm);CHKERRQ(ierr);
    ierr = DMGetCoarseDM(cdm, &cdm);CHKERRQ(ierr);
  }
  ierr = PetscFEDestroy(&fe[0]);CHKERRQ(ierr);
  ierr = PetscFEDestroy(&fe[1]);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode MonitorError(TS ts, PetscInt step, PetscReal crtime, Vec u, void *ctx)
{
  AppCtx        *user = (AppCtx *) ctx;
  DM             dm;
  PetscReal      ferrors[2];
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = TSGetDM(ts, &dm);CHKERRQ(ierr);
  ierr = DMComputeL2FieldDiff(dm, crtime, user->exactFuncs, NULL, u, ferrors);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD, "Timestep: %04d time = %-8.4g \t L_2 Error: [%2.3g, %2.3g]\n", (int) step, (double) crtime, (double) ferrors[0], (double) ferrors[1]);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc, char **argv)
{
  AppCtx         ctx;
  DM             dm;
  TS             ts;
  Vec            u, r;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, help);if (ierr) return ierr;
  ierr = ProcessOptions(PETSC_COMM_WORLD, &ctx);CHKERRQ(ierr);
  ierr = CreateMesh(PETSC_COMM_WORLD, &dm, &ctx);CHKERRQ(ierr);
  ierr = DMSetApplicationContext(dm, &ctx);CHKERRQ(ierr);
  ierr = PetscMalloc1(2, &ctx.exactFuncs);CHKERRQ(ierr);
  ierr = SetupDiscretization(dm, &ctx);CHKERRQ(ierr);
  ierr = DMPlexCreateClosureIndex(dm, NULL);CHKERRQ(ierr);

  ierr = DMCreateGlobalVector(dm, &u);CHKERRQ(ierr);
  ierr = VecDuplicate(u, &r);CHKERRQ(ierr);

  ierr = TSCreate(PETSC_COMM_WORLD, &ts);CHKERRQ(ierr);
  ierr = TSMonitorSet(ts, MonitorError, &ctx, NULL);CHKERRQ(ierr);
  ierr = TSSetDM(ts, dm);CHKERRQ(ierr);
  ierr = DMTSSetBoundaryLocal(dm, DMPlexTSComputeBoundary, &ctx);CHKERRQ(ierr);
  ierr = DMTSSetIFunctionLocal(dm, DMPlexTSComputeIFunctionFEM, &ctx);CHKERRQ(ierr);
  ierr = DMTSSetIJacobianLocal(dm, DMPlexTSComputeIJacobianFEM, &ctx);CHKERRQ(ierr);
  ierr = TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER);CHKERRQ(ierr);
  ierr = TSSetFromOptions(ts);CHKERRQ(ierr);

  ierr = DMProjectFunction(dm, 0.0, ctx.exactFuncs, NULL, INSERT_ALL_VALUES, u);CHKERRQ(ierr);
  ierr = TSSolve(ts, u);CHKERRQ(ierr);
  ierr = VecViewFromOptions(u, NULL, "-sol_vec_view");CHKERRQ(ierr);

  ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = VecDestroy(&r);CHKERRQ(ierr);
  ierr = TSDestroy(&ts);CHKERRQ(ierr);
  ierr = DMDestroy(&dm);CHKERRQ(ierr);
  ierr = PetscFree(ctx.exactFuncs);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
/*TEST

  # Full solves
  test:
    suffix: 2d_p2p1_r1
    requires: !single triangle
    filter: sed -e "s~ATOL~RTOL~g" -e "s~ABS~RELATIVE~g"
    args: -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ts_type beuler -ts_max_steps 10 -ts_dt 0.1 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1.0e-10 -fieldsplit_pressure_pc_type jacobi -ksp_monitor_short -ksp_converged_reason -snes_monitor_short -snes_converged_reason -ts_monitor
  test:
    suffix: 2d_p2p1_r2
    requires: !single triangle
    filter: sed -e "s~ATOL~RTOL~g" -e "s~ABS~RELATIVE~g"
    args: -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ts_type beuler -ts_max_steps 10 -ts_dt 0.1 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1.0e-10 -fieldsplit_pressure_pc_type jacobi -ksp_monitor_short -ksp_converged_reason -snes_monitor_short -snes_converged_reason -ts_monitor
  test:
    suffix: 2d_q2q1_r1
    requires: !single
    filter: sed -e "s~ATOL~RTOL~g" -e "s~ABS~RELATIVE~g" -e "s~ 0\]~ 0.0\]~g"
    args: -simplex 0 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ts_type beuler -ts_max_steps 10 -ts_dt 0.1 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1.0e-10 -fieldsplit_pressure_pc_type jacobi -ksp_monitor_short -ksp_converged_reason -snes_monitor_short -snes_converged_reason -ts_monitor
  test:
    suffix: 2d_q2q1_r2
    requires: !single
    filter: sed -e "s~ATOL~RTOL~g" -e "s~ABS~RELATIVE~g"
    args: -simplex 0 -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ts_type beuler -ts_max_steps 10 -ts_dt 0.1 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1.0e-10 -fieldsplit_pressure_pc_type jacobi -ksp_monitor_short -ksp_converged_reason -snes_monitor_short -snes_converged_reason -ts_monitor

TEST*/
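
/*
  Example invocation (a sketch based on the 2d_p2p1_r1 test above; it assumes a PETSc build
  with Triangle for simplicial meshing, and the executable path and any MPI launcher depend
  on your installation):

    ./ex46 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -ts_type beuler -ts_dt 0.1 -ts_max_steps 10 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi \
      -fieldsplit_pressure_ksp_rtol 1.0e-10 -ts_monitor -snes_converged_reason -ksp_converged_reason
*/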