Actual source code: ex62.c
petsc-3.13.6 2020-09-29
static char help[] = "Stokes Problem in 2d and 3d with simplicial finite elements.\n\
We solve the Stokes problem in a rectangular\n\
domain, using a parallel unstructured mesh (DMPLEX) to discretize it.\n\n\n";
/*
The isoviscous Stokes problem, which we discretize using the finite
element method on an unstructured mesh. The weak form equations are

  < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > + < v, f > = 0
  < q, \nabla\cdot u >                                                    = 0

Viewing:

To produce nice output, use

  -dm_refine 3 -show_error -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -sol_vec_view hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append

You can get a LaTeX view of the mesh, with point numbering, using

  -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0

The data layout can be viewed using

  -dm_petscsection_view

Lots of information about the FEM assembly can be printed using

  -dm_plex_print_fem 2

Field Data:

  DMPLEX data is organized by point, and the closure operation just stacks up the
data from each sieve point in the closure. Thus, for a P_2-P_1 Stokes element, we
have

  cl{e} = {f e_0 e_1 e_2 v_0 v_1 v_2}
  x     = [u_{e_0} v_{e_0} u_{e_1} v_{e_1} u_{e_2} v_{e_2} u_{v_0} v_{v_0} p_{v_0} u_{v_1} v_{v_1} p_{v_1} u_{v_2} v_{v_2} p_{v_2}]

The problem here is that we would like to loop over each field separately for
integration. Therefore, the closure visitor in DMPlexVecGetClosure() reorders
the data so that each field is contiguous:

  x'    = [u_{e_0} v_{e_0} u_{e_1} v_{e_1} u_{e_2} v_{e_2} u_{v_0} v_{v_0} u_{v_1} v_{v_1} u_{v_2} v_{v_2} p_{v_0} p_{v_1} p_{v_2}]

Likewise, DMPlexVecSetClosure() takes data partitioned by field, and correctly
puts it into the Sieve ordering.
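
As an illustration, a caller could read those field-partitioned closure values
back like this (a minimal sketch; locX and cell are hypothetical names, and
error checking is omitted):

  PetscScalar *vals = NULL;
  PetscInt     csize;

  DMPlexVecGetClosure(dm, NULL, locX, cell, &csize, &vals);
    vals[0],  ..., vals[11] are the velocity dofs (field 0), grouped first
    vals[12], ..., vals[14] are the pressure dofs (field 1), for the P_2-P_1 element above
  DMPlexVecRestoreClosure(dm, NULL, locX, cell, &csize, &vals);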

TODO:
 - Update FETI test output
 - Reorder mesh
 - Check that the q1-p0 Vanka domains are correct (I think they are)
 - Check the scaling of the iterates; right now it is bad
 - Check the q2-q1 domains, since convergence there is bad
 - Ask Patrick about domains
 - Plot the residual by field after each smoother iterate
 - Get the Diskin checks going
*/
#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscds.h>

PetscInt spatialDim = 0;

typedef enum {NEUMANN, DIRICHLET, NUM_BC_TYPES} BCType;
const char *bcTypes[NUM_BC_TYPES+1] = {"neumann", "dirichlet", "unknown"};
typedef enum {RUN_FULL, RUN_TEST, NUM_RUN_TYPES} RunType;
const char *runTypes[NUM_RUN_TYPES+1] = {"full", "test", "unknown"};
typedef enum {SOL_QUADRATIC, SOL_CUBIC, SOL_TRIG, NUM_SOL_TYPES} SolType;
const char *solTypes[NUM_SOL_TYPES+1] = {"quadratic", "cubic", "trig", "unknown"};

typedef struct {
  PetscInt      debug;           /* The debugging level */
  RunType       runType;         /* Whether to run tests, or solve the full problem */
  PetscLogEvent createMeshEvent;
  PetscBool     showInitial, showError;
  /* Domain and mesh definition */
  PetscInt      dim;             /* The topological mesh dimension */
  PetscBool     interpolate;     /* Generate intermediate mesh elements */
  PetscBool     simplex;         /* Use simplices or tensor product cells */
  PetscInt      cells[3];        /* The initial domain division */
  PetscReal     refinementLimit; /* The largest allowable cell volume */
  PetscBool     testPartition;   /* Use a fixed partitioning for testing */
  /* Problem definition */
  BCType        bcType;
  SolType       solType;
  PetscErrorCode (**exactFuncs)(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx);
} AppCtx;

PetscErrorCode zero_scalar(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = 0.0;
  return 0;
}
PetscErrorCode zero_vector(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  PetscInt d;
  for (d = 0; d < spatialDim; ++d) u[d] = 0.0;
  return 0;
}

/*
  In 2D we use exact solution:

    u = x^2 + y^2
    v = 2 x^2 - 2 x y
    p = x + y - 1
    f_x = f_y = 3

  so that

    -\Delta u + \nabla p + f = <-4, -4> + <1, 1> + <3, 3> = 0
    \nabla \cdot u           = 2x - 2x                    = 0
*/
PetscErrorCode quadratic_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = x[0]*x[0] + x[1]*x[1];
  u[1] = 2.0*x[0]*x[0] - 2.0*x[0]*x[1];
  return 0;
}

PetscErrorCode linear_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = x[0] + x[1] - 1.0;
  return 0;
}
PetscErrorCode constant_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = 1.0;
  return 0;
}

/*
  In 2D we use exact solution:

    u = x^3 + y^3
    v = 2 x^3 - 3 x^2 y
    p = 3/2 x^2 + 3/2 y^2 - 1
    f_x = 3 x + 6 y
    f_y = 12 x - 9 y

  so that

    -\Delta u + \nabla p + f = <-6 x - 6 y, -12 x + 6 y> + <3 x, 3 y> + <3 x + 6 y, 12 x - 9 y> = 0
    \nabla \cdot u           = 3 x^2 - 3 x^2 = 0
*/
PetscErrorCode cubic_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = x[0]*x[0]*x[0] + x[1]*x[1]*x[1];
  u[1] = 2.0*x[0]*x[0]*x[0] - 3.0*x[0]*x[0]*x[1];
  return 0;
}

PetscErrorCode quadratic_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = 3.0*x[0]*x[0]/2.0 + 3.0*x[1]*x[1]/2.0 - 1.0;
  return 0;
}

/*
  In 2D we use exact solution:

    u = sin(n pi x) + y^2
    v = -n pi y cos(n pi x)
    p = 3/2 x^2 + 3/2 y^2 - 1
    f_x = 2 - 3x - n^2 pi^2 sin(n pi x)
    f_y = n^3 pi^3 y cos(n pi x) - 3y

  so that

    -\Delta u + \nabla p + f = <n^2 pi^2 sin(n pi x) - 2, -n^3 pi^3 y cos(n pi x)> + <3 x, 3 y> + <2 - 3x - n^2 pi^2 sin(n pi x), n^3 pi^3 y cos(n pi x) - 3y> = 0
    \nabla \cdot u           = n pi cos(n pi x) - n pi cos(n pi x) = 0
*/
PetscErrorCode trig_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  const PetscReal n = 1.0;

  u[0] = PetscSinReal(n*PETSC_PI*x[0]) + x[1]*x[1];
  u[1] = -n*PETSC_PI*x[1]*PetscCosReal(n*PETSC_PI*x[0]);
  return 0;
}

void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                    const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                    const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                    PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt c;
  for (c = 0; c < dim; ++c) f0[c] = 3.0;
}

void f0_cubic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  f0[0] = 3.0*x[0] + 6.0*x[1];
  f0[1] = 12.0*x[0] - 9.0*x[1];
}

void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
               const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
               const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
               PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal n = 1.0;

  f0[0] = 2.0 - 3.0*x[0] - PetscSqr(n*PETSC_PI)*PetscSinReal(n*PETSC_PI*x[0]);
  f0[1] = PetscSqr(n*PETSC_PI)*(n*PETSC_PI)*x[1]*PetscCosReal(n*PETSC_PI*x[0]) - 3.0*x[1];
}

/* gradU[comp*dim+d] = {u_x, u_y, v_x, v_y} or {u_x, u_y, u_z, v_x, v_y, v_z, w_x, w_y, w_z}
   u[Ncomp]          = {p} */
void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscInt Ncomp = dim;
  PetscInt       comp, d;

  for (comp = 0; comp < Ncomp; ++comp) {
    for (d = 0; d < dim; ++d) {
      /* f1[comp*dim+d] = 0.5*(gradU[comp*dim+d] + gradU[d*dim+comp]); */
      f1[comp*dim+d] = u_x[comp*dim+d];
    }
    f1[comp*dim+comp] -= u[Ncomp];
  }
}

/* gradU[comp*dim+d] = {u_x, u_y, v_x, v_y} or {u_x, u_y, u_z, v_x, v_y, v_z, w_x, w_y, w_z} */
void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt d;
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] += u_x[d*dim+d];
}

void f1_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) f1[d] = 0.0;
}

/* < q, \nabla\cdot u >
   NcompI = 1, NcompJ = dim */
void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
           PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d*dim+d] = 1.0; /* \frac{\partial\phi^{u_d}}{\partial x_d} */
}

/* -< \nabla\cdot v, p >
   NcompI = dim, NcompJ = 1 */
void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux,
           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
           PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* \frac{\partial\psi^{u_d}}{\partial x_d} */
}

/* < \nabla v, \nabla u + {\nabla u}^T >
   This just gives \nabla u; the {\nabla u}^T term would add the perdiagonal entries (see the sketch below) */
void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
           PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscInt Ncomp = dim;
  PetscInt       compI, d;

  for (compI = 0; compI < Ncomp; ++compI) {
    for (d = 0; d < dim; ++d) {
      g3[((compI*Ncomp+compI)*dim+d)*dim+d] = 1.0;
    }
  }
}
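
/* A minimal sketch (not used above) of how the transpose term would enter if the
   full symmetric form < \nabla v, \nabla u + {\nabla u}^T > were assembled, which
   would also require f1_u to use u_x[comp*dim+d] + u_x[d*dim+comp]:

     for (compI = 0; compI < Ncomp; ++compI) {
       for (d = 0; d < dim; ++d) {
         g3[((compI*Ncomp+compI)*dim+d)*dim+d] += 1.0;    from \nabla u
         g3[((compI*Ncomp+d)*dim+d)*dim+compI] += 1.0;    from {\nabla u}^T
       }
     }
*/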

/*
  In 3D we use exact solution:

    u = x^2 + y^2
    v = y^2 + z^2
    w = x^2 + y^2 - 2(x+y)z
    p = x + y + z - 3/2
    f_x = f_y = f_z = 3

  so that

    -\Delta u + \nabla p + f = <-4, -4, -4> + <1, 1, 1> + <3, 3, 3> = 0
    \nabla \cdot u           = 2x + 2y - 2(x + y)                   = 0
*/
PetscErrorCode quadratic_u_3d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = x[0]*x[0] + x[1]*x[1];
  u[1] = x[1]*x[1] + x[2]*x[2];
  u[2] = x[0]*x[0] + x[1]*x[1] - 2.0*(x[0] + x[1])*x[2];
  return 0;
}

PetscErrorCode linear_p_3d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = x[0] + x[1] + x[2] - 1.5;
  return 0;
}

void pressure(PetscInt dim, PetscInt Nf, PetscInt NfAux,
              const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
              const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
              PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar p[])
{
  p[0] = u[uOff[1]];
}

PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscInt bc, run, sol, n;

  options->debug           = 0;
  options->runType         = RUN_FULL;
  options->dim             = 2;
  options->interpolate     = PETSC_FALSE;
  options->simplex         = PETSC_TRUE;
  options->cells[0]        = 3;
  options->cells[1]        = 3;
  options->cells[2]        = 3;
  options->refinementLimit = 0.0;
  options->testPartition   = PETSC_FALSE;
  options->bcType          = DIRICHLET;
  options->solType         = SOL_QUADRATIC;
  options->showInitial     = PETSC_FALSE;
  options->showError       = PETSC_FALSE;

  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  PetscOptionsInt("-debug", "The debugging level", "ex62.c", options->debug, &options->debug, NULL);
  run  = options->runType;
  PetscOptionsEList("-run_type", "The run type", "ex62.c", runTypes, NUM_RUN_TYPES, runTypes[options->runType], &run, NULL);
  options->runType = (RunType) run;
  PetscOptionsInt("-dim", "The topological mesh dimension", "ex62.c", options->dim, &options->dim, NULL);
  spatialDim = options->dim;
  PetscOptionsBool("-interpolate", "Generate intermediate mesh elements", "ex62.c", options->interpolate, &options->interpolate, NULL);
  PetscOptionsBool("-simplex", "Use simplices or tensor product cells", "ex62.c", options->simplex, &options->simplex, NULL);
  if (options->simplex) {
    options->cells[0] = 4 - options->dim;
    options->cells[1] = 4 - options->dim;
    options->cells[2] = 4 - options->dim;
  }
  n = 3;
  PetscOptionsIntArray("-cells", "The initial mesh division", "ex62.c", options->cells, &n, NULL);
  PetscOptionsReal("-refinement_limit", "The largest allowable cell volume", "ex62.c", options->refinementLimit, &options->refinementLimit, NULL);
  PetscOptionsBool("-test_partition", "Use a fixed partition for testing", "ex62.c", options->testPartition, &options->testPartition, NULL);
  bc   = options->bcType;
  PetscOptionsEList("-bc_type", "Type of boundary condition", "ex62.c", bcTypes, NUM_BC_TYPES, bcTypes[options->bcType], &bc, NULL);
  options->bcType = (BCType) bc;
  sol  = options->solType;
  PetscOptionsEList("-sol_type", "The solution type", "ex62.c", solTypes, NUM_SOL_TYPES, solTypes[options->solType], &sol, NULL);
  options->solType = (SolType) sol;
  PetscOptionsBool("-show_initial", "Output the initial guess for verification", "ex62.c", options->showInitial, &options->showInitial, NULL);
  PetscOptionsBool("-show_error", "Output the error for verification", "ex62.c", options->showError, &options->showError, NULL);
  PetscOptionsEnd();

  PetscLogEventRegister("CreateMesh", DM_CLASSID, &options->createMeshEvent);
  return(0);
}

PetscErrorCode DMVecViewLocal(DM dm, Vec v)
{
  Vec lv;

  DMGetLocalVector(dm, &lv);
  DMGlobalToLocalBegin(dm, v, INSERT_VALUES, lv);
  DMGlobalToLocalEnd(dm, v, INSERT_VALUES, lv);
  DMPrintLocalVec(dm, "Local function", 0.0, lv);
  DMRestoreLocalVector(dm, &lv);
  return(0);
}

PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscInt  dim             = user->dim;
  PetscBool interpolate     = user->interpolate;
  PetscReal refinementLimit = user->refinementLimit;

  PetscLogEventBegin(user->createMeshEvent, 0, 0, 0, 0);
  DMPlexCreateBoxMesh(comm, dim, user->simplex, user->cells, NULL, NULL, NULL, interpolate, dm);
  {
    DM refinedMesh     = NULL;
    DM distributedMesh = NULL;

    /* Refine mesh using a volume constraint */
    DMPlexSetRefinementLimit(*dm, refinementLimit);
    if (user->simplex) {DMRefine(*dm, comm, &refinedMesh);}
    if (refinedMesh) {
      DMDestroy(dm);
      *dm = refinedMesh;
    }
    /* Setup test partitioning */
    if (user->testPartition) {
      PetscInt         triSizes_n2[2]         = {4, 4};
      PetscInt         triPoints_n2[8]        = {3, 5, 6, 7, 0, 1, 2, 4};
      PetscInt         triSizes_n3[3]         = {2, 3, 3};
      PetscInt         triPoints_n3[8]        = {3, 5, 1, 6, 7, 0, 2, 4};
      PetscInt         triSizes_n5[5]         = {1, 2, 2, 1, 2};
      PetscInt         triPoints_n5[8]        = {3, 5, 6, 4, 7, 0, 1, 2};
      PetscInt         triSizes_ref_n2[2]     = {8, 8};
      PetscInt         triPoints_ref_n2[16]   = {1, 5, 6, 7, 10, 11, 14, 15, 0, 2, 3, 4, 8, 9, 12, 13};
      PetscInt         triSizes_ref_n3[3]     = {5, 6, 5};
      PetscInt         triPoints_ref_n3[16]   = {1, 7, 10, 14, 15, 2, 6, 8, 11, 12, 13, 0, 3, 4, 5, 9};
      PetscInt         triSizes_ref_n5[5]     = {3, 4, 3, 3, 3};
      PetscInt         triPoints_ref_n5[16]   = {1, 7, 10, 2, 11, 13, 14, 5, 6, 15, 0, 8, 9, 3, 4, 12};
      PetscInt         triSizes_ref_n5_d3[5]  = {1, 1, 1, 1, 2};
      PetscInt         triPoints_ref_n5_d3[6] = {0, 1, 2, 3, 4, 5};
      const PetscInt  *sizes  = NULL;
      const PetscInt  *points = NULL;
      PetscPartitioner part;
      PetscInt         cEnd;
      PetscMPIInt      rank, size;

      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &size);
      DMPlexGetHeightStratum(*dm, 0, NULL, &cEnd);
      if (!rank) {
        if (dim == 2 && user->simplex && size == 2 && cEnd == 8) {
          sizes = triSizes_n2; points = triPoints_n2;
        } else if (dim == 2 && user->simplex && size == 3 && cEnd == 8) {
          sizes = triSizes_n3; points = triPoints_n3;
        } else if (dim == 2 && user->simplex && size == 5 && cEnd == 8) {
          sizes = triSizes_n5; points = triPoints_n5;
        } else if (dim == 2 && user->simplex && size == 2 && cEnd == 16) {
          sizes = triSizes_ref_n2; points = triPoints_ref_n2;
        } else if (dim == 2 && user->simplex && size == 3 && cEnd == 16) {
          sizes = triSizes_ref_n3; points = triPoints_ref_n3;
        } else if (dim == 2 && user->simplex && size == 5 && cEnd == 16) {
          sizes = triSizes_ref_n5; points = triPoints_ref_n5;
        } else if (dim == 3 && user->simplex && size == 5 && cEnd == 6) {
          sizes = triSizes_ref_n5_d3; points = triPoints_ref_n5_d3;
        } else SETERRQ(comm, PETSC_ERR_ARG_WRONG, "No stored partition matching run parameters");
      }
      DMPlexGetPartitioner(*dm, &part);
      PetscPartitionerSetType(part, PETSCPARTITIONERSHELL);
      PetscPartitionerShellSetPartition(part, size, sizes, points);
    } else {
      PetscPartitioner part;

      DMPlexGetPartitioner(*dm, &part);
      PetscPartitionerSetFromOptions(part);
    }
    /* Distribute mesh over processes */
    DMPlexDistribute(*dm, 0, NULL, &distributedMesh);
    if (distributedMesh) {
      DMDestroy(dm);
      *dm = distributedMesh;
    }
  }
  DMSetFromOptions(*dm);
  DMViewFromOptions(*dm, NULL, "-dm_view");
  PetscLogEventEnd(user->createMeshEvent, 0, 0, 0, 0);
  return(0);
}

PetscErrorCode SetupProblem(DM dm, AppCtx *user)
{
  PetscDS        prob;
  const PetscInt id = 1;

  DMGetDS(dm, &prob);
  switch (user->solType) {
  case SOL_QUADRATIC:
    switch (user->dim) {
    case 2:
      PetscDSSetResidual(prob, 0, f0_quadratic_u, f1_u);
      user->exactFuncs[0] = quadratic_u_2d;
      user->exactFuncs[1] = linear_p_2d;
      break;
    case 3:
      PetscDSSetResidual(prob, 0, f0_quadratic_u, f1_u);
      user->exactFuncs[0] = quadratic_u_3d;
      user->exactFuncs[1] = linear_p_3d;
      break;
    default: SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Unsupported dimension %D for quadratic solution", user->dim);
    }
    break;
  case SOL_CUBIC:
    switch (user->dim) {
    case 2:
      PetscDSSetResidual(prob, 0, f0_cubic_u, f1_u);
      user->exactFuncs[0] = cubic_u_2d;
      user->exactFuncs[1] = quadratic_p_2d;
      break;
    default: SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Unsupported dimension %D for cubic solution", user->dim);
    }
    break;
  case SOL_TRIG:
    switch (user->dim) {
    case 2:
      PetscDSSetResidual(prob, 0, f0_trig_u, f1_u);
      user->exactFuncs[0] = trig_u_2d;
      user->exactFuncs[1] = quadratic_p_2d;
      break;
    default: SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Unsupported dimension %D for trigonometric solution", user->dim);
    }
    break;
  default: SETERRQ2(PetscObjectComm((PetscObject) prob), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%D)", solTypes[PetscMin(user->solType, NUM_SOL_TYPES)], user->solType);
  }
  PetscDSSetResidual(prob, 1, f0_p, f1_p);
  PetscDSSetJacobian(prob, 0, 0, NULL, NULL, NULL, g3_uu);
  PetscDSSetJacobian(prob, 0, 1, NULL, NULL, g2_up, NULL);
  PetscDSSetJacobian(prob, 1, 0, NULL, g1_pu, NULL, NULL);
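  /* Together these blocks form the saddle point Jacobian
       [ A  B ]   with A = g3_uu (viscous block), B = g2_up (pressure gradient),
       [ C  0 ]        C = g1_pu (divergence constraint) */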

  PetscDSAddBoundary(prob, user->bcType == DIRICHLET ? DM_BC_ESSENTIAL : DM_BC_NATURAL, "wall", user->bcType == NEUMANN ? "boundary" : "marker", 0, 0, NULL, (void (*)(void)) user->exactFuncs[0], 1, &id, user);
  PetscDSSetExactSolution(prob, 0, user->exactFuncs[0], user);
  PetscDSSetExactSolution(prob, 1, user->exactFuncs[1], user);
  return(0);
}

PetscErrorCode SetupDiscretization(DM dm, AppCtx *user)
{
  DM             cdm = dm;
  const PetscInt dim = user->dim;
  PetscFE        fe[2];
  MPI_Comm       comm;

  /* Create finite element */
  PetscObjectGetComm((PetscObject) dm, &comm);
  PetscFECreateDefault(comm, dim, dim, user->simplex, "vel_", PETSC_DEFAULT, &fe[0]);
  PetscObjectSetName((PetscObject) fe[0], "velocity");
  PetscFECreateDefault(comm, dim, 1, user->simplex, "pres_", PETSC_DEFAULT, &fe[1]);
  PetscFECopyQuadrature(fe[0], fe[1]);
  PetscObjectSetName((PetscObject) fe[1], "pressure");
  /* Set discretization and boundary conditions for each mesh */
  DMSetField(dm, 0, NULL, (PetscObject) fe[0]);
  DMSetField(dm, 1, NULL, (PetscObject) fe[1]);
  DMCreateDS(dm);
  SetupProblem(dm, user);
  while (cdm) {
    DMCopyDisc(dm, cdm);
    DMGetCoarseDM(cdm, &cdm);
  }
  PetscFEDestroy(&fe[0]);
  PetscFEDestroy(&fe[1]);
  return(0);
}

static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt dummy, MatNullSpace *nullspace)
{
  Vec vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero_vector, constant_p};

  DMCreateGlobalVector(dm, &vec);
  DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec);
  VecNormalize(vec, NULL);
  PetscObjectSetName((PetscObject) vec, "Pressure Null Space");
  VecViewFromOptions(vec, NULL, "-pressure_nullspace_view");
  MatNullSpaceCreate(PetscObjectComm((PetscObject) dm), PETSC_FALSE, 1, &vec, nullspace);
  VecDestroy(&vec);
  /* New style for field null spaces */
  {
    PetscObject  pressure;
    MatNullSpace nullspacePres;

    DMGetField(dm, 1, NULL, &pressure);
    MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres);
    PetscObjectCompose(pressure, "nullspace", (PetscObject) nullspacePres);
    MatNullSpaceDestroy(&nullspacePres);
  }
  return(0);
}

/* Add a vector in the nullspace to make the continuum integral 0.

   If int(u) = a and int(n) = b, then int(u - (a/b) n) = a - (a/b) b = 0
*/
static PetscErrorCode CorrectDiscretePressure(DM dm, MatNullSpace nullspace, Vec u, AppCtx *user)
{
  PetscDS     prob;
  const Vec  *nullvecs;
  PetscScalar pintd, intc[2], intn[2];
  MPI_Comm    comm;

  PetscObjectGetComm((PetscObject) dm, &comm);
  DMGetDS(dm, &prob);
  PetscDSSetObjective(prob, 1, pressure);
  MatNullSpaceGetVecs(nullspace, NULL, NULL, &nullvecs);
  VecDot(nullvecs[0], u, &pintd);
  if (PetscAbsScalar(pintd) > 1.0e-10) SETERRQ1(comm, PETSC_ERR_ARG_WRONG, "Discrete integral of pressure: %g\n", (double) PetscRealPart(pintd));
  DMPlexComputeIntegralFEM(dm, nullvecs[0], intn, user);
  DMPlexComputeIntegralFEM(dm, u, intc, user);
  VecAXPY(u, -intc[1]/intn[1], nullvecs[0]);
  DMPlexComputeIntegralFEM(dm, u, intc, user);
  if (PetscAbsScalar(intc[1]) > 1.0e-10) SETERRQ1(comm, PETSC_ERR_ARG_WRONG, "Continuum integral of pressure after correction: %g\n", (double) PetscRealPart(intc[1]));
  return(0);
}

static PetscErrorCode SNESConvergenceCorrectPressure(SNES snes, PetscInt it, PetscReal xnorm, PetscReal gnorm, PetscReal f, SNESConvergedReason *reason, void *user)
{
  SNESConvergedDefault(snes, it, xnorm, gnorm, f, reason, user);
  if (*reason > 0) {
    DM           dm;
    Mat          J;
    Vec          u;
    MatNullSpace nullspace;

    SNESGetDM(snes, &dm);
    SNESGetSolution(snes, &u);
    SNESGetJacobian(snes, &J, NULL, NULL, NULL);
    MatGetNullSpace(J, &nullspace);
    CorrectDiscretePressure(dm, nullspace, u, (AppCtx *) user);
  }
  return(0);
}

int main(int argc, char **argv)
{
  SNES           snes;        /* nonlinear solver */
  DM             dm;          /* problem definition */
  Vec            u, r;        /* solution and residual */
  AppCtx         user;        /* user-defined work context */
  PetscReal      error = 0.0; /* L_2 error in the solution */
  PetscReal      ferrors[2];
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, help);if (ierr) return ierr;
  ProcessOptions(PETSC_COMM_WORLD, &user);
  SNESCreate(PETSC_COMM_WORLD, &snes);
  CreateMesh(PETSC_COMM_WORLD, &user, &dm);
  SNESSetDM(snes, dm);
  DMSetApplicationContext(dm, &user);

  PetscMalloc(2 * sizeof(void (*)(const PetscReal[], PetscScalar *, void *)), &user.exactFuncs);
  SetupDiscretization(dm, &user);
  DMPlexCreateClosureIndex(dm, NULL);

  DMCreateGlobalVector(dm, &u);
  VecDuplicate(u, &r);
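
  /* The pressure is determined only up to a constant, so attach its null space
     (built in CreatePressureNullSpace) and, in the convergence test, shift the
     converged pressure to have zero integral (CorrectDiscretePressure) */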
  DMSetNullSpaceConstructor(dm, 2, CreatePressureNullSpace);
  SNESSetConvergenceTest(snes, SNESConvergenceCorrectPressure, &user, NULL);

  DMPlexSetSNESLocalFEM(dm, &user, &user, &user);

  SNESSetFromOptions(snes);

  DMProjectFunction(dm, 0.0, user.exactFuncs, NULL, INSERT_ALL_VALUES, u);
  PetscObjectSetName((PetscObject) u, "Exact Solution");
  VecViewFromOptions(u, NULL, "-exact_vec_view");
  PetscObjectSetName((PetscObject) u, "Solution");
  if (user.showInitial) {DMVecViewLocal(dm, u);}
  if (user.runType == RUN_FULL) {
    PetscErrorCode (*initialGuess[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero_vector, zero_scalar};

    DMProjectFunction(dm, 0.0, initialGuess, NULL, INSERT_VALUES, u);
    if (user.debug) {
      PetscPrintf(PETSC_COMM_WORLD, "Initial guess\n");
      VecView(u, PETSC_VIEWER_STDOUT_WORLD);
    }
    SNESSolve(snes, NULL, u);
    DMComputeL2Diff(dm, 0.0, user.exactFuncs, NULL, u, &error);
    DMComputeL2FieldDiff(dm, 0.0, user.exactFuncs, NULL, u, ferrors);
    PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: %g [%g, %g]\n", (double)error, (double)ferrors[0], (double)ferrors[1]);
    if (user.showError) {
      Vec r;

      DMGetGlobalVector(dm, &r);
      DMProjectFunction(dm, 0.0, user.exactFuncs, NULL, INSERT_ALL_VALUES, r);
      VecAXPY(r, -1.0, u);
      PetscObjectSetName((PetscObject) r, "Solution Error");
      VecViewFromOptions(r, NULL, "-error_vec_view");
      DMRestoreGlobalVector(dm, &r);
    }
  } else {
    PetscReal res = 0.0;

    /* Check discretization error */
    PetscPrintf(PETSC_COMM_WORLD, "Initial guess\n");
    VecView(u, PETSC_VIEWER_STDOUT_WORLD);
    DMComputeL2Diff(dm, 0.0, user.exactFuncs, NULL, u, &error);
    if (error >= 1.0e-11) {PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: %g\n", (double)error);}
    else                  {PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: < 1.0e-11\n");}
    /* Check residual */
    SNESComputeFunction(snes, u, r);
    PetscPrintf(PETSC_COMM_WORLD, "Initial Residual\n");
    VecChop(r, 1.0e-10);
    VecView(r, PETSC_VIEWER_STDOUT_WORLD);
    VecNorm(r, NORM_2, &res);
    PetscPrintf(PETSC_COMM_WORLD, "L_2 Residual: %g\n", (double)res);
    /* Check Jacobian */
    {
      Mat          J, M;
      MatNullSpace nullspace;
      Vec          b;
      PetscBool    isNull;

      SNESSetUpMatrices(snes);
      SNESGetJacobian(snes, &J, &M, NULL, NULL);
      SNESComputeJacobian(snes, u, J, M);
      MatGetNullSpace(J, &nullspace);
      MatNullSpaceTest(nullspace, J, &isNull);
      VecDuplicate(u, &b);
      VecSet(r, 0.0);
      SNESComputeFunction(snes, r, b);
      MatMult(J, u, r);
      VecAXPY(r, 1.0, b);
      VecDestroy(&b);
      PetscPrintf(PETSC_COMM_WORLD, "Au - b = Au + F(0)\n");
      VecChop(r, 1.0e-10);
      VecView(r, PETSC_VIEWER_STDOUT_WORLD);
      VecNorm(r, NORM_2, &res);
      PetscPrintf(PETSC_COMM_WORLD, "Linear L_2 Residual: %g\n", (double)res);
    }
  }
  VecViewFromOptions(u, NULL, "-sol_vec_view");

  VecDestroy(&u);
  VecDestroy(&r);
  SNESDestroy(&snes);
  DMDestroy(&dm);
  PetscFree(user.exactFuncs);
  ierr = PetscFinalize();
  return ierr;
}

/*TEST

  # 2D serial P1 tests 0-3
  test:
    suffix: 0
    requires: triangle
    args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 1
    requires: triangle
    args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 2
    requires: triangle
    args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 3
    requires: triangle
    args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  # 2D serial P2 tests 4-5
  test:
    suffix: 4
    requires: triangle
    args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 5
    requires: triangle
    args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  # 2D serial P3 tests
  test:
    suffix: 2d_p3_0
    requires: triangle
    args: -run_type test -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2
  test:
    suffix: 2d_p3_1
    requires: triangle !single
    args: -run_type full -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2
  # Parallel tests 6-17
  test:
    suffix: 6
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 7
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 8
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 9
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 10
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 11
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 12
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 13
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 14
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 15
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 16
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  test:
    suffix: 17
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1
  # 3D serial P1 tests 43-46
  test:
    suffix: 43
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 44
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 45
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0125 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 46
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0125 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1
  # Full solutions 18-29
  test:
    suffix: 18
    requires: triangle !single
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 19
    requires: triangle !single
    nsize: 2
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 20
    requires: triangle !single
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 3
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 20_parmetis
    requires: parmetis triangle !single
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 3
    args: -run_type full -petscpartitioner_type parmetis -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 21
    requires: triangle !single
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 5
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 22
    requires: triangle !single
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 23
    requires: triangle !single
    nsize: 2
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 24
    requires: triangle !single
    nsize: 3
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 25
    requires: triangle !single
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 5
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 26
    requires: triangle !single
    args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 27
    requires: triangle !single
    nsize: 2
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 28
    requires: triangle !single
    nsize: 3
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 29
    requires: triangle !single
    nsize: 5
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Full solutions with quads
  # FULL Schur with LU/Jacobi
  test:
    suffix: quad_q2q1_full
    requires: !single
    args: -run_type full -simplex 0 -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: quad_q2p1_full
    requires: !single
    args: -run_type full -simplex 0 -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pres_petscspace_poly_tensor 0 -pres_petscdualspace_lagrange_continuity 0 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Stokes preconditioners 30-36
  # Jacobi
  test:
    suffix: 30
    requires: triangle !single
    filter: sed -e "s/total number of linear solver iterations=756/total number of linear solver iterations=757/g" -e "s/total number of linear solver iterations=758/total number of linear solver iterations=757/g"
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_gmres_restart 100 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
  test:
    suffix: 31
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
  test:
    suffix: 32
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
  test:
    suffix: 33
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
  test:
    suffix: 34
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
  test:
    suffix: 35
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 36
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: pc_simple
    requires: triangle !single
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # SIMPLEC \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T rowsum(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & rowsum(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: pc_simplec
    requires: triangle
    args: -run_type full -dm_refine 3 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_max_it 5 -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_ksp_max_it 10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_inner_pc_jacobi_type rowsum -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi -fieldsplit_pressure_upper_pc_jacobi_type rowsum -snes_converged_reason -ksp_converged_reason -snes_view
  # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: fetidp_2d_tri
    requires: triangle mumps
    filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=41/linear solver iterations=42/g"
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: fetidp_3d_tet_smumps
    output_file: output/ex62_fetidp_3d_tet.out
    requires: ctetgen suitesparse mumps
    filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=10[0-9]/linear solver iterations=100/g" | sed -e "s/linear solver iterations=9[0-9]/linear solver iterations=100/g"
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat -dim 3 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -test_partition -fetidp_bddc_sub_schurs_mat_solver_type mumps
  test:
    suffix: fetidp_3d_tet_spetsc
    requires: ctetgen suitesparse
    filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=10[0-9]/linear solver iterations=100/g" | sed -e "s/linear solver iterations=9[0-9]/linear solver iterations=100/g"
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat -dim 3 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -test_partition -fetidp_bddc_sub_schurs_mat_solver_type petsc -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack
  test:
    suffix: fetidp_2d_quad
    requires: mumps double
    filter: grep -v "variant HERMITIAN"
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -simplex 0 -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: fetidp_3d_hex
    requires: suitesparse
    filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=7[0-9]/linear solver iterations=71/g"
    nsize: 5
    args: -run_type full -dm_refine 1 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -dim 3 -simplex 0 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack
  # Convergence
  test:
    suffix: 2d_quad_q1_p0_conv
    requires: !single
    args: -run_type full -bc_type dirichlet -simplex 0 -interpolate 1 -dm_refine 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 \
          -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \
          -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
          -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
          -fieldsplit_velocity_pc_type lu \
          -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  test:
    suffix: 2d_tri_p2_p1_conv
    requires: triangle !single
    args: -run_type full -sol_type cubic -bc_type dirichlet -interpolate 1 -dm_refine 0 \
          -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
          -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \
          -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
          -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
          -fieldsplit_velocity_pc_type lu \
          -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  test:
    suffix: 2d_quad_q2_q1_conv
    requires: !single
    args: -run_type full -sol_type cubic -bc_type dirichlet -simplex 0 -interpolate 1 -dm_refine 0 \
          -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
          -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \
          -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
          -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
          -fieldsplit_velocity_pc_type lu \
          -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  test:
    suffix: 2d_quad_q2_p1_conv
    requires: !single
    args: -run_type full -sol_type cubic -bc_type dirichlet -simplex 0 -interpolate 1 -dm_refine 0 \
          -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pres_petscspace_poly_tensor 0 -pres_petscdualspace_lagrange_continuity 0 \
          -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \
          -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
          -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
          -fieldsplit_velocity_pc_type lu \
          -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # GMG solver
  test:
    suffix: 2d_tri_p2_p1_gmg_vcycle
    requires: triangle
    args: -run_type full -sol_type cubic -bc_type dirichlet -interpolate 1 -cells 2,2 -dm_refine_hierarchy 1 \
          -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
          -snes_convergence_estimate -convest_num_refine 1 -snes_error_if_not_converged \
          -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
          -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
          -fieldsplit_velocity_pc_type mg \
          -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Vanka solver
  test:
    suffix: 2d_quad_q1_p0_vanka_add
    requires: double !complex
    filter: sed -e "s/linear solver iterations=[0-9][0-9]*""/linear solver iterations=49/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations [0-9][0-9]*""/Linear solve converged due to CONVERGED_RTOL iterations 49/g"
    args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 1 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
          -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \
          -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_converged_reason \
          -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
          -sub_ksp_type preonly -sub_pc_type lu
  # Vanka solver, forming dense inverses on patches
  test:
    suffix: 2d_quad_q1_p0_vanka_add_dense_inverse
    requires: double !complex
    filter: sed -e "s/linear solver iterations=[0-9][0-9]*""/linear solver iterations=49/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations [0-9][0-9]*""/Linear solve converged due to CONVERGED_RTOL iterations 49/g"
    args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 1 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
          -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \
          -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_converged_reason \
          -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
          -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  test:
    suffix: 2d_quad_q1_p0_vanka_add_unity
    requires: double !complex
    filter: sed -e "s/linear solver iterations=[0-9][0-9]*""/linear solver iterations=45/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations [0-9][0-9]*""/Linear solve converged due to CONVERGED_RTOL iterations 45/g"
    args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 1 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
          -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \
          -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_converged_reason \
          -pc_type patch -pc_patch_partition_of_unity 1 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
          -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_quad_q2_q1_vanka_add
    requires: double !complex
    filter: sed -e "s/linear solver iterations=[0-9][0-9][0-9]*""/linear solver iterations=489/g"
    args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 0 -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
          -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \
          -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged \
          -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_dim 0 -pc_patch_construct_type vanka \
          -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_quad_q2_q1_vanka_add_unity
    requires: double !complex
    filter: sed -e "s/linear solver iterations=[0-9][0-9][0-9]*""/linear solver iterations=795/g"
    args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 0 -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
          -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \
          -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged \
          -pc_type patch -pc_patch_partition_of_unity 1 -pc_patch_construct_dim 0 -pc_patch_construct_type vanka \
          -sub_ksp_type preonly -sub_pc_type lu
  # Vanka smoother
  test:
    suffix: 2d_quad_q1_p0_gmg_vanka_add
    requires: double !complex long_runtime
    args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine_hierarchy 3 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
          -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \
          -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_monitor_true_residual \
          -pc_type mg -pc_mg_levels 3 \
          -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 -mg_levels_ksp_monitor_true_residual_no \
          -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
          -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
          -mg_coarse_pc_type svd

  test:
    requires: !single
    suffix: bddc_quad
    nsize: 2
    args: -run_type full -dm_refine 1 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type gmres -ksp_rtol 1.e-8 -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd -simplex 0 -petscpartitioner_type simple -ksp_monitor_short -pc_bddc_symmetric 0

TEST*/