Actual source code: ex62.c
petsc-3.8.4 2018-03-24
static char help[] = "Stokes Problem in 2d and 3d with simplicial finite elements.\n\
We solve the Stokes problem in a rectangular\n\
domain, using a parallel unstructured mesh (DMPLEX) to discretize it.\n\n\n";

/*
We discretize the isoviscous Stokes problem using the finite element method
on an unstructured mesh. The weak form equations are

  < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > + < v, f > = 0
  < q, \nabla\cdot u >                                                    = 0

We start with homogeneous Dirichlet conditions. We will expand this as the set
of test problems is developed.

Discretization:

We use PetscFE to generate a tabulation of the finite element basis functions
at quadrature points. We can currently generate an arbitrary-order Lagrange
element.

Field Data:

  DMPlex data is organized by point, and the closure operation just stacks up the
  data from each sieve point in the closure. Thus, for a P_2-P_1 Stokes element, we
  have

    cl{e} = {f e_0 e_1 e_2 v_0 v_1 v_2}
    x     = [u_{e_0} v_{e_0} u_{e_1} v_{e_1} u_{e_2} v_{e_2} u_{v_0} v_{v_0} p_{v_0} u_{v_1} v_{v_1} p_{v_1} u_{v_2} v_{v_2} p_{v_2}]

  The problem here is that we would like to loop over each field separately for
  integration. Therefore, the closure visitor in DMPlexVecGetClosure() reorders
  the data so that each field is contiguous

    x' = [u_{e_0} v_{e_0} u_{e_1} v_{e_1} u_{e_2} v_{e_2} u_{v_0} v_{v_0} u_{v_1} v_{v_1} u_{v_2} v_{v_2} p_{v_0} p_{v_1} p_{v_2}]

  Likewise, DMPlexVecSetClosure() takes data partitioned by field, and correctly
  puts it into the Sieve ordering.
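
  For example (a sketch only, not part of this example's code path), the
  field-major closure values of a local vector locU on a cell c could be
  examined with

    PetscScalar *vals = NULL;
    PetscInt     Nv;
    DMPlexVecGetClosure(dm, NULL, locU, c, &Nv, &vals);
    /* vals[] holds all velocity dofs first, then all pressure dofs */
    DMPlexVecRestoreClosure(dm, NULL, locU, c, &Nv, &vals);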

Next Steps:

- Refine and show convergence of correct order automatically (use femTest.py)
- Fix InitialGuess for arbitrary disc (means making dual application work again)
- Redo slides from GUCASTutorial for this new example

For tensor product meshes, see SNES ex67, ex72
*/
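
/*
  The weak form above is implemented by the pointwise kernels below and wired
  up in SetupProblem(): f0_u/f1_u give the momentum residual, f0_p/f1_p the
  continuity residual, and g1_pu, g2_up, g3_uu the Jacobian blocks. The exact
  calls (repeated here from SetupProblem() for orientation) are

    PetscDSSetResidual(prob, 0, f0_u, f1_u);
    PetscDSSetResidual(prob, 1, f0_p, f1_p);
    PetscDSSetJacobian(prob, 0, 0, NULL, NULL, NULL, g3_uu);
    PetscDSSetJacobian(prob, 0, 1, NULL, NULL, g2_up, NULL);
    PetscDSSetJacobian(prob, 1, 0, NULL, g1_pu, NULL, NULL);
*/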

#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscds.h>

PetscInt spatialDim = 0;

typedef enum {NEUMANN, DIRICHLET} BCType;
typedef enum {RUN_FULL, RUN_TEST} RunType;

typedef struct {
  PetscInt      debug;             /* The debugging level */
  RunType       runType;           /* Whether to run tests, or solve the full problem */
  PetscLogEvent createMeshEvent;
  PetscBool     showInitial, showSolution, showError;
  /* Domain and mesh definition */
  PetscInt      dim;               /* The topological mesh dimension */
  PetscBool     interpolate;       /* Generate intermediate mesh elements */
  PetscBool     simplex;           /* Use simplices or tensor product cells */
  PetscReal     refinementLimit;   /* The largest allowable cell volume */
  PetscBool     testPartition;     /* Use a fixed partitioning for testing */
  /* Problem definition */
  BCType        bcType;
  PetscErrorCode (**exactFuncs)(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx);
} AppCtx;

PetscErrorCode zero_scalar(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = 0.0;
  return 0;
}
PetscErrorCode zero_vector(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  PetscInt d;
  for (d = 0; d < spatialDim; ++d) u[d] = 0.0;
  return 0;
}
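
/* These constant functions are used below for the initial guess in main() and,
   together with constant_p, to build the constant-pressure null space in
   CreatePressureNullSpace(). */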

/*
  In 2D we use exact solution:

    u = x^2 + y^2
    v = 2 x^2 - 2xy
    p = x + y - 1
    f_x = f_y = 3

  so that

    -\Delta u + \nabla p + f = <-4, -4> + <1, 1> + <3, 3> = 0
    \nabla \cdot u           = 2x - 2x                    = 0
*/
PetscErrorCode quadratic_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = x[0]*x[0] + x[1]*x[1];
  u[1] = 2.0*x[0]*x[0] - 2.0*x[0]*x[1];
  return 0;
}

PetscErrorCode linear_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = x[0] + x[1] - 1.0;
  return 0;
}
PetscErrorCode constant_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = 1.0;
  return 0;
}

void f0_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt c;
  for (c = 0; c < dim; ++c) f0[c] = 3.0;
}

/* gradU[comp*dim+d] = {u_x, u_y, v_x, v_y} or {u_x, u_y, u_z, v_x, v_y, v_z, w_x, w_y, w_z}
   u[Ncomp]          = {p} */
void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscInt Ncomp = dim;
  PetscInt       comp, d;

  for (comp = 0; comp < Ncomp; ++comp) {
    for (d = 0; d < dim; ++d) {
      /* f1[comp*dim+d] = 0.5*(gradU[comp*dim+d] + gradU[d*dim+comp]); */
      f1[comp*dim+d] = u_x[comp*dim+d];
    }
    f1[comp*dim+comp] -= u[Ncomp];
  }
}
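
/* Concretely, in 2D the flux term assembled above is

     f1 = [ du/dx - p, du/dy,
            dv/dx,     dv/dy - p ]

   so that testing against \nabla v reproduces
   < \nabla v, \nabla u > - < \nabla\cdot v, p >. */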

/* gradU[comp*dim+d] = {u_x, u_y, v_x, v_y} or {u_x, u_y, u_z, v_x, v_y, v_z, w_x, w_y, w_z} */
void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt d;
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] += u_x[d*dim+d];
}
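
/* The sum above is the trace of the velocity gradient, i.e. \nabla\cdot u,
   which is the continuity residual tested against q. */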

void f1_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
          const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
          const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
          PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) f1[d] = 0.0;
}

/* < q, \nabla\cdot u >
   NcompI = 1, NcompJ = dim */
void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
           PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d*dim+d] = 1.0; /* \frac{\partial\phi^{u_d}}{\partial x_d} */
}

/* -< \nabla\cdot v, p >
   NcompI = dim, NcompJ = 1 */
void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux,
           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
           PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* \frac{\partial\psi^{u_d}}{\partial x_d} */
}

/* < \nabla v, \nabla u + {\nabla u}^T >
   This sets only the \nabla u part; the {\nabla u}^T part would also require
   the corresponding off-diagonal (transposed) entries. */
void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
           PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscInt Ncomp = dim;
  PetscInt       compI, d;

  for (compI = 0; compI < Ncomp; ++compI) {
    for (d = 0; d < dim; ++d) {
      g3[((compI*Ncomp+compI)*dim+d)*dim+d] = 1.0;
    }
  }
}
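
/* A sketch (not used by this example, whose residual f1_u also keeps only
   \nabla u) of the extra entries the transpose term would contribute:

     for (compI = 0; compI < Ncomp; ++compI)
       for (d = 0; d < dim; ++d)
         g3[((compI*Ncomp+d)*dim+d)*dim+compI] += 1.0;
*/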

/*
  In 3D we use exact solution:

    u = x^2 + y^2
    v = y^2 + z^2
    w = x^2 + y^2 - 2(x+y)z
    p = x + y + z - 3/2
    f_x = f_y = f_z = 3

  so that

    -\Delta u + \nabla p + f = <-4, -4, -4> + <1, 1, 1> + <3, 3, 3> = 0
    \nabla \cdot u           = 2x + 2y - 2(x + y)                   = 0
*/
PetscErrorCode quadratic_u_3d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx)
{
  u[0] = x[0]*x[0] + x[1]*x[1];
  u[1] = x[1]*x[1] + x[2]*x[2];
  u[2] = x[0]*x[0] + x[1]*x[1] - 2.0*(x[0] + x[1])*x[2];
  return 0;
}

PetscErrorCode linear_p_3d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx)
{
  *p = x[0] + x[1] + x[2] - 1.5;
  return 0;
}

PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  const char *bcTypes[2]  = {"neumann", "dirichlet"};
  const char *runTypes[2] = {"full", "test"};
  PetscInt    bc, run;

  options->debug           = 0;
  options->runType         = RUN_FULL;
  options->dim             = 2;
  options->interpolate     = PETSC_FALSE;
  options->simplex         = PETSC_TRUE;
  options->refinementLimit = 0.0;
  options->testPartition   = PETSC_FALSE;
  options->bcType          = DIRICHLET;
  options->showInitial     = PETSC_FALSE;
  options->showSolution    = PETSC_TRUE;
  options->showError       = PETSC_FALSE;

  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  PetscOptionsInt("-debug", "The debugging level", "ex62.c", options->debug, &options->debug, NULL);
  run = options->runType;
  PetscOptionsEList("-run_type", "The run type", "ex62.c", runTypes, 2, runTypes[options->runType], &run, NULL);
  options->runType = (RunType) run;
  PetscOptionsInt("-dim", "The topological mesh dimension", "ex62.c", options->dim, &options->dim, NULL);
  spatialDim = options->dim;
  PetscOptionsBool("-interpolate", "Generate intermediate mesh elements", "ex62.c", options->interpolate, &options->interpolate, NULL);
  PetscOptionsBool("-simplex", "Use simplices or tensor product cells", "ex62.c", options->simplex, &options->simplex, NULL);
  PetscOptionsReal("-refinement_limit", "The largest allowable cell volume", "ex62.c", options->refinementLimit, &options->refinementLimit, NULL);
  PetscOptionsBool("-test_partition", "Use a fixed partition for testing", "ex62.c", options->testPartition, &options->testPartition, NULL);
  bc = options->bcType;
  PetscOptionsEList("-bc_type", "Type of boundary condition", "ex62.c", bcTypes, 2, bcTypes[options->bcType], &bc, NULL);
  options->bcType = (BCType) bc;
  PetscOptionsBool("-show_initial", "Output the initial guess for verification", "ex62.c", options->showInitial, &options->showInitial, NULL);
  PetscOptionsBool("-show_solution", "Output the solution for verification", "ex62.c", options->showSolution, &options->showSolution, NULL);
  PetscOptionsBool("-show_error", "Output the error for verification", "ex62.c", options->showError, &options->showError, NULL);
  PetscOptionsEnd();

  PetscLogEventRegister("CreateMesh", DM_CLASSID, &options->createMeshEvent);
  return 0;
}
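
/* A typical invocation, taken from the test suite at the bottom of this file:

     ./ex62 -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 \
            -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
*/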

PetscErrorCode DMVecViewLocal(DM dm, Vec v)
{
  Vec lv;

  DMGetLocalVector(dm, &lv);
  DMGlobalToLocalBegin(dm, v, INSERT_VALUES, lv);
  DMGlobalToLocalEnd(dm, v, INSERT_VALUES, lv);
  DMPrintLocalVec(dm, "Local function", 0.0, lv);
  DMRestoreLocalVector(dm, &lv);
  return 0;
}

PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscInt       dim             = user->dim;
  PetscBool      interpolate     = user->interpolate;
  PetscReal      refinementLimit = user->refinementLimit;
  const PetscInt cells[3]        = {3, 3, 3};

  PetscLogEventBegin(user->createMeshEvent,0,0,0,0);
  if (user->simplex) {DMPlexCreateBoxMesh(comm, dim, dim == 2 ? 2 : 1, interpolate, dm);}
  else               {DMPlexCreateHexBoxMesh(comm, dim, cells, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, dm);}
  {
    DM refinedMesh     = NULL;
    DM distributedMesh = NULL;

    /* Refine mesh using a volume constraint */
    DMPlexSetRefinementLimit(*dm, refinementLimit);
    if (user->simplex) {DMRefine(*dm, comm, &refinedMesh);}
    if (refinedMesh) {
      DMDestroy(dm);
      *dm = refinedMesh;
    }
    /* Setup test partitioning */
    if (user->testPartition) {
      PetscInt         triSizes_n2[2]       = {4, 4};
      PetscInt         triPoints_n2[8]      = {3, 5, 6, 7, 0, 1, 2, 4};
      PetscInt         triSizes_n3[3]       = {2, 3, 3};
      PetscInt         triPoints_n3[8]      = {3, 5, 1, 6, 7, 0, 2, 4};
      PetscInt         triSizes_n5[5]       = {1, 2, 2, 1, 2};
      PetscInt         triPoints_n5[8]      = {3, 5, 6, 4, 7, 0, 1, 2};
      PetscInt         triSizes_ref_n2[2]   = {8, 8};
      PetscInt         triPoints_ref_n2[16] = {1, 5, 6, 7, 10, 11, 14, 15, 0, 2, 3, 4, 8, 9, 12, 13};
      PetscInt         triSizes_ref_n3[3]   = {5, 6, 5};
      PetscInt         triPoints_ref_n3[16] = {1, 7, 10, 14, 15, 2, 6, 8, 11, 12, 13, 0, 3, 4, 5, 9};
      PetscInt         triSizes_ref_n5[5]   = {3, 4, 3, 3, 3};
      PetscInt         triPoints_ref_n5[16] = {1, 7, 10, 2, 11, 13, 14, 5, 6, 15, 0, 8, 9, 3, 4, 12};
      const PetscInt  *sizes  = NULL;
      const PetscInt  *points = NULL;
      PetscPartitioner part;
      PetscInt         cEnd;
      PetscMPIInt      rank, size;

      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &size);
      DMPlexGetHeightStratum(*dm, 0, NULL, &cEnd);
      if (!rank) {
        if (dim == 2 && user->simplex && size == 2 && cEnd == 8) {
          sizes = triSizes_n2; points = triPoints_n2;
        } else if (dim == 2 && user->simplex && size == 3 && cEnd == 8) {
          sizes = triSizes_n3; points = triPoints_n3;
        } else if (dim == 2 && user->simplex && size == 5 && cEnd == 8) {
          sizes = triSizes_n5; points = triPoints_n5;
        } else if (dim == 2 && user->simplex && size == 2 && cEnd == 16) {
          sizes = triSizes_ref_n2; points = triPoints_ref_n2;
        } else if (dim == 2 && user->simplex && size == 3 && cEnd == 16) {
          sizes = triSizes_ref_n3; points = triPoints_ref_n3;
        } else if (dim == 2 && user->simplex && size == 5 && cEnd == 16) {
          sizes = triSizes_ref_n5; points = triPoints_ref_n5;
        } else SETERRQ(comm, PETSC_ERR_ARG_WRONG, "No stored partition matching run parameters");
      }
      DMPlexGetPartitioner(*dm, &part);
      PetscPartitionerSetType(part, PETSCPARTITIONERSHELL);
      PetscPartitionerShellSetPartition(part, size, sizes, points);
    } else {
      PetscPartitioner part;

      DMPlexGetPartitioner(*dm, &part);
      PetscPartitionerSetFromOptions(part);
    }
    /* Distribute mesh over processes */
    DMPlexDistribute(*dm, 0, NULL, &distributedMesh);
    if (distributedMesh) {
      DMDestroy(dm);
      *dm = distributedMesh;
    }
  }
  DMSetFromOptions(*dm);
  DMViewFromOptions(*dm, NULL, "-dm_view");
  PetscLogEventEnd(user->createMeshEvent,0,0,0,0);
  return 0;
}
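
/* The mesh produced here can be inspected at run time through the viewer hook
   above, e.g. (a usage sketch):

     ./ex62 -refinement_limit 0.0625 -dm_view
*/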

PetscErrorCode SetupProblem(PetscDS prob, AppCtx *user)
{
  const PetscInt id = 1;

  PetscDSSetResidual(prob, 0, f0_u, f1_u);
  PetscDSSetResidual(prob, 1, f0_p, f1_p);
  PetscDSSetJacobian(prob, 0, 0, NULL, NULL, NULL, g3_uu);
  PetscDSSetJacobian(prob, 0, 1, NULL, NULL, g2_up, NULL);
  PetscDSSetJacobian(prob, 1, 0, NULL, g1_pu, NULL, NULL);
  switch (user->dim) {
  case 2:
    user->exactFuncs[0] = quadratic_u_2d;
    user->exactFuncs[1] = linear_p_2d;
    break;
  case 3:
    user->exactFuncs[0] = quadratic_u_3d;
    user->exactFuncs[1] = linear_p_3d;
    break;
  default:
    SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Invalid dimension %d", user->dim);
  }
  PetscDSAddBoundary(prob, user->bcType == DIRICHLET ? DM_BC_ESSENTIAL : DM_BC_NATURAL, "wall", user->bcType == NEUMANN ? "boundary" : "marker", 0, 0, NULL, (void (*)(void)) user->exactFuncs[0], 1, &id, user);
  return 0;
}

PetscErrorCode SetupDiscretization(DM dm, AppCtx *user)
{
  DM              cdm = dm;
  const PetscInt  dim = user->dim;
  PetscFE         fe[2];
  PetscQuadrature q;
  PetscDS         prob;

  /* Create finite element */
  PetscFECreateDefault(dm, dim, dim, user->simplex, "vel_", PETSC_DEFAULT, &fe[0]);
  PetscObjectSetName((PetscObject) fe[0], "velocity");
  PetscFEGetQuadrature(fe[0], &q);
  PetscFECreateDefault(dm, dim, 1, user->simplex, "pres_", PETSC_DEFAULT, &fe[1]);
  PetscFESetQuadrature(fe[1], q);
  PetscObjectSetName((PetscObject) fe[1], "pressure");
  /* Set discretization and boundary conditions for each mesh */
  DMGetDS(dm, &prob);
  PetscDSSetDiscretization(prob, 0, (PetscObject) fe[0]);
  PetscDSSetDiscretization(prob, 1, (PetscObject) fe[1]);
  SetupProblem(prob, user);
  while (cdm) {
    DMSetDS(cdm, prob);
    DMGetCoarseDM(cdm, &cdm);
  }
  PetscFEDestroy(&fe[0]);
  PetscFEDestroy(&fe[1]);
  return 0;
}
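
/* The element order is chosen at run time through the prefixed PetscSpace
   options; for example, the tests below select a P_2-P_1 (Taylor-Hood) pair with

     -vel_petscspace_order 2 -pres_petscspace_order 1

   Copying the velocity quadrature onto the pressure element above keeps both
   fields integrated with the same rule. */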

PetscErrorCode CreatePressureNullSpace(DM dm, AppCtx *user, Vec *v, MatNullSpace *nullSpace)
{
  Vec vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero_vector, constant_p};

  DMGetGlobalVector(dm, &vec);
  DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec);
  VecNormalize(vec, NULL);
  if (user->debug) {
    PetscPrintf(PetscObjectComm((PetscObject)dm), "Pressure Null Space\n");
    VecView(vec, PETSC_VIEWER_STDOUT_WORLD);
  }
  MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullSpace);
  if (v) {
    DMCreateGlobalVector(dm, v);
    VecCopy(vec, *v);
  }
  DMRestoreGlobalVector(dm, &vec);
  /* New style for field null spaces */
  {
    PetscObject  pressure;
    MatNullSpace nullSpacePres;

    DMGetField(dm, 1, &pressure);
    MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullSpacePres);
    PetscObjectCompose(pressure, "nullspace", (PetscObject) nullSpacePres);
    MatNullSpaceDestroy(&nullSpacePres);
  }
  return 0;
}
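
/* The null space built here is attached to the Jacobian with MatSetNullSpace()
   in main(), so the Krylov solver projects the constant pressure mode out of
   the residual. It could also be removed from a vector explicitly (a sketch):

     MatNullSpaceRemove(nullSpace, vec);
*/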

int main(int argc, char **argv)
{
  SNES           snes;        /* nonlinear solver */
  DM             dm;          /* problem definition */
  Vec            u, r;        /* solution, residual vectors */
  Mat            A, J;        /* Jacobian matrix */
  MatNullSpace   nullSpace;   /* May be necessary for pressure */
  AppCtx         user;        /* user-defined work context */
  PetscInt       its;         /* iterations for convergence */
  PetscReal      error = 0.0; /* L_2 error in the solution */
  PetscReal      ferrors[2];
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, help);if (ierr) return ierr;
  ProcessOptions(PETSC_COMM_WORLD, &user);
  SNESCreate(PETSC_COMM_WORLD, &snes);
  CreateMesh(PETSC_COMM_WORLD, &user, &dm);
  SNESSetDM(snes, dm);
  DMSetApplicationContext(dm, &user);

  PetscMalloc1(2, &user.exactFuncs);
  SetupDiscretization(dm, &user);
  DMPlexCreateClosureIndex(dm, NULL);

  DMCreateGlobalVector(dm, &u);
  VecDuplicate(u, &r);

  DMCreateMatrix(dm, &J);
  A = J;
  CreatePressureNullSpace(dm, &user, NULL, &nullSpace);
  MatSetNullSpace(A, nullSpace);

  DMPlexSetSNESLocalFEM(dm, &user, &user, &user);
  SNESSetJacobian(snes, A, J, NULL, NULL);

  SNESSetFromOptions(snes);

  DMProjectFunction(dm, 0.0, user.exactFuncs, NULL, INSERT_ALL_VALUES, u);
  if (user.showInitial) {DMVecViewLocal(dm, u);}
  if (user.runType == RUN_FULL) {
    PetscErrorCode (*initialGuess[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero_vector, zero_scalar};

    DMProjectFunction(dm, 0.0, initialGuess, NULL, INSERT_VALUES, u);
    if (user.debug) {
      PetscPrintf(PETSC_COMM_WORLD, "Initial guess\n");
      VecView(u, PETSC_VIEWER_STDOUT_WORLD);
    }
    SNESSolve(snes, NULL, u);
    SNESGetIterationNumber(snes, &its);
    PetscPrintf(PETSC_COMM_WORLD, "Number of SNES iterations = %D\n", its);
    DMComputeL2Diff(dm, 0.0, user.exactFuncs, NULL, u, &error);
    DMComputeL2FieldDiff(dm, 0.0, user.exactFuncs, NULL, u, ferrors);
    PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: %g [%g, %g]\n", error, ferrors[0], ferrors[1]);
    if (user.showError) {
      Vec r;
      DMGetGlobalVector(dm, &r);
      DMProjectFunction(dm, 0.0, user.exactFuncs, NULL, INSERT_ALL_VALUES, r);
      VecAXPY(r, -1.0, u);
      PetscPrintf(PETSC_COMM_WORLD, "Solution Error\n");
      VecView(r, PETSC_VIEWER_STDOUT_WORLD);
      DMRestoreGlobalVector(dm, &r);
    }
    if (user.showSolution) {
      PetscPrintf(PETSC_COMM_WORLD, "Solution\n");
      VecChop(u, 3.0e-9);
      VecView(u, PETSC_VIEWER_STDOUT_WORLD);
    }
  } else {
    PetscReal res = 0.0;

    /* Check discretization error */
    PetscPrintf(PETSC_COMM_WORLD, "Initial guess\n");
    VecView(u, PETSC_VIEWER_STDOUT_WORLD);
    DMComputeL2Diff(dm, 0.0, user.exactFuncs, NULL, u, &error);
    if (error >= 1.0e-11) {PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: %g\n", error);}
    else                  {PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: < 1.0e-11\n");}
    /* Check residual */
    SNESComputeFunction(snes, u, r);
    PetscPrintf(PETSC_COMM_WORLD, "Initial Residual\n");
    VecChop(r, 1.0e-10);
    VecView(r, PETSC_VIEWER_STDOUT_WORLD);
    VecNorm(r, NORM_2, &res);
    PetscPrintf(PETSC_COMM_WORLD, "L_2 Residual: %g\n", res);
    /* Check Jacobian */
    {
      Vec       b;
      PetscBool isNull;

      SNESComputeJacobian(snes, u, A, A);
      MatNullSpaceTest(nullSpace, J, &isNull);
      VecDuplicate(u, &b);
      VecSet(r, 0.0);
      SNESComputeFunction(snes, r, b);
      MatMult(A, u, r);
      VecAXPY(r, 1.0, b);
      VecDestroy(&b);
      PetscPrintf(PETSC_COMM_WORLD, "Au - b = Au + F(0)\n");
      VecChop(r, 1.0e-10);
      VecView(r, PETSC_VIEWER_STDOUT_WORLD);
      VecNorm(r, NORM_2, &res);
      PetscPrintf(PETSC_COMM_WORLD, "Linear L_2 Residual: %g\n", res);
    }
  }
  VecViewFromOptions(u, NULL, "-sol_vec_view");

  MatNullSpaceDestroy(&nullSpace);
  if (A != J) {MatDestroy(&A);}
  MatDestroy(&J);
  VecDestroy(&u);
  VecDestroy(&r);
  SNESDestroy(&snes);
  DMDestroy(&dm);
  PetscFree(user.exactFuncs);
  ierr = PetscFinalize();
  return ierr;
}

/*TEST

  # 2D serial P1 tests 0-3
  test:
    suffix: 0
    requires: triangle
    args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 1
    requires: triangle
    args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 2
    requires: triangle
    args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 3
    requires: triangle
    args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  # 2D serial P2 tests 4-5
  test:
    suffix: 4
    requires: triangle
    args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 5
    requires: triangle
    args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  # Parallel tests 6-17
  test:
    suffix: 6
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 7
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 8
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 9
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 10
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 11
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 12
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 13
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 14
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 15
    requires: triangle
    nsize: 2
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 16
    requires: triangle
    nsize: 3
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  test:
    suffix: 17
    requires: triangle
    nsize: 5
    args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -dm_plex_print_fem 1
  # 3D serial P1 tests 43-46
  test:
    suffix: 43
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 44
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 45
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0125 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  test:
    suffix: 46
    requires: ctetgen
    args: -run_type test -dim 3 -refinement_limit 0.0125 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -show_initial -dm_plex_print_fem 1
  # Full solutions 18-29
  test:
    suffix: 18
    requires: triangle
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 19
    requires: triangle
    nsize: 2
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 20
    requires: triangle
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 3
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 21
    requires: triangle
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 5
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 22
    requires: triangle
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 23
    requires: triangle
    nsize: 2
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 24
    requires: triangle
    nsize: 3
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 25
    requires: triangle
    filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g"
    nsize: 5
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 1 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 26
    requires: triangle
    args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 27
    requires: triangle
    nsize: 2
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 28
    requires: triangle
    nsize: 3
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  test:
    suffix: 29
    requires: triangle
    nsize: 5
    args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view
  # Full solutions with quads
  #   FULL Schur with LU/Jacobi
  test:
    suffix: quad_q2q1_full
    requires: !single
    args: -run_type full -simplex 0 -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  test:
    suffix: quad_q2p1_full
    requires: !single
    args: -run_type full -simplex 0 -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -pres_petscspace_poly_tensor 0 -pres_petscdualspace_lagrange_continuity 0 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  # Stokes preconditioners 30-36
  #   Jacobi
  test:
    suffix: 30
    requires: triangle
    filter: sed -e "s/total number of linear solver iterations=756/total number of linear solver iterations=757/g"
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_gmres_restart 100 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
  test:
    suffix: 31
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
  test:
    suffix: 32
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
  test:
    suffix: 33
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
  test:
    suffix: 34
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B & S \end{pmatrix}
  test:
    suffix: 35
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 36
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: pc_simple
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view -show_solution 0
  #   SIMPLEC \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T rowsum(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & rowsum(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: pc_simplec
    requires: triangle
    args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -ksp_type fgmres -ksp_max_it 5 -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_inner_pc_jacobi_type rowsum -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi -fieldsplit_pressure_upper_pc_jacobi_type rowsum -snes_converged_reason -ksp_converged_reason -snes_view -show_solution 0
  # FETI-DP solvers
  test:
    suffix: fetidp_2d_tri
    requires: triangle suitesparse
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -snes_view -snes_error_if_not_converged -show_solution 0 -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_vertex_size 2 -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_package umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_package umfpack -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: fetidp_3d_tet
    requires: ctetgen mumps suitesparse
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -snes_view -snes_error_if_not_converged -show_solution 0 -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_vertex_size 3 -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat -dim 3 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_package cholmod -fetidp_harmonic_pc_type cholesky -fetidp_bddelta_pc_factor_mat_solver_package umfpack -fetidp_fieldsplit_lag_ksp_type preonly
    filter: sed -s "s/linear solver iterations=10[0-9]/linear solver iterations=100/g"
  test:
    suffix: fetidp_2d_quad
    requires: suitesparse
    filter: grep -v "CG or CGNE: variant" | sed -e "s/BDDC: Graph max count: 9223372036854775807/BDDC: Graph max count: 2147483647/g"
    nsize: 5
    args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -snes_view -snes_error_if_not_converged -show_solution 0 -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_vertex_size 2 -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_package umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_package umfpack -simplex 0 -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: fetidp_3d_hex
    requires: suitesparse
    nsize: 5
    args: -run_type full -dm_refine 1 -bc_type dirichlet -interpolate 1 -vel_petscspace_order 2 -pres_petscspace_order 1 -snes_view -snes_error_if_not_converged -show_solution 0 -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_vertex_size 3 -dim 3 -simplex 0 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_package cholmod -fetidp_harmonic_pc_type cholesky -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_package umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_package umfpack

TEST*/