Actual source code: ex6.c
1: /*
2: Note:
3: -hratio is the ratio between the mesh size of the coarse grid and that of the fine grid
4: -ts_rk_dtratio is the ratio between the large step size and the small step size
5: */
7: static const char help[] = "1D periodic Finite Volume solver in slope-limiter form with semidiscrete time stepping.\n"
8: " advection - Constant coefficient scalar advection\n"
9: " u_t + (a*u)_x = 0\n"
10: " for this toy problem, we choose different meshsizes for different sub-domains (slow-fast-slow), say\n"
11: " hxs = hratio*hxf \n"
12: " where hxs and hxf are the grid spacings for coarse and fine grids respectively.\n"
13: " exact - Exact Riemann solver which usually needs to perform a Newton iteration to connect\n"
14: " the states across shocks and rarefactions\n"
15: " simulation - use reference solution which is generated by smaller time step size to be true solution,\n"
16: " also the reference solution should be generated by user and stored in a binary file.\n"
17: " characteristic - Limit the characteristic variables, this is usually preferred (default)\n"
18: "Several initial conditions can be chosen with -initial N\n\n"
19: "The problem size should be set with -da_grid_x M\n\n";
21: #include <petscts.h>
22: #include <petscdm.h>
23: #include <petscdmda.h>
24: #include <petscdraw.h>
25: #include "finitevolume1d.h"
27: static inline PetscReal RangeMod(PetscReal a, PetscReal xmin, PetscReal xmax)
28: {
29: PetscReal range = xmax - xmin;
30: return xmin + PetscFmodReal(range + PetscFmodReal(a, range), range);
31: }
33: /* --------------------------------- Advection ----------------------------------- */
34: typedef struct {
35: PetscReal a; /* advective velocity */
36: } AdvectCtx;
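/*
   Upwind flux for constant-coefficient advection: the Riemann solver below returns
   flux = a*uL when a >= 0 and flux = a*uR when a < 0, i.e. the interface flux is taken
   from the upwind side.  The signed speed a is reported as maxspeed; the RHS functions
   take its absolute value for the CFL estimate.
*/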
38: static PetscErrorCode PhysicsRiemann_Advect(void *vctx, PetscInt m, const PetscScalar *uL, const PetscScalar *uR, PetscScalar *flux, PetscReal *maxspeed)
39: {
40: AdvectCtx *ctx = (AdvectCtx *)vctx;
41: PetscReal speed;
43: PetscFunctionBeginUser;
44: speed = ctx->a;
45: flux[0] = PetscMax(0, speed) * uL[0] + PetscMin(0, speed) * uR[0];
46: *maxspeed = speed;
47: PetscFunctionReturn(PETSC_SUCCESS);
48: }
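/*
   For a scalar conservation law the characteristic decomposition is trivial: the
   eigenvector matrices are R = Rinv = 1 and the single wave speed is a, so limiting in
   characteristic variables coincides with limiting the conserved variable itself.
*/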
50: static PetscErrorCode PhysicsCharacteristic_Advect(void *vctx, PetscInt m, const PetscScalar *u, PetscScalar *X, PetscScalar *Xi, PetscReal *speeds)
51: {
52: AdvectCtx *ctx = (AdvectCtx *)vctx;
54: PetscFunctionBeginUser;
55: X[0] = 1.;
56: Xi[0] = 1.;
57: speeds[0] = ctx->a;
58: PetscFunctionReturn(PETSC_SUCCESS);
59: }
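/*
   Exact solution sampler: constant-coefficient advection simply translates the initial
   profile, u(x,t) = u0(x - a*t).  The profile u0 is selected with -initial 0..7 (step
   functions, a box, sin(2*pi*x), |x|, a sin^2 bump, a hat function, sin^10(pi*x)); for
   FVBC_PERIODIC the foot of the characteristic is wrapped back into [xmin, xmax) with
   RangeMod().
*/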
61: static PetscErrorCode PhysicsSample_Advect(void *vctx, PetscInt initial, FVBCType bctype, PetscReal xmin, PetscReal xmax, PetscReal t, PetscReal x, PetscReal *u)
62: {
63: AdvectCtx *ctx = (AdvectCtx *)vctx;
64: PetscReal a = ctx->a, x0;
66: PetscFunctionBeginUser;
67: switch (bctype) {
68: case FVBC_OUTFLOW:
69: x0 = x - a * t;
70: break;
71: case FVBC_PERIODIC:
72: x0 = RangeMod(x - a * t, xmin, xmax);
73: break;
74: default:
75: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "unknown BCType");
76: }
77: switch (initial) {
78: case 0:
79: u[0] = (x0 < 0) ? 1 : -1;
80: break;
81: case 1:
82: u[0] = (x0 < 0) ? -1 : 1;
83: break;
84: case 2:
85: u[0] = (0 < x0 && x0 < 1) ? 1 : 0;
86: break;
87: case 3:
88: u[0] = PetscSinReal(2 * PETSC_PI * x0);
89: break;
90: case 4:
91: u[0] = PetscAbs(x0);
92: break;
93: case 5:
94: u[0] = (x0 < 0 || x0 > 0.5) ? 0 : PetscSqr(PetscSinReal(2 * PETSC_PI * x0));
95: break;
96: case 6:
97: u[0] = (x0 < 0) ? 0 : ((x0 < 1) ? x0 : ((x0 < 2) ? 2 - x0 : 0));
98: break;
99: case 7:
100: u[0] = PetscPowReal(PetscSinReal(PETSC_PI * x0), 10.0);
101: break;
102: default:
103: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "unknown initial condition");
104: }
105: PetscFunctionReturn(PETSC_SUCCESS);
106: }
108: static PetscErrorCode PhysicsCreate_Advect(FVCtx *ctx)
109: {
110: AdvectCtx *user;
112: PetscFunctionBeginUser;
113: PetscCall(PetscNew(&user));
114: ctx->physics2.sample2 = PhysicsSample_Advect;
115: ctx->physics2.riemann2 = PhysicsRiemann_Advect;
116: ctx->physics2.characteristic2 = PhysicsCharacteristic_Advect;
117: ctx->physics2.destroy = PhysicsDestroy_SimpleFree;
118: ctx->physics2.user = user;
119: ctx->physics2.dof = 1;
120: PetscCall(PetscStrallocpy("u", &ctx->physics2.fieldname[0]));
121: user->a = 1;
122: PetscOptionsBegin(ctx->comm, ctx->prefix, "Options for advection", "");
123: {
124: PetscCall(PetscOptionsReal("-physics_advect_a", "Speed", "", user->a, &user->a, NULL));
125: }
126: PetscOptionsEnd();
127: PetscFunctionReturn(PETSC_SUCCESS);
128: }
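/*
   FVSample_2WaySplit fills U with approximate cell averages of the exact solution: for
   each cell, the average (1/h) * integral of u(x,t) over the cell is approximated by a
   composite trapezoid rule with N = 200 subintervals, using the cell width hs in the two
   slow regions (i < sf, i >= fs) and hf in the fast region (sf <= i < fs).
*/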
130: PetscErrorCode FVSample_2WaySplit(FVCtx *ctx, DM da, PetscReal time, Vec U)
131: {
132: PetscScalar *u, *uj, xj, xi;
133: PetscInt i, j, k, dof, xs, xm, Mx;
134: const PetscInt N = 200;
135: PetscReal hs, hf;
137: PetscFunctionBeginUser;
138: PetscCheck(ctx->physics2.sample2, PETSC_COMM_SELF, PETSC_ERR_SUP, "Physics has not provided a sampling function");
139: PetscCall(DMDAGetInfo(da, 0, &Mx, 0, 0, 0, 0, 0, &dof, 0, 0, 0, 0, 0));
140: PetscCall(DMDAGetCorners(da, &xs, 0, 0, &xm, 0, 0));
141: PetscCall(DMDAVecGetArray(da, U, &u));
142: PetscCall(PetscMalloc1(dof, &uj));
143: hs = (ctx->xmax - ctx->xmin) * 3.0 / 8.0 / ctx->sf;
144: hf = (ctx->xmax - ctx->xmin) / 4.0 / (ctx->fs - ctx->sf);
145: for (i = xs; i < xs + xm; i++) {
146: if (i < ctx->sf) {
147: xi = ctx->xmin + 0.5 * hs + i * hs;
148: /* Integrate over cell i using the composite trapezoid rule with N+1 points. */
149: for (k = 0; k < dof; k++) u[i * dof + k] = 0;
150: for (j = 0; j < N + 1; j++) {
151: xj = xi + hs * (j - N / 2) / (PetscReal)N;
152: PetscCall((*ctx->physics2.sample2)(ctx->physics2.user, ctx->initial, ctx->bctype, ctx->xmin, ctx->xmax, time, xj, uj));
153: for (k = 0; k < dof; k++) u[i * dof + k] += ((j == 0 || j == N) ? 0.5 : 1.0) * uj[k] / N;
154: }
155: } else if (i < ctx->fs) {
156: xi = ctx->xmin + ctx->sf * hs + 0.5 * hf + (i - ctx->sf) * hf;
157: /* Integrate over cell i using the composite trapezoid rule with N+1 points. */
158: for (k = 0; k < dof; k++) u[i * dof + k] = 0;
159: for (j = 0; j < N + 1; j++) {
160: xj = xi + hf * (j - N / 2) / (PetscReal)N;
161: PetscCall((*ctx->physics2.sample2)(ctx->physics2.user, ctx->initial, ctx->bctype, ctx->xmin, ctx->xmax, time, xj, uj));
162: for (k = 0; k < dof; k++) u[i * dof + k] += ((j == 0 || j == N) ? 0.5 : 1.0) * uj[k] / N;
163: }
164: } else {
165: xi = ctx->xmin + ctx->sf * hs + (ctx->fs - ctx->sf) * hf + 0.5 * hs + (i - ctx->fs) * hs;
166: /* Integrate over cell i using the composite trapezoid rule with N+1 points. */
167: for (k = 0; k < dof; k++) u[i * dof + k] = 0;
168: for (j = 0; j < N + 1; j++) {
169: xj = xi + hs * (j - N / 2) / (PetscReal)N;
170: PetscCall((*ctx->physics2.sample2)(ctx->physics2.user, ctx->initial, ctx->bctype, ctx->xmin, ctx->xmax, time, xj, uj));
171: for (k = 0; k < dof; k++) u[i * dof + k] += ((j == 0 || j == N) ? 0.5 : 1.0) * uj[k] / N;
172: }
173: }
174: }
175: PetscCall(DMDAVecRestoreArray(da, U, &u));
176: PetscCall(PetscFree(uj));
177: PetscFunctionReturn(PETSC_SUCCESS);
178: }
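/*
   Discrete L1 error against the sampled exact solution: each cell contributes
   h_i*|X_i - Y_i|, with h_i = hs in the slow regions and h_i = hf in the fast region, so
   the norm is consistent with the nonuniform cell widths.
*/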
180: static PetscErrorCode SolutionErrorNorms_2WaySplit(FVCtx *ctx, DM da, PetscReal t, Vec X, PetscReal *nrm1)
181: {
182: Vec Y;
183: PetscInt i, Mx;
184: const PetscScalar *ptr_X, *ptr_Y;
185: PetscReal hs, hf;
187: PetscFunctionBeginUser;
188: PetscCall(VecGetSize(X, &Mx));
189: PetscCall(VecDuplicate(X, &Y));
190: PetscCall(FVSample_2WaySplit(ctx, da, t, Y));
191: hs = (ctx->xmax - ctx->xmin) * 3.0 / 8.0 / ctx->sf;
192: hf = (ctx->xmax - ctx->xmin) / 4.0 / (ctx->fs - ctx->sf);
193: PetscCall(VecGetArrayRead(X, &ptr_X));
194: PetscCall(VecGetArrayRead(Y, &ptr_Y));
195: for (i = 0; i < Mx; i++) {
196: if (i < ctx->sf || i > ctx->fs - 1) *nrm1 += hs * PetscAbs(ptr_X[i] - ptr_Y[i]);
197: else *nrm1 += hf * PetscAbs(ptr_X[i] - ptr_Y[i]);
198: }
199: PetscCall(VecRestoreArrayRead(X, &ptr_X));
200: PetscCall(VecRestoreArrayRead(Y, &ptr_Y));
201: PetscCall(VecDestroy(&Y));
202: PetscFunctionReturn(PETSC_SUCCESS);
203: }
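/*
   FVRHSFunction_2WaySplit is the monolithic semidiscrete right-hand side: after filling
   ghost cells, it computes limited slopes in characteristic variables, reconstructs
   left/right states at every interface, solves a Riemann problem there, and accumulates

       du_i/dt = -(F_{i+1/2} - F_{i-1/2})/h_i,

   where h_i = hxs in the slow regions and h_i = hxf in the fast region; each flux
   difference is divided by the width of the cell it updates.  At the two slow/fast
   interfaces (i == sf and i == fs) the reconstruction uses hxs on the slow side and hxf
   on the fast side.
*/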
205: PetscErrorCode FVRHSFunction_2WaySplit(TS ts, PetscReal time, Vec X, Vec F, void *vctx)
206: {
207: FVCtx *ctx = (FVCtx *)vctx;
208: PetscInt i, j, k, Mx, dof, xs, xm, sf = ctx->sf, fs = ctx->fs;
209: PetscReal hxf, hxs, cfl_idt = 0;
210: PetscScalar *x, *f, *slope;
211: Vec Xloc;
212: DM da;
214: PetscFunctionBeginUser;
215: PetscCall(TSGetDM(ts, &da));
216: PetscCall(DMGetLocalVector(da, &Xloc)); /* Xloc contains ghost points */
217: PetscCall(DMDAGetInfo(da, 0, &Mx, 0, 0, 0, 0, 0, &dof, 0, 0, 0, 0, 0)); /* Mx is the number of center points */
218: hxs = (ctx->xmax - ctx->xmin) * 3.0 / 8.0 / ctx->sf;
219: hxf = (ctx->xmax - ctx->xmin) / 4.0 / (ctx->fs - ctx->sf);
220: PetscCall(DMGlobalToLocalBegin(da, X, INSERT_VALUES, Xloc)); /* X is solution vector which does not contain ghost points */
221: PetscCall(DMGlobalToLocalEnd(da, X, INSERT_VALUES, Xloc));
223: PetscCall(VecZeroEntries(F)); /* F is the right-hand side function corresponding to the cell-center points */
225: PetscCall(DMDAVecGetArray(da, Xloc, &x));
226: PetscCall(DMDAVecGetArray(da, F, &f));
227: PetscCall(DMDAGetArray(da, PETSC_TRUE, &slope)); /* contains ghost points */
229: PetscCall(DMDAGetCorners(da, &xs, 0, 0, &xm, 0, 0));
231: if (ctx->bctype == FVBC_OUTFLOW) {
232: for (i = xs - 2; i < 0; i++) {
233: for (j = 0; j < dof; j++) x[i * dof + j] = x[j];
234: }
235: for (i = Mx; i < xs + xm + 2; i++) {
236: for (j = 0; j < dof; j++) x[i * dof + j] = x[(xs + xm - 1) * dof + j];
237: }
238: }
239: for (i = xs - 1; i < xs + xm + 1; i++) {
240: struct _LimitInfo info;
241: PetscScalar *cjmpL, *cjmpR;
242: /* Determine the right eigenvectors R, where A = R \Lambda R^{-1} */
243: PetscCall((*ctx->physics2.characteristic2)(ctx->physics2.user, dof, &x[i * dof], ctx->R, ctx->Rinv, ctx->speeds));
244: /* Evaluate jumps across interfaces (i-1, i) and (i, i+1), put in characteristic basis */
245: PetscCall(PetscArrayzero(ctx->cjmpLR, 2 * dof));
246: cjmpL = &ctx->cjmpLR[0];
247: cjmpR = &ctx->cjmpLR[dof];
248: for (j = 0; j < dof; j++) {
249: PetscScalar jmpL, jmpR;
250: jmpL = x[(i + 0) * dof + j] - x[(i - 1) * dof + j];
251: jmpR = x[(i + 1) * dof + j] - x[(i + 0) * dof + j];
252: for (k = 0; k < dof; k++) {
253: cjmpL[k] += ctx->Rinv[k + j * dof] * jmpL;
254: cjmpR[k] += ctx->Rinv[k + j * dof] * jmpR;
255: }
256: }
257: /* Apply limiter to the left and right characteristic jumps */
258: info.m = dof;
259: info.hxs = hxs;
260: info.hxf = hxf;
261: (*ctx->limit2)(&info, cjmpL, cjmpR, ctx->sf, ctx->fs, i, ctx->cslope);
262: for (j = 0; j < dof; j++) {
263: PetscScalar tmp = 0;
264: for (k = 0; k < dof; k++) tmp += ctx->R[j + k * dof] * ctx->cslope[k];
265: slope[i * dof + j] = tmp;
266: }
267: }
269: for (i = xs; i < xs + xm + 1; i++) {
270: PetscReal maxspeed;
271: PetscScalar *uL, *uR;
272: uL = &ctx->uLR[0];
273: uR = &ctx->uLR[dof];
274: if (i < sf) { /* slow region */
275: for (j = 0; j < dof; j++) {
276: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
277: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
278: }
279: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
280: if (i > xs) {
281: for (j = 0; j < dof; j++) f[(i - 1) * dof + j] -= ctx->flux[j] / hxs;
282: }
283: if (i < xs + xm) {
284: for (j = 0; j < dof; j++) f[i * dof + j] += ctx->flux[j] / hxs;
285: }
286: } else if (i == sf) { /* interface between the slow region and the fast region */
287: for (j = 0; j < dof; j++) {
288: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
289: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxf / 2;
290: }
291: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
292: if (i > xs) {
293: for (j = 0; j < dof; j++) f[(i - 1) * dof + j] -= ctx->flux[j] / hxs;
294: }
295: if (i < xs + xm) {
296: for (j = 0; j < dof; j++) f[i * dof + j] += ctx->flux[j] / hxf;
297: }
298: } else if (i > sf && i < fs) { /* fast region */
299: for (j = 0; j < dof; j++) {
300: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxf / 2;
301: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxf / 2;
302: }
303: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
304: if (i > xs) {
305: for (j = 0; j < dof; j++) f[(i - 1) * dof + j] -= ctx->flux[j] / hxf;
306: }
307: if (i < xs + xm) {
308: for (j = 0; j < dof; j++) f[i * dof + j] += ctx->flux[j] / hxf;
309: }
310: } else if (i == fs) { /* interface between the fast region and the slow region */
311: for (j = 0; j < dof; j++) {
312: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxf / 2;
313: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
314: }
315: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
316: if (i > xs) {
317: for (j = 0; j < dof; j++) f[(i - 1) * dof + j] -= ctx->flux[j] / hxf;
318: }
319: if (i < xs + xm) {
320: for (j = 0; j < dof; j++) f[i * dof + j] += ctx->flux[j] / hxs;
321: }
322: } else { /* slow region */
323: for (j = 0; j < dof; j++) {
324: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
325: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
326: }
327: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
328: cfl_idt = PetscMax(cfl_idt, PetscAbsScalar(maxspeed / hxs)); /* Max allowable value of 1/Delta t */
329: if (i > xs) {
330: for (j = 0; j < dof; j++) f[(i - 1) * dof + j] -= ctx->flux[j] / hxs;
331: }
332: if (i < xs + xm) {
333: for (j = 0; j < dof; j++) f[i * dof + j] += ctx->flux[j] / hxs;
334: }
335: }
336: }
337: PetscCall(DMDAVecRestoreArray(da, Xloc, &x));
338: PetscCall(DMDAVecRestoreArray(da, F, &f));
339: PetscCall(DMDARestoreArray(da, PETSC_TRUE, &slope));
340: PetscCall(DMRestoreLocalVector(da, &Xloc));
341: PetscCall(MPIU_Allreduce(&cfl_idt, &ctx->cfl_idt, 1, MPIU_SCALAR, MPIU_MAX, PetscObjectComm((PetscObject)da)));
342: if (0) {
343: /* We need a way to inform the TS of a CFL constraint; this is a debugging fragment */
344: PetscReal dt, tnow;
345: PetscCall(TSGetTimeStep(ts, &dt));
346: PetscCall(TSGetTime(ts, &tnow));
347: if (dt > 0.5 / ctx->cfl_idt) {
348: if (1) {
349: PetscCall(PetscPrintf(ctx->comm, "Stability constraint exceeded at t=%g, dt %g > %g\n", (double)tnow, (double)dt, (double)(0.5 / ctx->cfl_idt)));
350: } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Stability constraint exceeded, %g > %g", (double)dt, (double)(ctx->cfl / ctx->cfl_idt));
351: }
352: }
353: PetscFunctionReturn(PETSC_SUCCESS);
354: }
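/*
   The next three functions evaluate the same fluxes as FVRHSFunction_2WaySplit, but only
   on a subset of cells, for use with the splitting registered in main(): "slow" (coarse
   cells away from the slow/fast interfaces), "slowbuffer" (a few coarse cells next to
   each interface) and "fast" (the fine cells).  Each function writes into the sub-vector
   F associated with its index set, addressed by the local counters islow/ifast rather
   than by the global cell index i.
*/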
356: /* --------------------------------- Finite Volume Solver for slow components ----------------------------------- */
357: PetscErrorCode FVRHSFunctionslow_2WaySplit(TS ts, PetscReal time, Vec X, Vec F, void *vctx)
358: {
359: FVCtx *ctx = (FVCtx *)vctx;
360: PetscInt i, j, k, Mx, dof, xs, xm, islow = 0, sf = ctx->sf, fs = ctx->fs, lsbwidth = ctx->lsbwidth, rsbwidth = ctx->rsbwidth;
361: PetscReal hxs, hxf, cfl_idt = 0;
362: PetscScalar *x, *f, *slope;
363: Vec Xloc;
364: DM da;
366: PetscFunctionBeginUser;
367: PetscCall(TSGetDM(ts, &da));
368: PetscCall(DMGetLocalVector(da, &Xloc));
369: PetscCall(DMDAGetInfo(da, 0, &Mx, 0, 0, 0, 0, 0, &dof, 0, 0, 0, 0, 0));
370: hxs = (ctx->xmax - ctx->xmin) * 3.0 / 8.0 / ctx->sf;
371: hxf = (ctx->xmax - ctx->xmin) / 4.0 / (ctx->fs - ctx->sf);
372: PetscCall(DMGlobalToLocalBegin(da, X, INSERT_VALUES, Xloc));
373: PetscCall(DMGlobalToLocalEnd(da, X, INSERT_VALUES, Xloc));
374: PetscCall(VecZeroEntries(F));
375: PetscCall(DMDAVecGetArray(da, Xloc, &x));
376: PetscCall(VecGetArray(F, &f));
377: PetscCall(DMDAGetArray(da, PETSC_TRUE, &slope));
378: PetscCall(DMDAGetCorners(da, &xs, 0, 0, &xm, 0, 0));
380: if (ctx->bctype == FVBC_OUTFLOW) {
381: for (i = xs - 2; i < 0; i++) {
382: for (j = 0; j < dof; j++) x[i * dof + j] = x[j];
383: }
384: for (i = Mx; i < xs + xm + 2; i++) {
385: for (j = 0; j < dof; j++) x[i * dof + j] = x[(xs + xm - 1) * dof + j];
386: }
387: }
388: for (i = xs - 1; i < xs + xm + 1; i++) {
389: struct _LimitInfo info;
390: PetscScalar *cjmpL, *cjmpR;
391: if (i < sf - lsbwidth + 1 || i > fs + rsbwidth - 2) { /* slow cells outside the buffer, plus the adjacent buffer cell at each end whose slope is needed for the fluxes at the edges of this split */
392: /* Determine the right eigenvectors R, where A = R \Lambda R^{-1} */
393: PetscCall((*ctx->physics2.characteristic2)(ctx->physics2.user, dof, &x[i * dof], ctx->R, ctx->Rinv, ctx->speeds));
394: /* Evaluate jumps across interfaces (i-1, i) and (i, i+1), put in characteristic basis */
395: PetscCall(PetscArrayzero(ctx->cjmpLR, 2 * dof));
396: cjmpL = &ctx->cjmpLR[0];
397: cjmpR = &ctx->cjmpLR[dof];
398: for (j = 0; j < dof; j++) {
399: PetscScalar jmpL, jmpR;
400: jmpL = x[(i + 0) * dof + j] - x[(i - 1) * dof + j];
401: jmpR = x[(i + 1) * dof + j] - x[(i + 0) * dof + j];
402: for (k = 0; k < dof; k++) {
403: cjmpL[k] += ctx->Rinv[k + j * dof] * jmpL;
404: cjmpR[k] += ctx->Rinv[k + j * dof] * jmpR;
405: }
406: }
407: /* Apply limiter to the left and right characteristic jumps */
408: info.m = dof;
409: info.hxs = hxs;
410: info.hxf = hxf;
411: (*ctx->limit2)(&info, cjmpL, cjmpR, ctx->sf, ctx->fs, i, ctx->cslope);
412: for (j = 0; j < dof; j++) {
413: PetscScalar tmp = 0;
414: for (k = 0; k < dof; k++) tmp += ctx->R[j + k * dof] * ctx->cslope[k];
415: slope[i * dof + j] = tmp;
416: }
417: }
418: }
420: for (i = xs; i < xs + xm + 1; i++) {
421: PetscReal maxspeed;
422: PetscScalar *uL, *uR;
423: uL = &ctx->uLR[0];
424: uR = &ctx->uLR[dof];
425: if (i < sf - lsbwidth) { /* slow region */
426: for (j = 0; j < dof; j++) {
427: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
428: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
429: }
430: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
431: cfl_idt = PetscMax(cfl_idt, PetscAbsScalar(maxspeed / hxs)); /* Max allowable value of 1/Delta t */
432: if (i > xs) {
433: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
434: }
435: if (i < xs + xm) {
436: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
437: islow++;
438: }
439: }
440: if (i == sf - lsbwidth) { /* interface between the slow region and the slow buffer region */
441: for (j = 0; j < dof; j++) {
442: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
443: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
444: }
445: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
446: if (i > xs) {
447: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
448: }
449: }
450: if (i == fs + rsbwidth) { /* interface between the slow buffer region and the slow region */
451: for (j = 0; j < dof; j++) {
452: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
453: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
454: }
455: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
456: if (i < xs + xm) {
457: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
458: islow++;
459: }
460: }
461: if (i > fs + rsbwidth) { /* slow region */
462: for (j = 0; j < dof; j++) {
463: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
464: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
465: }
466: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
467: if (i > xs) {
468: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
469: }
470: if (i < xs + xm) {
471: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
472: islow++;
473: }
474: }
475: }
476: PetscCall(DMDAVecRestoreArray(da, Xloc, &x));
477: PetscCall(VecRestoreArray(F, &f));
478: PetscCall(DMDARestoreArray(da, PETSC_TRUE, &slope));
479: PetscCall(DMRestoreLocalVector(da, &Xloc));
480: PetscCall(MPIU_Allreduce(&cfl_idt, &ctx->cfl_idt, 1, MPIU_SCALAR, MPIU_MAX, PetscObjectComm((PetscObject)da)));
481: PetscFunctionReturn(PETSC_SUCCESS);
482: }
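/*
   The "slowbuffer" split covers the lsbwidth coarse cells to the left of the first
   slow/fast interface (indices sf-lsbwidth..sf-1) and the rsbwidth coarse cells to the
   right of the second one (indices fs..fs+rsbwidth-1); main() sets these widths to 2 and
   4, or 4 and 2, depending on the sign of the advection speed.
   FVRHSFunctionslowbuffer_2WaySplit accumulates the flux differences for exactly those
   cells.
*/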
484: PetscErrorCode FVRHSFunctionslowbuffer_2WaySplit(TS ts, PetscReal time, Vec X, Vec F, void *vctx)
485: {
486: FVCtx *ctx = (FVCtx *)vctx;
487: PetscInt i, j, k, Mx, dof, xs, xm, islow = 0, sf = ctx->sf, fs = ctx->fs, lsbwidth = ctx->lsbwidth, rsbwidth = ctx->rsbwidth;
488: PetscReal hxs, hxf;
489: PetscScalar *x, *f, *slope;
490: Vec Xloc;
491: DM da;
493: PetscFunctionBeginUser;
494: PetscCall(TSGetDM(ts, &da));
495: PetscCall(DMGetLocalVector(da, &Xloc));
496: PetscCall(DMDAGetInfo(da, 0, &Mx, 0, 0, 0, 0, 0, &dof, 0, 0, 0, 0, 0));
497: hxs = (ctx->xmax - ctx->xmin) * 3.0 / 8.0 / ctx->sf;
498: hxf = (ctx->xmax - ctx->xmin) / 4.0 / (ctx->fs - ctx->sf);
499: PetscCall(DMGlobalToLocalBegin(da, X, INSERT_VALUES, Xloc));
500: PetscCall(DMGlobalToLocalEnd(da, X, INSERT_VALUES, Xloc));
501: PetscCall(VecZeroEntries(F));
502: PetscCall(DMDAVecGetArray(da, Xloc, &x));
503: PetscCall(VecGetArray(F, &f));
504: PetscCall(DMDAGetArray(da, PETSC_TRUE, &slope));
505: PetscCall(DMDAGetCorners(da, &xs, 0, 0, &xm, 0, 0));
507: if (ctx->bctype == FVBC_OUTFLOW) {
508: for (i = xs - 2; i < 0; i++) {
509: for (j = 0; j < dof; j++) x[i * dof + j] = x[j];
510: }
511: for (i = Mx; i < xs + xm + 2; i++) {
512: for (j = 0; j < dof; j++) x[i * dof + j] = x[(xs + xm - 1) * dof + j];
513: }
514: }
515: for (i = xs - 1; i < xs + xm + 1; i++) {
516: struct _LimitInfo info;
517: PetscScalar *cjmpL, *cjmpR;
518: if ((i > sf - lsbwidth - 2 && i < sf + 1) || (i > fs - 2 && i < fs + rsbwidth + 1)) {
519: /* Determine the right eigenvectors R, where A = R \Lambda R^{-1} */
520: PetscCall((*ctx->physics2.characteristic2)(ctx->physics2.user, dof, &x[i * dof], ctx->R, ctx->Rinv, ctx->speeds));
521: /* Evaluate jumps across interfaces (i-1, i) and (i, i+1), put in characteristic basis */
522: PetscCall(PetscArrayzero(ctx->cjmpLR, 2 * dof));
523: cjmpL = &ctx->cjmpLR[0];
524: cjmpR = &ctx->cjmpLR[dof];
525: for (j = 0; j < dof; j++) {
526: PetscScalar jmpL, jmpR;
527: jmpL = x[(i + 0) * dof + j] - x[(i - 1) * dof + j];
528: jmpR = x[(i + 1) * dof + j] - x[(i + 0) * dof + j];
529: for (k = 0; k < dof; k++) {
530: cjmpL[k] += ctx->Rinv[k + j * dof] * jmpL;
531: cjmpR[k] += ctx->Rinv[k + j * dof] * jmpR;
532: }
533: }
534: /* Apply limiter to the left and right characteristic jumps */
535: info.m = dof;
536: info.hxs = hxs;
537: info.hxf = hxf;
538: (*ctx->limit2)(&info, cjmpL, cjmpR, ctx->sf, ctx->fs, i, ctx->cslope);
539: for (j = 0; j < dof; j++) {
540: PetscScalar tmp = 0;
541: for (k = 0; k < dof; k++) tmp += ctx->R[j + k * dof] * ctx->cslope[k];
542: slope[i * dof + j] = tmp;
543: }
544: }
545: }
547: for (i = xs; i < xs + xm + 1; i++) {
548: PetscReal maxspeed;
549: PetscScalar *uL, *uR;
550: uL = &ctx->uLR[0];
551: uR = &ctx->uLR[dof];
552: if (i == sf - lsbwidth) {
553: for (j = 0; j < dof; j++) {
554: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
555: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
556: }
557: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
558: if (i < xs + xm) {
559: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
560: islow++;
561: }
562: }
563: if (i > sf - lsbwidth && i < sf) {
564: for (j = 0; j < dof; j++) {
565: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
566: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
567: }
568: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
569: if (i > xs) {
570: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
571: }
572: if (i < xs + xm) {
573: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
574: islow++;
575: }
576: }
577: if (i == sf) { /* interface between the slow region and the fast region */
578: for (j = 0; j < dof; j++) {
579: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
580: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxf / 2;
581: }
582: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
583: if (i > xs) {
584: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
585: }
586: }
587: if (i == fs) { /* interface between the fast region and the slow region */
588: for (j = 0; j < dof; j++) {
589: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxf / 2;
590: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
591: }
592: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
593: if (i < xs + xm) {
594: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
595: islow++;
596: }
597: }
598: if (i > fs && i < fs + rsbwidth) {
599: for (j = 0; j < dof; j++) {
600: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
601: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
602: }
603: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
604: if (i > xs) {
605: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
606: }
607: if (i < xs + xm) {
608: for (j = 0; j < dof; j++) f[islow * dof + j] += ctx->flux[j] / hxs;
609: islow++;
610: }
611: }
612: if (i == fs + rsbwidth) {
613: for (j = 0; j < dof; j++) {
614: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
615: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
616: }
617: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
618: if (i > xs) {
619: for (j = 0; j < dof; j++) f[(islow - 1) * dof + j] -= ctx->flux[j] / hxs;
620: }
621: }
622: }
623: PetscCall(DMDAVecRestoreArray(da, Xloc, &x));
624: PetscCall(VecRestoreArray(F, &f));
625: PetscCall(DMDARestoreArray(da, PETSC_TRUE, &slope));
626: PetscCall(DMRestoreLocalVector(da, &Xloc));
627: PetscFunctionReturn(PETSC_SUCCESS);
628: }
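/*
   The "fast" split covers the fine cells sf..fs-1.  Its fluxes are assembled the same way
   as in the monolithic RHS; the interface fluxes at i == sf and i == fs again mix hxs and
   hxf, but only the contributions to fast cells are stored here (the slow-side
   contributions at those interfaces belong to the slowbuffer split above).
*/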
630: /* --------------------------------- Finite Volume Solver for fast parts ----------------------------------- */
631: PetscErrorCode FVRHSFunctionfast_2WaySplit(TS ts, PetscReal time, Vec X, Vec F, void *vctx)
632: {
633: FVCtx *ctx = (FVCtx *)vctx;
634: PetscInt i, j, k, Mx, dof, xs, xm, ifast = 0, sf = ctx->sf, fs = ctx->fs;
635: PetscReal hxs, hxf;
636: PetscScalar *x, *f, *slope;
637: Vec Xloc;
638: DM da;
640: PetscFunctionBeginUser;
641: PetscCall(TSGetDM(ts, &da));
642: PetscCall(DMGetLocalVector(da, &Xloc));
643: PetscCall(DMDAGetInfo(da, 0, &Mx, 0, 0, 0, 0, 0, &dof, 0, 0, 0, 0, 0));
644: hxs = (ctx->xmax - ctx->xmin) * 3.0 / 8.0 / ctx->sf;
645: hxf = (ctx->xmax - ctx->xmin) / 4.0 / (ctx->fs - ctx->sf);
646: PetscCall(DMGlobalToLocalBegin(da, X, INSERT_VALUES, Xloc));
647: PetscCall(DMGlobalToLocalEnd(da, X, INSERT_VALUES, Xloc));
648: PetscCall(VecZeroEntries(F));
649: PetscCall(DMDAVecGetArray(da, Xloc, &x));
650: PetscCall(VecGetArray(F, &f));
651: PetscCall(DMDAGetArray(da, PETSC_TRUE, &slope));
652: PetscCall(DMDAGetCorners(da, &xs, 0, 0, &xm, 0, 0));
654: if (ctx->bctype == FVBC_OUTFLOW) {
655: for (i = xs - 2; i < 0; i++) {
656: for (j = 0; j < dof; j++) x[i * dof + j] = x[j];
657: }
658: for (i = Mx; i < xs + xm + 2; i++) {
659: for (j = 0; j < dof; j++) x[i * dof + j] = x[(xs + xm - 1) * dof + j];
660: }
661: }
662: for (i = xs - 1; i < xs + xm + 1; i++) {
663: struct _LimitInfo info;
664: PetscScalar *cjmpL, *cjmpR;
665: if (i > sf - 2 && i < fs + 1) {
666: PetscCall((*ctx->physics2.characteristic2)(ctx->physics2.user, dof, &x[i * dof], ctx->R, ctx->Rinv, ctx->speeds));
667: PetscCall(PetscArrayzero(ctx->cjmpLR, 2 * dof));
668: cjmpL = &ctx->cjmpLR[0];
669: cjmpR = &ctx->cjmpLR[dof];
670: for (j = 0; j < dof; j++) {
671: PetscScalar jmpL, jmpR;
672: jmpL = x[(i + 0) * dof + j] - x[(i - 1) * dof + j];
673: jmpR = x[(i + 1) * dof + j] - x[(i + 0) * dof + j];
674: for (k = 0; k < dof; k++) {
675: cjmpL[k] += ctx->Rinv[k + j * dof] * jmpL;
676: cjmpR[k] += ctx->Rinv[k + j * dof] * jmpR;
677: }
678: }
679: /* Apply limiter to the left and right characteristic jumps */
680: info.m = dof;
681: info.hxs = hxs;
682: info.hxf = hxf;
683: (*ctx->limit2)(&info, cjmpL, cjmpR, ctx->sf, ctx->fs, i, ctx->cslope);
684: for (j = 0; j < dof; j++) {
685: PetscScalar tmp = 0;
686: for (k = 0; k < dof; k++) tmp += ctx->R[j + k * dof] * ctx->cslope[k];
687: slope[i * dof + j] = tmp;
688: }
689: }
690: }
692: for (i = xs; i < xs + xm + 1; i++) {
693: PetscReal maxspeed;
694: PetscScalar *uL, *uR;
695: uL = &ctx->uLR[0];
696: uR = &ctx->uLR[dof];
697: if (i == sf) { /* interface between the slow region and the fast region */
698: for (j = 0; j < dof; j++) {
699: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxs / 2;
700: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxf / 2;
701: }
702: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
703: if (i < xs + xm) {
704: for (j = 0; j < dof; j++) f[ifast * dof + j] += ctx->flux[j] / hxf;
705: ifast++;
706: }
707: }
708: if (i > sf && i < fs) { /* fast region */
709: for (j = 0; j < dof; j++) {
710: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxf / 2;
711: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxf / 2;
712: }
713: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
714: if (i > xs) {
715: for (j = 0; j < dof; j++) f[(ifast - 1) * dof + j] -= ctx->flux[j] / hxf;
716: }
717: if (i < xs + xm) {
718: for (j = 0; j < dof; j++) f[ifast * dof + j] += ctx->flux[j] / hxf;
719: ifast++;
720: }
721: }
722: if (i == fs) { /* interface between the fast region and the slow region */
723: for (j = 0; j < dof; j++) {
724: uL[j] = x[(i - 1) * dof + j] + slope[(i - 1) * dof + j] * hxf / 2;
725: uR[j] = x[(i - 0) * dof + j] - slope[(i - 0) * dof + j] * hxs / 2;
726: }
727: PetscCall((*ctx->physics2.riemann2)(ctx->physics2.user, dof, uL, uR, ctx->flux, &maxspeed));
728: if (i > xs) {
729: for (j = 0; j < dof; j++) f[(ifast - 1) * dof + j] -= ctx->flux[j] / hxf;
730: }
731: }
732: }
733: PetscCall(DMDAVecRestoreArray(da, Xloc, &x));
734: PetscCall(VecRestoreArray(F, &f));
735: PetscCall(DMDARestoreArray(da, PETSC_TRUE, &slope));
736: PetscCall(DMRestoreLocalVector(da, &Xloc));
737: PetscFunctionReturn(PETSC_SUCCESS);
738: }
740: int main(int argc, char *argv[])
741: {
742: char lname[256] = "mc", physname[256] = "advect", final_fname[256] = "solution.m";
743: PetscFunctionList limiters = 0, physics = 0;
744: MPI_Comm comm;
745: TS ts;
746: DM da;
747: Vec X, X0, R;
748: FVCtx ctx;
749: PetscInt i, k, dof, xs, xm, Mx, draw = 0, count_slow, count_fast, islow = 0, ifast = 0, islowbuffer = 0, *index_slow, *index_fast, *index_slowbuffer;
750: PetscBool view_final = PETSC_FALSE;
751: PetscReal ptime;
753: PetscFunctionBeginUser;
754: PetscCall(PetscInitialize(&argc, &argv, 0, help));
755: comm = PETSC_COMM_WORLD;
756: PetscCall(PetscMemzero(&ctx, sizeof(ctx)));
758: /* Register limiters to be available on the command line */
759: PetscCall(PetscFunctionListAdd(&limiters, "upwind", Limit2_Upwind));
760: PetscCall(PetscFunctionListAdd(&limiters, "lax-wendroff", Limit2_LaxWendroff));
761: PetscCall(PetscFunctionListAdd(&limiters, "beam-warming", Limit2_BeamWarming));
762: PetscCall(PetscFunctionListAdd(&limiters, "fromm", Limit2_Fromm));
763: PetscCall(PetscFunctionListAdd(&limiters, "minmod", Limit2_Minmod));
764: PetscCall(PetscFunctionListAdd(&limiters, "superbee", Limit2_Superbee));
765: PetscCall(PetscFunctionListAdd(&limiters, "mc", Limit2_MC));
766: PetscCall(PetscFunctionListAdd(&limiters, "koren3", Limit2_Koren3));
768: /* Register physical models to be available on the command line */
769: PetscCall(PetscFunctionListAdd(&physics, "advect", PhysicsCreate_Advect));
771: ctx.comm = comm;
772: ctx.cfl = 0.9;
773: ctx.bctype = FVBC_PERIODIC;
774: ctx.xmin = -1.0;
775: ctx.xmax = 1.0;
776: PetscOptionsBegin(comm, NULL, "Finite Volume solver options", "");
777: PetscCall(PetscOptionsReal("-xmin", "X min", "", ctx.xmin, &ctx.xmin, NULL));
778: PetscCall(PetscOptionsReal("-xmax", "X max", "", ctx.xmax, &ctx.xmax, NULL));
779: PetscCall(PetscOptionsFList("-limit", "Name of flux limiter to use", "", limiters, lname, lname, sizeof(lname), NULL));
780: PetscCall(PetscOptionsInt("-draw", "Draw solution vector, bitwise OR of (1=initial,2=final,4=final error)", "", draw, &draw, NULL));
781: PetscCall(PetscOptionsString("-view_final", "Write final solution in ASCII MATLAB format to given file name", "", final_fname, final_fname, sizeof(final_fname), &view_final));
782: PetscCall(PetscOptionsInt("-initial", "Initial condition (depends on the physics)", "", ctx.initial, &ctx.initial, NULL));
783: PetscCall(PetscOptionsBool("-exact", "Compare errors with exact solution", "", ctx.exact, &ctx.exact, NULL));
784: PetscCall(PetscOptionsBool("-simulation", "Compare errors with reference solution", "", ctx.simulation, &ctx.simulation, NULL));
785: PetscCall(PetscOptionsReal("-cfl", "CFL number to time step at", "", ctx.cfl, &ctx.cfl, NULL));
786: PetscCall(PetscOptionsEnum("-bc_type", "Boundary condition", "", FVBCTypes, (PetscEnum)ctx.bctype, (PetscEnum *)&ctx.bctype, NULL));
787: PetscCall(PetscOptionsInt("-hratio", "Spacing ratio", "", ctx.hratio, &ctx.hratio, NULL));
788: PetscOptionsEnd();
790: /* Choose the limiter from the list of registered limiters */
791: PetscCall(PetscFunctionListFind(limiters, lname, &ctx.limit2));
792: PetscCheck(ctx.limit2, PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Limiter '%s' not found", lname);
794: /* Choose the physics from the list of registered models */
795: {
796: PetscErrorCode (*r)(FVCtx *);
797: PetscCall(PetscFunctionListFind(physics, physname, &r));
798: PetscCheck(r, PETSC_COMM_SELF, PETSC_ERR_ARG_UNKNOWN_TYPE, "Physics '%s' not found", physname);
799: /* Create the physics, will set the number of fields and their names */
800: PetscCall((*r)(&ctx));
801: }
803: /* Create a DMDA to manage the parallel grid */
804: PetscCall(DMDACreate1d(comm, DM_BOUNDARY_PERIODIC, 50, ctx.physics2.dof, 2, NULL, &da));
805: PetscCall(DMSetFromOptions(da));
806: PetscCall(DMSetUp(da));
807: /* Inform the DMDA of the field names provided by the physics. */
808: /* The names will be shown in the title bars when run with -ts_monitor_draw_solution */
809: for (i = 0; i < ctx.physics2.dof; i++) PetscCall(DMDASetFieldName(da, i, ctx.physics2.fieldname[i]));
810: PetscCall(DMDAGetInfo(da, 0, &Mx, 0, 0, 0, 0, 0, &dof, 0, 0, 0, 0, 0));
811: PetscCall(DMDAGetCorners(da, &xs, 0, 0, &xm, 0, 0));
813: /* Set coordinates of cell centers */
814: PetscCall(DMDASetUniformCoordinates(da, ctx.xmin + 0.5 * (ctx.xmax - ctx.xmin) / Mx, ctx.xmax + 0.5 * (ctx.xmax - ctx.xmin) / Mx, 0, 0, 0, 0));
816: /* Allocate work space for the Finite Volume solver (so it doesn't have to be reallocated on each function evaluation) */
817: PetscCall(PetscMalloc4(dof * dof, &ctx.R, dof * dof, &ctx.Rinv, 2 * dof, &ctx.cjmpLR, 1 * dof, &ctx.cslope));
818: PetscCall(PetscMalloc3(2 * dof, &ctx.uLR, dof, &ctx.flux, dof, &ctx.speeds));
820: /* Create a vector to store the solution and to save the initial state */
821: PetscCall(DMCreateGlobalVector(da, &X));
822: PetscCall(VecDuplicate(X, &X0));
823: PetscCall(VecDuplicate(X, &R));
825: /* create index sets for the slow parts and the fast parts;
826: count_slow + count_fast = Mx, count_slow*hs = 3*(xmax-xmin)/4, count_fast*hf = (xmax-xmin)/4 */
827: count_slow = Mx / (1.0 + ctx.hratio / 3.0);
828: PetscCheck(count_slow % 2 == 0, PETSC_COMM_WORLD, PETSC_ERR_USER, "Please adjust grid size Mx (-da_grid_x) and hratio (-hratio) so that Mx/(1+hratio/3) is even");
829: count_fast = Mx - count_slow;
830: ctx.sf = count_slow / 2;
831: ctx.fs = ctx.sf + count_fast;
832: PetscCall(PetscMalloc1(xm * dof, &index_slow));
833: PetscCall(PetscMalloc1(xm * dof, &index_fast));
834: PetscCall(PetscMalloc1(6 * dof, &index_slowbuffer));
835: if (((AdvectCtx *)ctx.physics2.user)->a > 0) {
836: ctx.lsbwidth = 2;
837: ctx.rsbwidth = 4;
838: } else {
839: ctx.lsbwidth = 4;
840: ctx.rsbwidth = 2;
841: }
842: for (i = xs; i < xs + xm; i++) {
843: if (i < ctx.sf - ctx.lsbwidth || i > ctx.fs + ctx.rsbwidth - 1)
844: for (k = 0; k < dof; k++) index_slow[islow++] = i * dof + k;
845: else if ((i >= ctx.sf - ctx.lsbwidth && i < ctx.sf) || (i > ctx.fs - 1 && i <= ctx.fs + ctx.rsbwidth - 1))
846: for (k = 0; k < dof; k++) index_slowbuffer[islowbuffer++] = i * dof + k;
847: else
848: for (k = 0; k < dof; k++) index_fast[ifast++] = i * dof + k;
849: }
850: PetscCall(ISCreateGeneral(PETSC_COMM_WORLD, islow, index_slow, PETSC_COPY_VALUES, &ctx.iss));
851: PetscCall(ISCreateGeneral(PETSC_COMM_WORLD, ifast, index_fast, PETSC_COPY_VALUES, &ctx.isf));
852: PetscCall(ISCreateGeneral(PETSC_COMM_WORLD, islowbuffer, index_slowbuffer, PETSC_COPY_VALUES, &ctx.issb));
854: /* Create a time-stepping object */
855: PetscCall(TSCreate(comm, &ts));
856: PetscCall(TSSetDM(ts, da));
857: PetscCall(TSSetRHSFunction(ts, R, FVRHSFunction_2WaySplit, &ctx));
858: PetscCall(TSRHSSplitSetIS(ts, "slow", ctx.iss));
859: PetscCall(TSRHSSplitSetIS(ts, "slowbuffer", ctx.issb));
860: PetscCall(TSRHSSplitSetIS(ts, "fast", ctx.isf));
861: PetscCall(TSRHSSplitSetRHSFunction(ts, "slow", NULL, FVRHSFunctionslow_2WaySplit, &ctx));
862: PetscCall(TSRHSSplitSetRHSFunction(ts, "fast", NULL, FVRHSFunctionfast_2WaySplit, &ctx));
863: PetscCall(TSRHSSplitSetRHSFunction(ts, "slowbuffer", NULL, FVRHSFunctionslowbuffer_2WaySplit, &ctx));
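/*
   The index sets registered above partition the unknowns into slow, slow-buffer and fast
   components, and the per-split RHS functions evaluate only their own fluxes.  The
   multirate runs in the tests below (-ts_type mprk) use this splitting to advance the
   fast components with a smaller step than the slow ones; the TSSSP type selected next
   works with the monolithic RHS function instead.
*/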
865: PetscCall(TSSetType(ts, TSSSP));
866: /*PetscCall(TSSetType(ts,TSMPRK));*/
867: PetscCall(TSSetMaxTime(ts, 10));
868: PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
870: /* Compute initial conditions and starting time step */
871: PetscCall(FVSample_2WaySplit(&ctx, da, 0, X0));
872: PetscCall(FVRHSFunction_2WaySplit(ts, 0, X0, X, (void *)&ctx)); /* Initial function evaluation, only used to determine max speed */
873: PetscCall(VecCopy(X0, X)); /* The function value was not used so we set X=X0 again */
874: PetscCall(TSSetTimeStep(ts, ctx.cfl / ctx.cfl_idt));
875: PetscCall(TSSetFromOptions(ts)); /* Take runtime options */
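/* The starting step set above is dt = cfl/cfl_idt, where cfl_idt = max|speed|/hxs was
   accumulated during the initial FVRHSFunction_2WaySplit() evaluation, i.e. the CFL
   number is applied to the coarse cells; a -ts_dt given on the command line (as in the
   tests below) overrides it. */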
876: PetscCall(SolutionStatsView(da, X, PETSC_VIEWER_STDOUT_WORLD));
877: {
878: PetscInt steps;
879: PetscScalar mass_initial, mass_final, mass_difference, mass_differenceg;
880: const PetscScalar *ptr_X, *ptr_X0;
881: const PetscReal hs = (ctx.xmax - ctx.xmin) * 3.0 / 4.0 / count_slow;
882: const PetscReal hf = (ctx.xmax - ctx.xmin) / 4.0 / count_fast;
884: PetscCall(TSSolve(ts, X));
885: PetscCall(TSGetSolveTime(ts, &ptime));
886: PetscCall(TSGetStepNumber(ts, &steps));
887: /* calculate the total mass at initial time and final time */
888: mass_initial = 0.0;
889: mass_final = 0.0;
890: PetscCall(DMDAVecGetArrayRead(da, X0, (void *)&ptr_X0));
891: PetscCall(DMDAVecGetArrayRead(da, X, (void *)&ptr_X));
892: for (i = xs; i < xs + xm; i++) {
893: if (i < ctx.sf || i > ctx.fs - 1) {
894: for (k = 0; k < dof; k++) {
895: mass_initial = mass_initial + hs * ptr_X0[i * dof + k];
896: mass_final = mass_final + hs * ptr_X[i * dof + k];
897: }
898: } else {
899: for (k = 0; k < dof; k++) {
900: mass_initial = mass_initial + hf * ptr_X0[i * dof + k];
901: mass_final = mass_final + hf * ptr_X[i * dof + k];
902: }
903: }
904: }
905: PetscCall(DMDAVecRestoreArrayRead(da, X0, (void *)&ptr_X0));
906: PetscCall(DMDAVecRestoreArrayRead(da, X, (void *)&ptr_X));
907: mass_difference = mass_final - mass_initial;
908: PetscCall(MPIU_Allreduce(&mass_difference, &mass_differenceg, 1, MPIU_SCALAR, MPIU_SUM, comm));
909: PetscCall(PetscPrintf(comm, "Mass difference %g\n", (double)mass_differenceg));
910: PetscCall(PetscPrintf(comm, "Final time %g, steps %" PetscInt_FMT "\n", (double)ptime, steps));
911: PetscCall(PetscPrintf(comm, "Maximum allowable stepsize according to CFL %g\n", (double)(1.0 / ctx.cfl_idt)));
912: if (ctx.exact) {
913: PetscReal nrm1 = 0;
914: PetscCall(SolutionErrorNorms_2WaySplit(&ctx, da, ptime, X, &nrm1));
915: PetscCall(PetscPrintf(comm, "Error ||x-x_e||_1 %g\n", (double)nrm1));
916: }
917: if (ctx.simulation) {
918: PetscReal nrm1 = 0;
919: PetscViewer fd;
920: char filename[PETSC_MAX_PATH_LEN] = "binaryoutput";
921: Vec XR;
922: PetscBool flg;
923: const PetscScalar *ptr_XR;
924: PetscCall(PetscOptionsGetString(NULL, NULL, "-f", filename, sizeof(filename), &flg));
925: PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_USER, "Must indicate binary file with the -f option");
926: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, filename, FILE_MODE_READ, &fd));
927: PetscCall(VecDuplicate(X0, &XR));
928: PetscCall(VecLoad(XR, fd));
929: PetscCall(PetscViewerDestroy(&fd));
930: PetscCall(VecGetArrayRead(X, &ptr_X));
931: PetscCall(VecGetArrayRead(XR, &ptr_XR));
932: for (i = xs; i < xs + xm; i++) {
933: if (i < ctx.sf || i > ctx.fs - 1)
934: for (k = 0; k < dof; k++) nrm1 = nrm1 + hs * PetscAbs(ptr_X[i * dof + k] - ptr_XR[i * dof + k]);
935: else
936: for (k = 0; k < dof; k++) nrm1 = nrm1 + hf * PetscAbs(ptr_X[i * dof + k] - ptr_XR[i * dof + k]);
937: }
938: PetscCall(VecRestoreArrayRead(X, &ptr_X));
939: PetscCall(VecRestoreArrayRead(XR, &ptr_XR));
940: PetscCall(PetscPrintf(comm, "Error ||x-x_e||_1 %g\n", (double)nrm1));
941: PetscCall(VecDestroy(&XR));
942: }
943: }
945: PetscCall(SolutionStatsView(da, X, PETSC_VIEWER_STDOUT_WORLD));
946: if (draw & 0x1) PetscCall(VecView(X0, PETSC_VIEWER_DRAW_WORLD));
947: if (draw & 0x2) PetscCall(VecView(X, PETSC_VIEWER_DRAW_WORLD));
948: if (draw & 0x4) {
949: Vec Y;
950: PetscCall(VecDuplicate(X, &Y));
951: PetscCall(FVSample_2WaySplit(&ctx, da, ptime, Y));
952: PetscCall(VecAYPX(Y, -1, X));
953: PetscCall(VecView(Y, PETSC_VIEWER_DRAW_WORLD));
954: PetscCall(VecDestroy(&Y));
955: }
957: if (view_final) {
958: PetscViewer viewer;
959: PetscCall(PetscViewerASCIIOpen(PETSC_COMM_WORLD, final_fname, &viewer));
960: PetscCall(PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_MATLAB));
961: PetscCall(VecView(X, viewer));
962: PetscCall(PetscViewerPopFormat(viewer));
963: PetscCall(PetscViewerDestroy(&viewer));
964: }
966: /* Clean up */
967: PetscCall((*ctx.physics2.destroy)(ctx.physics2.user));
968: for (i = 0; i < ctx.physics2.dof; i++) PetscCall(PetscFree(ctx.physics2.fieldname[i]));
969: PetscCall(PetscFree4(ctx.R, ctx.Rinv, ctx.cjmpLR, ctx.cslope));
970: PetscCall(PetscFree3(ctx.uLR, ctx.flux, ctx.speeds));
971: PetscCall(VecDestroy(&X));
972: PetscCall(VecDestroy(&X0));
973: PetscCall(VecDestroy(&R));
974: PetscCall(DMDestroy(&da));
975: PetscCall(TSDestroy(&ts));
976: PetscCall(ISDestroy(&ctx.iss));
977: PetscCall(ISDestroy(&ctx.isf));
978: PetscCall(ISDestroy(&ctx.issb));
979: PetscCall(PetscFree(index_slow));
980: PetscCall(PetscFree(index_fast));
981: PetscCall(PetscFree(index_slowbuffer));
982: PetscCall(PetscFunctionListDestroy(&limiters));
983: PetscCall(PetscFunctionListDestroy(&physics));
984: PetscCall(PetscFinalize());
985: return 0;
986: }
988: /*TEST
990: build:
991: requires: !complex
992: depends: finitevolume1d.c
994: test:
995: suffix: 1
996: args: -da_grid_x 60 -initial 7 -xmin -1 -xmax 1 -hratio 2 -limit mc -ts_dt 0.025 -ts_max_steps 24 -ts_type mprk -ts_mprk_type 2a22
998: test:
999: suffix: 2
1000: args: -da_grid_x 60 -initial 7 -xmin -1 -xmax 1 -hratio 2 -limit mc -ts_dt 0.025 -ts_max_steps 24 -ts_type mprk -ts_mprk_type 2a22 -ts_use_splitrhsfunction 0
1001: output_file: output/ex6_1.out
1003: TEST*/