Actual source code: burgers_spectral.c

static char help[] = "Solves a simple data assimilation problem with one dimensional Burgers' equation using TSAdjoint\n\n";

/* Not yet tested in parallel */

/* ------------------------------------------------------------------------

   This program uses the one-dimensional Burgers' equation
       u_t = mu*u_xx - u u_x,
   on the domain 0 <= x <= L (L = 4 by default), with periodic boundary conditions,

   to demonstrate solving a data assimilation problem: find the initial conditions
   that produce a given solution at a fixed final time.

   The operators are discretized with the spectral element method.

   See the paper PDE-CONSTRAINED OPTIMIZATION WITH SPECTRAL ELEMENTS USING PETSC AND TAO
   by OANA MARIN, EMIL CONSTANTINESCU, AND BARRY SMITH for details on the exact solution
   used.

  ------------------------------------------------------------------------- */
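
/*
   For reference, the manufactured solution realized by TrueSolution() and
   ComputeObjective() below is

       u(x,t) = 2 mu pi sin(pi x) exp(-pi^2 mu t) / (2 + exp(-pi^2 mu t) cos(pi x))

   TrueSolution() evaluates this profile at t = 0; ComputeObjective() evaluates it
   at the final time Tend; InitialConditions() adds a Gaussian bump to the t = 0
   profile so that the Tao iteration starts away from the sought solution.
*/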

#include <petsctao.h>
#include <petscts.h>
#include <petscdt.h>
#include <petscdraw.h>
#include <petscdmda.h>

/*
   User-defined application context - contains data needed by the
   application-provided call-back routines.
*/

typedef struct {
  PetscInt   n;       /* number of nodes */
  PetscReal *nodes;   /* GLL nodes */
  PetscReal *weights; /* GLL weights */
} PetscGLL;

typedef struct {
  PetscInt  N;               /* grid points per element */
  PetscInt  E;               /* number of elements */
  PetscReal tol_L2, tol_max; /* error norms */
  PetscInt  steps;           /* number of timesteps */
  PetscReal Tend;            /* end time */
  PetscReal mu;              /* viscosity */
  PetscReal L;               /* total length of domain */
  PetscReal Le;              /* length of one element, L/E */
  PetscReal Tadj;
} PetscParam;

typedef struct {
  Vec obj;           /* desired end state */
  Vec grid;          /* total grid */
  Vec grad;
  Vec ic;
  Vec curr_sol;
  Vec true_solution; /* actual initial conditions for the final solution */
} PetscData;

typedef struct {
  Vec      grid;  /* total grid */
  Vec      mass;  /* mass matrix for total integration */
  Mat      stiff; /* stiffness matrix */
  Mat      keptstiff;
  Mat      grad;
  PetscGLL gll;
} PetscSEMOperators;

typedef struct {
  DM                da; /* distributed array data structure */
  PetscSEMOperators SEMop;
  PetscParam        param;
  PetscData         dat;
  TS                ts;
  PetscReal         initial_dt;
} AppCtx;

/*
   User-defined routines
*/
extern PetscErrorCode FormFunctionGradient(Tao, Vec, PetscReal *, Vec, void *);
extern PetscErrorCode RHSMatrixLaplaciangllDM(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode RHSMatrixAdvectiongllDM(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode InitialConditions(Vec, AppCtx *);
extern PetscErrorCode TrueSolution(Vec, AppCtx *);
extern PetscErrorCode ComputeObjective(PetscReal, Vec, AppCtx *);
extern PetscErrorCode MonitorError(Tao, void *);
extern PetscErrorCode RHSFunction(TS, PetscReal, Vec, Vec, void *);
extern PetscErrorCode RHSJacobian(TS, PetscReal, Vec, Mat, Mat, void *);

int main(int argc, char **argv)
{
  AppCtx       appctx; /* user-defined application context */
  Tao          tao;
  Vec          u; /* approximate solution vector */
  PetscInt     i, xs, xm, ind, j, lenglob;
  PetscReal    x, *wrk_ptr1, *wrk_ptr2;
  MatNullSpace nsp;
  PetscMPIInt  size;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, (char *)0, help));

  /* initialize parameters */
  appctx.param.N     = 10;   /* order of the spectral element */
  appctx.param.E     = 10;   /* number of elements */
  appctx.param.L     = 4.0;  /* length of the domain */
  appctx.param.mu    = 0.01; /* diffusion coefficient */
  appctx.initial_dt  = 5e-3;
  appctx.param.steps = PETSC_MAX_INT;
  appctx.param.Tend  = 4;

  PetscCall(PetscOptionsGetInt(NULL, NULL, "-N", &appctx.param.N, NULL));
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-E", &appctx.param.E, NULL));
  PetscCall(PetscOptionsGetReal(NULL, NULL, "-Tend", &appctx.param.Tend, NULL));
  PetscCall(PetscOptionsGetReal(NULL, NULL, "-mu", &appctx.param.mu, NULL));
  appctx.param.Le = appctx.param.L / appctx.param.E;

  PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
  PetscCheck((appctx.param.E % size) == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Number of elements must be divisible by number of processes");

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create GLL data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscCall(PetscMalloc2(appctx.param.N, &appctx.SEMop.gll.nodes, appctx.param.N, &appctx.SEMop.gll.weights));
  PetscCall(PetscDTGaussLobattoLegendreQuadrature(appctx.param.N, PETSCGAUSSLOBATTOLEGENDRE_VIA_LINEAR_ALGEBRA, appctx.SEMop.gll.nodes, appctx.SEMop.gll.weights));
  appctx.SEMop.gll.n = appctx.param.N;
  lenglob            = appctx.param.E * (appctx.param.N - 1);
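
  /*
     With periodic boundary conditions each element contributes N-1 unique grid
     values: neighboring elements share their endpoint nodes, and periodicity
     identifies the final node of the last element with the first node of the
     first element, so the global grid has E*(N-1) points.
  */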

  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern.  There are E*(N-1)
     total grid values spread as equally as possible among the processors.
  */

  PetscCall(DMDACreate1d(PETSC_COMM_WORLD, DM_BOUNDARY_PERIODIC, lenglob, 1, 1, NULL, &appctx.da));
  PetscCall(DMSetFromOptions(appctx.da));
  PetscCall(DMSetUp(appctx.da));

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution.  Then duplicate these for remaining vectors that
     have the same types.
  */

  PetscCall(DMCreateGlobalVector(appctx.da, &u));
  PetscCall(VecDuplicate(u, &appctx.dat.ic));
  PetscCall(VecDuplicate(u, &appctx.dat.true_solution));
  PetscCall(VecDuplicate(u, &appctx.dat.obj));
  PetscCall(VecDuplicate(u, &appctx.SEMop.grid));
  PetscCall(VecDuplicate(u, &appctx.SEMop.mass));
  PetscCall(VecDuplicate(u, &appctx.dat.curr_sol));

  PetscCall(DMDAGetCorners(appctx.da, &xs, NULL, NULL, &xm, NULL, NULL));
  PetscCall(DMDAVecGetArray(appctx.da, appctx.SEMop.grid, &wrk_ptr1));
  PetscCall(DMDAVecGetArray(appctx.da, appctx.SEMop.mass, &wrk_ptr2));

  /* Compute function over the locally owned part of the grid */

  xs = xs / (appctx.param.N - 1); /* convert the local node range into a local element range */
  xm = xm / (appctx.param.N - 1);

  /*
     Build total grid and mass over entire mesh (multi-elemental)
  */
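
  /*
     The GLL mass matrix is diagonal, so it is stored as the vector SEMop.mass.
     Each entry is a quadrature weight scaled by the element Jacobian Le/2.  The
     first node of an element coincides with the last node of the neighboring
     element (the loop below only visits j = 0, ..., N-2), and because the GLL
     weights are symmetric (weights[0] == weights[N-1]) the shared node receives
     the same scaled weight twice; that is the j == 0 case below.
  */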

  for (i = xs; i < xs + xm; i++) {
    for (j = 0; j < appctx.param.N - 1; j++) {
      x             = (appctx.param.Le / 2.0) * (appctx.SEMop.gll.nodes[j] + 1.0) + appctx.param.Le * i;
      ind           = i * (appctx.param.N - 1) + j;
      wrk_ptr1[ind] = x;
      wrk_ptr2[ind] = .5 * appctx.param.Le * appctx.SEMop.gll.weights[j];
      if (j == 0) wrk_ptr2[ind] += .5 * appctx.param.Le * appctx.SEMop.gll.weights[j];
    }
  }
  PetscCall(DMDAVecRestoreArray(appctx.da, appctx.SEMop.grid, &wrk_ptr1));
  PetscCall(DMDAVecRestoreArray(appctx.da, appctx.SEMop.mass, &wrk_ptr2));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   Create matrix data structure; set matrix evaluation routine.
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscCall(DMSetMatrixPreallocateOnly(appctx.da, PETSC_TRUE));
  PetscCall(DMCreateMatrix(appctx.da, &appctx.SEMop.stiff));
  PetscCall(DMCreateMatrix(appctx.da, &appctx.SEMop.grad));
  /*
     For linear problems with a time-dependent f(u,t) in the equation
     u_t = f(u,t), the user provides the discretized right-hand side
     as a time-dependent matrix.  Note that the TS argument is ignored
     by these two routines, so it does no harm that appctx.ts has not
     been created yet.
  */
  PetscCall(RHSMatrixLaplaciangllDM(appctx.ts, 0.0, u, appctx.SEMop.stiff, appctx.SEMop.stiff, &appctx));
  PetscCall(RHSMatrixAdvectiongllDM(appctx.ts, 0.0, u, appctx.SEMop.grad, appctx.SEMop.grad, &appctx));

  PetscCall(MatDuplicate(appctx.SEMop.stiff, MAT_COPY_VALUES, &appctx.SEMop.keptstiff));

  /* attach the constant null space to the matrices; this probably is not needed but does no harm */
  PetscCall(MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nsp));
  PetscCall(MatSetNullSpace(appctx.SEMop.stiff, nsp));
  PetscCall(MatSetNullSpace(appctx.SEMop.keptstiff, nsp));
  PetscCall(MatNullSpaceTest(nsp, appctx.SEMop.stiff, NULL));
  PetscCall(MatNullSpaceDestroy(&nsp));
  PetscCall(MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nsp));
  PetscCall(MatSetNullSpace(appctx.SEMop.grad, nsp));
  PetscCall(MatNullSpaceTest(nsp, appctx.SEMop.grad, NULL));
  PetscCall(MatNullSpaceDestroy(&nsp));
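
  /*
     The constant null space exists because, with periodic boundary conditions,
     both the Laplacian and the gradient map constant functions to zero.
  */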

  /* Create the TS solver that solves the ODE and its adjoint; set its options */
  PetscCall(TSCreate(PETSC_COMM_WORLD, &appctx.ts));
  PetscCall(TSSetProblemType(appctx.ts, TS_NONLINEAR));
  PetscCall(TSSetType(appctx.ts, TSRK));
  PetscCall(TSSetDM(appctx.ts, appctx.da));
  PetscCall(TSSetTime(appctx.ts, 0.0));
  PetscCall(TSSetTimeStep(appctx.ts, appctx.initial_dt));
  PetscCall(TSSetMaxSteps(appctx.ts, appctx.param.steps));
  PetscCall(TSSetMaxTime(appctx.ts, appctx.param.Tend));
  PetscCall(TSSetExactFinalTime(appctx.ts, TS_EXACTFINALTIME_MATCHSTEP));
  PetscCall(TSSetTolerances(appctx.ts, 1e-7, NULL, 1e-7, NULL));
  PetscCall(TSSetFromOptions(appctx.ts));
  /* Need to save the initial timestep the user may have set with -ts_dt so it can be reset for each new TSSolve() */
  PetscCall(TSGetTimeStep(appctx.ts, &appctx.initial_dt));
  PetscCall(TSSetRHSFunction(appctx.ts, NULL, RHSFunction, &appctx));
  PetscCall(TSSetRHSJacobian(appctx.ts, appctx.SEMop.stiff, appctx.SEMop.stiff, RHSJacobian, &appctx));
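
  /* The Jacobian set here is used for the forward solve and, transposed, by TSAdjointSolve(); see the Notes in FormFunctionGradient() below */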

  /* Set objective and initial conditions for the problem and compute the objective function (the evolution of true_solution to the final time) */
  PetscCall(InitialConditions(appctx.dat.ic, &appctx));
  PetscCall(TrueSolution(appctx.dat.true_solution, &appctx));
  PetscCall(ComputeObjective(appctx.param.Tend, appctx.dat.obj, &appctx));

  PetscCall(TSSetSaveTrajectory(appctx.ts));
  PetscCall(TSSetFromOptions(appctx.ts));

  /* Create TAO solver and set desired solution method */
  PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
  PetscCall(TaoSetMonitor(tao, MonitorError, &appctx, NULL));
  PetscCall(TaoSetType(tao, TAOBQNLS));
  PetscCall(TaoSetSolution(tao, appctx.dat.ic));
  /* Set routine for function and gradient evaluation */
  PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, (void *)&appctx));
  PetscCall(TaoSetTolerances(tao, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT));
  /* Check for any TAO command line options */
  PetscCall(TaoSetFromOptions(tao));
  PetscCall(TaoSolve(tao));
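
  /* A typical run (executable name assumed) mirrors the TEST block at the end of this file:
        ./burgers_spectral -tao_max_it 5 -tao_gatol 1.e-4 */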

  PetscCall(TaoDestroy(&tao));
  PetscCall(MatDestroy(&appctx.SEMop.stiff));
  PetscCall(MatDestroy(&appctx.SEMop.keptstiff));
  PetscCall(MatDestroy(&appctx.SEMop.grad));
  PetscCall(VecDestroy(&u));
  PetscCall(VecDestroy(&appctx.dat.ic));
  PetscCall(VecDestroy(&appctx.dat.true_solution));
  PetscCall(VecDestroy(&appctx.dat.obj));
  PetscCall(VecDestroy(&appctx.SEMop.grid));
  PetscCall(VecDestroy(&appctx.SEMop.mass));
  PetscCall(VecDestroy(&appctx.dat.curr_sol));
  PetscCall(PetscFree2(appctx.SEMop.gll.nodes, appctx.SEMop.gll.weights));
  PetscCall(DMDestroy(&appctx.da));
  PetscCall(TSDestroy(&appctx.ts));

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  PetscCall(PetscFinalize());
  return 0;
}

/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the initial conditions for the Tao optimization solve (these are also the initial conditions for the first TSSolve()).

                       The routine TrueSolution() computes the initial conditions that actually generate the objective function's target profile.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, xs, xn;

  PetscFunctionBegin;
  PetscCall(DMDAVecGetArray(appctx->da, u, &s));
  PetscCall(DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));
  /* true solution at t = 0 plus a Gaussian bump centered at x = 2 */
  for (i = xs; i < xs + xn; i++) s[i] = 2.0 * appctx->param.mu * PETSC_PI * PetscSinScalar(PETSC_PI * xg[i]) / (2.0 + PetscCosScalar(PETSC_PI * xg[i])) + 0.25 * PetscExpReal(-4.0 * PetscPowReal(xg[i] - 2.0, 2.0));
  PetscCall(DMDAVecRestoreArray(appctx->da, u, &s));
  PetscCall(DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   TrueSolution() computes the initial conditions that actually generate the objective function's target profile; these are what the Tao optimization should recover.

             InitialConditions() computes the starting guess for the Tao iterations.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode TrueSolution(Vec u, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, xs, xn;

  PetscFunctionBegin;
  PetscCall(DMDAVecGetArray(appctx->da, u, &s));
  PetscCall(DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));
  for (i = xs; i < xs + xn; i++) s[i] = 2.0 * appctx->param.mu * PETSC_PI * PetscSinScalar(PETSC_PI * xg[i]) / (2.0 + PetscCosScalar(PETSC_PI * xg[i]));
  PetscCall(DMDAVecRestoreArray(appctx->da, u, &s));
  PetscCall(DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* --------------------------------------------------------------------- */
/*
   Sets the desired profile at the final time

   Input Parameters:
   t - final time
   obj - vector storing the desired profile
   appctx - user-defined application context
*/
PetscErrorCode ComputeObjective(PetscReal t, Vec obj, AppCtx *appctx)
{
  PetscScalar       *s;
  const PetscScalar *xg;
  PetscInt           i, xs, xn;

  PetscFunctionBegin;
  PetscCall(DMDAVecGetArray(appctx->da, obj, &s));
  PetscCall(DMDAVecGetArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));
  for (i = xs; i < xs + xn; i++) {
    s[i] = 2.0 * appctx->param.mu * PETSC_PI * PetscSinScalar(PETSC_PI * xg[i]) * PetscExpScalar(-PETSC_PI * PETSC_PI * t * appctx->param.mu) / (2.0 + PetscExpScalar(-PETSC_PI * PETSC_PI * t * appctx->param.mu) * PetscCosScalar(PETSC_PI * xg[i]));
  }
  PetscCall(DMDAVecRestoreArray(appctx->da, obj, &s));
  PetscCall(DMDAVecRestoreArrayRead(appctx->da, appctx->SEMop.grid, (void *)&xg));
  PetscFunctionReturn(PETSC_SUCCESS);
}
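
/*
   RHSFunction - Computes M^{-1} F(u) = M^{-1}(K u - u .* (G u)) for Burgers' equation;
   the matrices keptstiff (K) and grad (G) were already scaled by the inverse of the
   diagonal mass matrix when they were assembled, see RHSMatrixLaplaciangllDM() and
   RHSMatrixAdvectiongllDM() below.
*/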

PetscErrorCode RHSFunction(TS ts, PetscReal t, Vec globalin, Vec globalout, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx;

  PetscFunctionBegin;
  PetscCall(MatMult(appctx->SEMop.grad, globalin, globalout)); /* grad u */
  PetscCall(VecPointwiseMult(globalout, globalin, globalout)); /* u grad u */
  PetscCall(VecScale(globalout, -1.0));                        /* -u grad u */
  PetscCall(MatMultAdd(appctx->SEMop.keptstiff, globalin, globalout, globalout)); /* mu u_xx - u grad u */
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
      K is the discretization of the Laplacian
      G is the discretization of the gradient

      The right-hand side is K u - diag(u) G u, so its Jacobian is given by
              K - diag(u) G - diag(G u)
*/
PetscErrorCode RHSJacobian(TS ts, PetscReal t, Vec globalin, Mat A, Mat B, void *ctx)
{
  AppCtx *appctx = (AppCtx *)ctx;
  Vec     Gglobalin;

  PetscFunctionBegin;
  /*    A = diag(u) G */
  PetscCall(MatCopy(appctx->SEMop.grad, A, SAME_NONZERO_PATTERN));
  PetscCall(MatDiagonalScale(A, globalin, NULL));

  /*    A = A + diag(Gu) */
  PetscCall(VecDuplicate(globalin, &Gglobalin));
  PetscCall(MatMult(appctx->SEMop.grad, globalin, Gglobalin));
  PetscCall(MatDiagonalSet(A, Gglobalin, ADD_VALUES));
  PetscCall(VecDestroy(&Gglobalin));

  /*    A = K - A */
  PetscCall(MatScale(A, -1.0));
  PetscCall(MatAXPY(A, 1.0, appctx->SEMop.keptstiff, SAME_NONZERO_PATTERN));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* --------------------------------------------------------------------- */

/*
   RHSMatrixLaplaciangllDM - User-provided routine to compute the right-hand-side
   matrix for the heat equation.

   Input Parameters:
   ts - the TS context
   t - current time (ignored)
   X - current solution (ignored)
   ctx - user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   A - the matrix
   BB - optionally different matrix from which the preconditioner is built (unused here)
*/
PetscErrorCode RHSMatrixLaplaciangllDM(TS ts, PetscReal t, Vec X, Mat A, Mat BB, void *ctx)
{
  PetscReal **temp;
  PetscReal   vv;
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    i, xs, xn, l, j;
  PetscInt   *rowsDM;

  PetscFunctionBegin;
  /*
     Creates the element stiffness matrix for the given gll
  */
  PetscCall(PetscGaussLobattoLegendreElementLaplacianCreate(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  /* workaround for clang analyzer warning: Division by zero */
  PetscCheck(appctx->param.N > 1, PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "Spectral element order should be > 1");

  /* scale by the size of the element */
  for (i = 0; i < appctx->param.N; i++) {
    vv = -appctx->param.mu * 2.0 / appctx->param.Le;
    for (j = 0; j < appctx->param.N; j++) temp[i][j] = temp[i][j] * vv;
  }

  PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));

  xs = xs / (appctx->param.N - 1);
  xn = xn / (appctx->param.N - 1);

  PetscCall(PetscMalloc1(appctx->param.N, &rowsDM));
  /* loop over local elements */
  for (j = xs; j < xs + xn; j++) {
    for (l = 0; l < appctx->param.N; l++) rowsDM[l] = 1 + (j - xs) * (appctx->param.N - 1) + l;
    PetscCall(MatSetValuesLocal(A, appctx->param.N, rowsDM, appctx->param.N, rowsDM, &temp[0][0], ADD_VALUES));
  }
  PetscCall(PetscFree(rowsDM));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  /* scale the rows by the inverse of the (diagonal) mass matrix: A = M^{-1} A */
  PetscCall(VecReciprocal(appctx->SEMop.mass));
  PetscCall(MatDiagonalScale(A, appctx->SEMop.mass, NULL));
  PetscCall(VecReciprocal(appctx->SEMop.mass));

  PetscCall(PetscGaussLobattoLegendreElementLaplacianDestroy(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   RHSMatrixAdvectiongllDM - User-provided routine to compute the right-hand-side
   matrix for the advection equation.

   Input Parameters:
   ts - the TS context
   t - current time (ignored)
   X - current solution (ignored)
   ctx - user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   A - the matrix
   BB - optionally different preconditioning matrix (unused here)
*/
PetscErrorCode RHSMatrixAdvectiongllDM(TS ts, PetscReal t, Vec X, Mat A, Mat BB, void *ctx)
{
  PetscReal **temp;
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    xs, xn, l, j;
  PetscInt   *rowsDM;

  PetscFunctionBegin;
  /*
     Creates the element advection matrix for the given gll
  */
  PetscCall(PetscGaussLobattoLegendreElementAdvectionCreate(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));

  PetscCall(DMDAGetCorners(appctx->da, &xs, NULL, NULL, &xn, NULL, NULL));

  xs = xs / (appctx->param.N - 1);
  xn = xn / (appctx->param.N - 1);

  PetscCall(PetscMalloc1(appctx->param.N, &rowsDM));
  /* loop over local elements */
  for (j = xs; j < xs + xn; j++) {
    for (l = 0; l < appctx->param.N; l++) rowsDM[l] = 1 + (j - xs) * (appctx->param.N - 1) + l;
    PetscCall(MatSetValuesLocal(A, appctx->param.N, rowsDM, appctx->param.N, rowsDM, &temp[0][0], ADD_VALUES));
  }
  PetscCall(PetscFree(rowsDM));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));

  /* scale the rows by the inverse of the (diagonal) mass matrix: A = M^{-1} A */
  PetscCall(VecReciprocal(appctx->SEMop.mass));
  PetscCall(MatDiagonalScale(A, appctx->SEMop.mass, NULL));
  PetscCall(VecReciprocal(appctx->SEMop.mass));
  PetscCall(PetscGaussLobattoLegendreElementAdvectionDestroy(appctx->SEMop.gll.n, appctx->SEMop.gll.nodes, appctx->SEMop.gll.weights, &temp));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* ------------------------------------------------------------------ */
/*
   FormFunctionGradient - Evaluates the function and corresponding gradient.

   Input Parameters:
   tao - the Tao context
   IC  - the input vector
   ctx - optional user-defined context, as set when calling TaoSetObjectiveAndGradient()

   Output Parameters:
   f - the newly evaluated function
   G - the newly evaluated gradient

   Notes:

          The forward equation is
              M u_t = F(u)
          which is converted to
                u_t = M^{-1} F(u)
          in the user code since TS has no direct way of providing a mass matrix. The Jacobian of this is
                 M^{-1} J
          where J is the Jacobian of F. Now the adjoint equation is
                M v_t = J^T v
          but TSAdjoint does not solve this since it can only solve the transposed system for the
          Jacobian the user provided. Hence TSAdjoint solves
                 w_t = J^T M^{-1} w  (where w = M v)
          since there is no way to indicate the mass matrix as a separate entity to TS. Thus one
          must be careful in initializing the "adjoint equation" and using the result. This is
          why below the adjoint is initialized with
              G = -2 M (u_d - u(T))
          (instead of -2 (u_d - u(T))) and why the result is rescaled as
              G = G/appctx->SEMop.mass  (that is, G = M^{-1} w)
          (instead of being used directly as the result of the "adjoint solve").
*/
PetscErrorCode FormFunctionGradient(Tao tao, Vec IC, PetscReal *f, Vec G, void *ctx)
{
  AppCtx            *appctx = (AppCtx *)ctx; /* user-defined application context */
  Vec                temp;
  PetscInt           its;
  PetscReal          ff, gnorm, cnorm, xdiff, errex;
  TaoConvergedReason reason;

  PetscFunctionBegin;
  PetscCall(TSSetTime(appctx->ts, 0.0));
  PetscCall(TSSetStepNumber(appctx->ts, 0));
  PetscCall(TSSetTimeStep(appctx->ts, appctx->initial_dt));
  PetscCall(VecCopy(IC, appctx->dat.curr_sol));

  PetscCall(TSSolve(appctx->ts, appctx->dat.curr_sol));

  PetscCall(VecWAXPY(G, -1.0, appctx->dat.curr_sol, appctx->dat.obj));

  /*
     Compute the cost function f = ||u(T) - u_d||_M^2, the mass-weighted squared L2 norm of the misfit
  */
  PetscCall(VecDuplicate(G, &temp));
  PetscCall(VecPointwiseMult(temp, G, G));
  PetscCall(VecDot(temp, appctx->SEMop.mass, f));

  /* local error evaluation */
  PetscCall(VecWAXPY(temp, -1.0, appctx->dat.ic, appctx->dat.true_solution));
  PetscCall(VecPointwiseMult(temp, temp, temp));
  /* for error evaluation */
  PetscCall(VecDot(temp, appctx->SEMop.mass, &errex));
  PetscCall(VecDestroy(&temp));
  errex = PetscSqrtReal(errex);

  /*
     Compute initial conditions for the adjoint integration. See Notes above
  */
  PetscCall(VecScale(G, -2.0));
  PetscCall(VecPointwiseMult(G, G, appctx->SEMop.mass)); /* G = -2 M (u_d - u(T)) */
  PetscCall(TSSetCostGradients(appctx->ts, 1, &G, NULL));
  PetscCall(TSAdjointSolve(appctx->ts));
  PetscCall(VecPointwiseDivide(G, G, appctx->SEMop.mass)); /* G = M^{-1} w */

  PetscCall(TaoGetSolutionStatus(tao, &its, &ff, &gnorm, &cnorm, &xdiff, &reason));
  PetscFunctionReturn(PETSC_SUCCESS);
}
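
/*
   The hand-coded gradient above can be checked against a finite-difference
   approximation at runtime with the Tao option -tao_test_gradient (available
   in recent PETSc versions).
*/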

PetscErrorCode MonitorError(Tao tao, void *ctx)
{
  AppCtx   *appctx = (AppCtx *)ctx;
  Vec       temp;
  PetscReal nrm;

  PetscFunctionBegin;
  PetscCall(VecDuplicate(appctx->dat.ic, &temp));
  PetscCall(VecWAXPY(temp, -1.0, appctx->dat.ic, appctx->dat.true_solution));
  PetscCall(VecPointwiseMult(temp, temp, temp));
  PetscCall(VecDot(temp, appctx->SEMop.mass, &nrm));
  PetscCall(VecDestroy(&temp));
  nrm = PetscSqrtReal(nrm);
  PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Error for initial conditions %g\n", (double)nrm));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*TEST

    build:
      requires: !complex

    test:
      args: -tao_max_it 5 -tao_gatol 1.e-4
      requires: !single

    test:
      suffix: 2
      nsize: 2
      args: -tao_max_it 5 -tao_gatol 1.e-4
      requires: !single

TEST*/