Actual source code: heat-data-assimulation.c
petsc-3.8.4 2018-03-24
static char help[] = "Solves a simple data assimilation problem with the heat equation using TSAdjoint\n\n";
/*
    Not yet tested in parallel
*/
/*
   Concepts: TS^time-dependent linear problems
   Concepts: TS^heat equation
   Concepts: TS^diffusion equation
   Concepts: adjoints
   Processors: n
*/
/* ------------------------------------------------------------------------

   This program uses the one-dimensional heat equation (also called the
   diffusion equation),
       u_t = mu*u_xx,
   on the domain 0 <= x <= 1, with periodic boundary conditions,
   to demonstrate solving a data assimilation problem: find the initial
   conditions that produce a given solution at a fixed time.

   We discretize the right-hand side using the spectral element method.

  ------------------------------------------------------------------------- */
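/*
   In optimization terms: given a target profile u_obj(x) (set up by
   Objective()), find the initial condition u0 that minimizes the cost

       J(u0) = int_0^1 (u(x,Tend;u0) - u_obj(x))^2 dx,

   where u(x,t;u0) solves the heat equation above. TSSolve() propagates u0
   forward, TSAdjointSolve() supplies the gradient dJ/du0, and the Tao
   limited-memory variable-metric method TAOBLMVM performs the minimization.
*/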
/*
   Include "petscts.h" so that we can use TS solvers. Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines     petscvec.h  - vectors
     petscmat.h    - matrices
     petscis.h     - index sets              petscksp.h  - Krylov subspace methods
     petscviewer.h - viewers                 petscpc.h   - preconditioners
     petscksp.h    - linear solvers          petscsnes.h - nonlinear solvers
*/
#include <petsctao.h>
#include <petscts.h>
#include <petscgll.h>
#include <petscdraw.h>
#include <petscdmda.h>
/*
   User-defined application context - contains data needed by the
   application-provided call-back routines.
*/
typedef struct {
  PetscInt  N;              /* grid points per element */
  PetscInt  E;              /* number of elements */
  PetscReal tol_L2,tol_max; /* error norms */
  PetscInt  steps;          /* number of timesteps */
  PetscReal Tend;           /* endtime */
  PetscReal mu;             /* viscosity */
  PetscReal dt;             /* timestep */
  PetscReal L;              /* total length of domain */
  PetscReal Le;             /* length of one element */
  PetscReal Tadj;           /* adjoint assimilation time (currently unused) */
} PetscParam;
typedef struct {
  Vec         obj;      /* desired end state */
  Vec         grid;     /* total grid */
  Vec         grad;     /* gradient of the cost with respect to the initial conditions */
  Vec         ic;       /* initial conditions, the optimization variable */
  Vec         curr_sol; /* current solution */
  PetscReal   *Z;       /* mesh grid */
  PetscScalar *W;       /* weights */
} PetscData;
typedef struct {
  Vec      grid;  /* total grid */
  Vec      mass;  /* mass matrix for total integration */
  Mat      stiff; /* stiffness matrix */
  PetscGLL gll;
} PetscSEMOperators;
typedef struct {
  DM                da; /* distributed array data structure */
  PetscSEMOperators SEMop;
  PetscParam        param;
  PetscData         dat;
  PetscBool         debug;
  TS                ts;
  PetscReal         initial_dt;
} AppCtx;
/*
   User-defined routines
*/
extern PetscErrorCode InitialConditions(Vec,AppCtx*);
extern PetscErrorCode FormFunctionGradient(Tao,Vec,PetscReal*,Vec,void*);
extern PetscErrorCode RHSMatrixHeatgllDM(TS,PetscReal,Vec,Mat,Mat,void*);
extern PetscErrorCode RHSAdjointgllDM(TS,PetscReal,Vec,Mat,Mat,void*);
extern PetscErrorCode RHSFunctionHeat(TS,PetscReal,Vec,Vec,void*);
extern PetscErrorCode Objective(PetscReal,Vec,AppCtx*);
int main(int argc,char **argv)
{
  AppCtx         appctx; /* user-defined application context */
  Tao            tao;
  Vec            u;      /* approximate solution vector */
  PetscErrorCode ierr;
  PetscInt       i, xs, xm, ind, j, lenglob;
  PetscReal      x, *wrk_ptr1, *wrk_ptr2;
  MatNullSpace   nsp;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
  /* initialize parameters */
  appctx.param.N     = 10;    /* order of the spectral element */
  appctx.param.E     = 8;     /* number of elements */
  appctx.param.L     = 1.0;   /* length of the domain */
  appctx.param.mu    = 0.001; /* diffusion coefficient */
  appctx.initial_dt  = 1e-4;
  appctx.param.steps = PETSC_MAX_INT;
  appctx.param.Tend  = 0.01;

  PetscOptionsGetInt(NULL,NULL,"-N",&appctx.param.N,NULL);
  PetscOptionsGetInt(NULL,NULL,"-E",&appctx.param.E,NULL);
  PetscOptionsHasName(NULL,NULL,"-debug",&appctx.debug);
  appctx.param.Le = appctx.param.L/appctx.param.E;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscGLLCreate(appctx.param.N,PETSCGLL_VIA_LINEARALGEBRA,&appctx.SEMop.gll);

  PetscMalloc1(appctx.param.N, &appctx.dat.Z);
  PetscMalloc1(appctx.param.N, &appctx.dat.W);

  for (i=0; i<appctx.param.N; i++) {
    appctx.dat.Z[i] = (appctx.SEMop.gll.nodes[i]+1.0);
    appctx.dat.W[i] = appctx.SEMop.gll.weights[i];
  }
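
  /*
     Z holds the GLL nodes shifted from the reference element [-1,1] to [0,2];
     the grid loop below scales them by Le/2 and translates by Le*i to obtain
     the physical coordinates of element i.
  */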
  /* lenloc = appctx.param.E*appctx.param.N; only if I want to do it totally local for explicit */
  lenglob = appctx.param.E*(appctx.param.N-1);
  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern. Because the
     boundary conditions are periodic, element endpoints are shared and
     there are E*(N-1) total grid values spread equally among all the
     processors.
  */
  DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_PERIODIC,lenglob,1,1,NULL,&appctx.da);
  DMSetFromOptions(appctx.da);
  DMSetUp(appctx.da);
  /* DMDAGetInfo(appctx.da,NULL,&E,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); */

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution. Then duplicate these for remaining vectors that
     have the same types.
  */
  DMCreateGlobalVector(appctx.da,&u);
  VecDuplicate(u,&appctx.dat.ic);
  VecDuplicate(u,&appctx.dat.obj);
  VecDuplicate(u,&appctx.dat.grad);
  VecDuplicate(u,&appctx.SEMop.grid);
  VecDuplicate(u,&appctx.SEMop.mass);
  VecDuplicate(u,&appctx.dat.curr_sol);

  DMDAGetCorners(appctx.da,&xs,NULL,NULL,&xm,NULL,NULL);
  DMDAVecGetArray(appctx.da,appctx.SEMop.grid,&wrk_ptr1);
  DMDAVecGetArray(appctx.da,appctx.SEMop.mass,&wrk_ptr2);

  /* Compute function over the locally owned part of the grid;
     convert the local node range (xs,xm) into an element range */
  xs = xs/(appctx.param.N-1);
  xm = xm/(appctx.param.N-1);

  /*
     Build total grid and mass over entire mesh (multi-elemental)
  */
  for (i=xs; i<xs+xm; i++) {
    for (j=0; j<appctx.param.N-1; j++) {
      x   = (appctx.param.Le/2.0)*(appctx.dat.Z[j])+appctx.param.Le*i;
      ind = i*(appctx.param.N-1)+j;
      wrk_ptr1[ind] = x;
      wrk_ptr2[ind] = .5*appctx.param.Le*appctx.dat.W[j];
      /* the first node of each element coincides with the last node of the
         previous element, so the shared node carries the quadrature weight
         from both elements (the GLL endpoint weights are equal) */
      if (j==0) wrk_ptr2[ind] += .5*appctx.param.Le*appctx.dat.W[j];
    }
  }
  DMDAVecRestoreArray(appctx.da,appctx.SEMop.grid,&wrk_ptr1);
  DMDAVecRestoreArray(appctx.da,appctx.SEMop.mass,&wrk_ptr2);
  /* Set objective (target state) and initial conditions for the problem */
  Objective(appctx.param.Tend+2,appctx.dat.obj,&appctx);
  InitialConditions(appctx.dat.ic,&appctx);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create matrix data structure; set matrix evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  DMSetMatrixPreallocateOnly(appctx.da, PETSC_TRUE);
  DMCreateMatrix(appctx.da,&appctx.SEMop.stiff);
  /*
     For linear problems with a time-dependent f(u,t) in the equation
     u_t = f(u,t), the user provides the discretized right-hand side
     as a time-dependent matrix. Note that appctx.ts has not been created
     yet; RHSMatrixHeatgllDM() never dereferences its TS argument.
  */
  RHSMatrixHeatgllDM(appctx.ts,0.0,u,appctx.SEMop.stiff,appctx.SEMop.stiff,&appctx);
  /* attach the null space to the matrix; this probably is not needed but does no harm */
  MatNullSpaceCreate(PETSC_COMM_WORLD,PETSC_TRUE,0,NULL,&nsp);
  MatSetNullSpace(appctx.SEMop.stiff,nsp);
  MatNullSpaceTest(nsp,appctx.SEMop.stiff,NULL);
  MatNullSpaceDestroy(&nsp);
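
  /*
     With periodic boundary conditions the discrete Laplacian annihilates the
     constant functions, so they span its null space; InitialConditions()
     removes the mean from the starting guess for the same reason.
  */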
  /* Create TAO solver and set desired solution method */
  TaoCreate(PETSC_COMM_WORLD,&tao);
  TaoSetType(tao,TAOBLMVM);
  TaoSetInitialVector(tao,appctx.dat.ic);

  /* Set routine for function and gradient evaluation */
  TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,(void *)&appctx);

  /* Check for any TAO command line options */
  TaoSetTolerances(tao,1e-8,PETSC_DEFAULT,PETSC_DEFAULT);
  TaoSetFromOptions(tao);

  TaoSolve(tao);
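
  /*
     A typical run, matching the first TEST case below (assuming the
     executable keeps the source file's name):

         ./heat-data-assimulation -tao_monitor -ts_adapt_dt_max 3.e-3

     -tao_monitor prints the cost at each Tao iteration and -ts_adapt_dt_max
     caps the step size the TS adaptor may choose.
  */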
  TaoDestroy(&tao);
  MatDestroy(&appctx.SEMop.stiff);
  VecDestroy(&u);
  VecDestroy(&appctx.dat.ic);
  VecDestroy(&appctx.dat.obj);
  VecDestroy(&appctx.dat.grad);
  VecDestroy(&appctx.SEMop.grid);
  VecDestroy(&appctx.SEMop.mass);
  VecDestroy(&appctx.dat.curr_sol);
  PetscGLLDestroy(&appctx.SEMop.gll);
  DMDestroy(&appctx.da);
  PetscFree(appctx.dat.Z);
  PetscFree(appctx.dat.W);
  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u,AppCtx *appctx)
{
  PetscScalar *s_localptr, *xg_localptr;
  PetscInt    i,lenglob;
  PetscScalar sum;

  /*
     Get pointers to vector data
  */
  DMDAVecGetArray(appctx->da,u,&s_localptr);
  DMDAVecGetArray(appctx->da,appctx->SEMop.grid,&xg_localptr);

  lenglob = appctx->param.E*(appctx->param.N-1);

  /* for (i=0; i<lenglob; i++) {
       s_localptr[i] = PetscSinScalar(2.0*PETSC_PI*xg_localptr[i]);
     } */

  for (i=0; i<lenglob; i++) {
    s_localptr[i] = PetscSinScalar(2.0*PETSC_PI*xg_localptr[i])+PetscCosScalar(4.0*PETSC_PI*xg_localptr[i])+3.0*PetscSinScalar(2.0*PETSC_PI*xg_localptr[i])*PetscCosScalar(6.0*PETSC_PI*xg_localptr[i]);
  }

  DMDAVecRestoreArray(appctx->da,appctx->SEMop.grid,&xg_localptr);
  DMDAVecRestoreArray(appctx->da,u,&s_localptr);
  /* make sure the initial conditions do not contain the constant functions */
  VecSum(u,&sum);
  VecShift(u,-sum/lenglob);
  return 0;
}
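
/*
   Note that this profile only serves as the starting guess handed to
   TaoSetInitialVector(); Tao then adjusts the initial condition so that the
   propagated solution matches the target profile at the final time.
*/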
/* --------------------------------------------------------------------- */
/*
   Objective - Sets the target profile at the assimilation time.

   Input Parameters:
   t - time at which the target profile is evaluated
   obj - vector to hold the target profile
   appctx - user-defined application context

   Output Parameter:
   obj - vector with the newly computed target profile
*/
PetscErrorCode Objective(PetscReal t,Vec obj,AppCtx *appctx)
{
  PetscScalar *s_localptr,*xg_localptr,tc = -appctx->param.mu*4.0*PETSC_PI*PETSC_PI*t;
  PetscInt    i, lenglob;

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  DMDAVecGetArray(appctx->da,obj,&s_localptr);
  DMDAVecGetArray(appctx->da,appctx->SEMop.grid,&xg_localptr);

  lenglob = appctx->param.E*(appctx->param.N-1);

  /* the exact solution of u_t = mu*u_xx with initial condition sin(2*pi*x) */
  for (i=0; i<lenglob; i++) {
    s_localptr[i] = PetscSinScalar(2.0*PETSC_PI*xg_localptr[i])*PetscExpScalar(tc);
  }

  /* for (i=0; i<lenglob; i++) {
       s_localptr[i] = 1.0;
     } */

  /*
     Restore vectors
  */
  DMDAVecRestoreArray(appctx->da,appctx->SEMop.grid,&xg_localptr);
  DMDAVecRestoreArray(appctx->da,obj,&s_localptr);

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSMatrixHeatgllDM - User-provided routine to compute the right-hand-side
   matrix for the heat equation.

   Input Parameters:
   ts - the TS context
   t - current time
   X - global input vector
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   A - Jacobian matrix
   BB - optionally different preconditioning matrix
*/
PetscErrorCode RHSMatrixHeatgllDM(TS ts,PetscReal t,Vec X,Mat A,Mat BB,void *ctx)
{
  PetscReal **temp;
  PetscReal vv;
  AppCtx    *appctx = (AppCtx*)ctx; /* user-defined application context */
  PetscInt  i,xs,xn,l,j;
  PetscInt  *rowsDM;

  /*
     Creates the element stiffness matrix for the given gll
  */
  PetscGLLElementLaplacianCreate(&appctx->SEMop.gll,&temp);

  /* scale by -mu*(2/Le): 2/Le is the reference-to-physical element map factor,
     and the minus sign turns the stiffness matrix into the right-hand-side operator */
  for (i=0; i<appctx->param.N; i++) {
    vv = -appctx->param.mu*2.0/appctx->param.Le;
    for (j=0; j<appctx->param.N; j++) temp[i][j] = temp[i][j]*vv;
  }
  MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
  DMDAGetCorners(appctx->da,&xs,NULL,NULL,&xn,NULL,NULL);

  /* convert the local node range to a local element range */
  xs = xs/(appctx->param.N-1);
  xn = xn/(appctx->param.N-1);

  PetscMalloc1(appctx->param.N,&rowsDM);
  /*
     loop over local elements
  */
  for (j=xs; j<xs+xn; j++) {
    for (l=0; l<appctx->param.N; l++) {
      /* the leading 1 skips the ghost node at the left end of the local numbering */
      rowsDM[l] = 1+(j-xs)*(appctx->param.N-1)+l;
    }
    MatSetValuesLocal(A,appctx->param.N,rowsDM,appctx->param.N,rowsDM,&temp[0][0],ADD_VALUES);
  }
  PetscFree(rowsDM);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  VecReciprocal(appctx->SEMop.mass);
  MatDiagonalScale(A,appctx->SEMop.mass,NULL);
  VecReciprocal(appctx->SEMop.mass);
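
  /*
     The assembled weak form is M u_t = K u, with M the diagonal GLL mass
     matrix stored in SEMop.mass. Scaling the rows of A by 1/M converts this
     to the form u_t = (M^-1 K) u that TS expects; the second VecReciprocal()
     restores the mass vector for later use as quadrature weights.
  */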
  PetscGLLElementLaplacianDestroy(&appctx->SEMop.gll,&temp);
  return 0;
}
/* ------------------------------------------------------------------ */
/*
   FormFunctionGradient - Evaluates the function and corresponding gradient.

   Input Parameters:
   tao - the Tao context
   IC - the input vector (the candidate initial conditions)
   ctx - optional user-defined context, as set by TaoSetObjectiveAndGradientRoutine()

   Output Parameters:
   f - the newly evaluated function
   G - the newly evaluated gradient
*/
PetscErrorCode FormFunctionGradient(Tao tao,Vec IC,PetscReal *f,Vec G,void *ctx)
{
  AppCtx *appctx = (AppCtx*)ctx; /* user-defined application context */
  Vec    temp, temp2;
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSCreate(PETSC_COMM_WORLD,&appctx->ts);
  TSSetProblemType(appctx->ts,TS_LINEAR);
  TSSetType(appctx->ts,TSRK);
  TSSetDM(appctx->ts,appctx->da);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set time
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSSetTime(appctx->ts,0.0);
  TSSetTimeStep(appctx->ts,appctx->initial_dt);
  TSSetMaxSteps(appctx->ts,appctx->param.steps);
  TSSetMaxTime(appctx->ts,appctx->param.Tend);
  TSSetExactFinalTime(appctx->ts,TS_EXACTFINALTIME_MATCHSTEP);

  TSSetTolerances(appctx->ts,1e-7,NULL,1e-7,NULL);
  TSSetFromOptions(appctx->ts);
  /* Need to save the initial timestep the user may have set with -ts_dt so it can be reset for each new TSSolve() */
  TSGetTimeStep(appctx->ts,&appctx->initial_dt);

  TSSetTime(appctx->ts,0.0);
  TSSetTimeStep(appctx->ts,appctx->initial_dt);
  /* the problem is linear with a constant Jacobian, so the precomputed stiffness matrix can be reused for every step */
  TSSetRHSFunction(appctx->ts,NULL,TSComputeRHSFunctionLinear,appctx);
  TSSetRHSJacobian(appctx->ts,appctx->SEMop.stiff,appctx->SEMop.stiff,TSComputeRHSJacobianConstant,appctx);
  VecCopy(IC,appctx->dat.curr_sol);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Save trajectory of solution so that TSAdjointSolve() may be used
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSSetSaveTrajectory(appctx->ts);

  TSSolve(appctx->ts,appctx->dat.curr_sol);

  /*
     Compute the cost function f: the misfit between the computed final state
     and the desired state, measured in the mass-weighted (discrete L2) norm
  */
  VecDuplicate(appctx->dat.obj,&temp);
  VecCopy(appctx->dat.obj,temp);
  VecAXPY(temp,-1.0,appctx->dat.curr_sol);

  VecDuplicate(temp,&temp2);
  VecPointwiseMult(temp2,temp,temp);
  VecDot(temp2,appctx->SEMop.mass,f);
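
  /*
     In components, f = sum_i m_i (u_obj,i - u_i(T))^2, where the m_i are the
     GLL quadrature weights stored in SEMop.mass; this is the quadrature
     approximation of the squared L2 distance to the target profile.
  */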
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Adjoint model starts here
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Initial conditions for the adjoint integration
  */
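  /*
     The adjoint variable is seeded with the derivative of the cost with
     respect to the final state, dJ/du(T) = -2 M (u_obj - u(T)); temp holds
     exactly this after the scaling and pointwise multiplication below, and
     TSAdjointSolve() propagates it backward to give the gradient dJ/du(0).
  */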
  VecScale(temp, -2.0);
  VecPointwiseMult(temp,temp,appctx->SEMop.mass);
  VecCopy(temp,appctx->dat.grad);
  TSSetCostGradients(appctx->ts,1,&appctx->dat.grad,NULL);
  TSAdjointSolve(appctx->ts);
  VecCopy(appctx->dat.grad,G);
  VecDestroy(&temp);
  VecDestroy(&temp2);
  TSDestroy(&appctx->ts);
  return 0;
}
/*TEST

   build:
     requires: !complex

   test:
     requires: !single
     args: -tao_monitor -ts_adapt_dt_max 3.e-3

   test:
     suffix: cn
     requires: !single
     args: -tao_monitor -ts_type cn -ts_dt .003 -pc_type lu

TEST*/