Actual source code: theta.c
petsc-3.13.6 2020-09-29
1: /*
2: Code for timestepping with implicit Theta method
3: */
4: #include <petsc/private/tsimpl.h>
5: #include <petscsnes.h>
6: #include <petscdm.h>
7: #include <petscmat.h>
9: typedef struct {
10: /* context for time stepping */
11: PetscReal stage_time;
12: Vec X0,X,Xdot; /* Storage for u^n (X0), the stage solution u^n + dt a_{11} k_1 (X), and the time derivative u^{n+1}_t (Xdot) */
13: Vec affine; /* Affine vector needed for residual at beginning of step in endpoint formulation */
14: PetscReal Theta;
15: PetscReal shift; /* Shift parameter for SNES Jacobian, used by forward, TLM and adjoint */
16: PetscInt order;
17: PetscBool endpoint;
18: PetscBool extrapolate;
19: TSStepStatus status;
20: Vec VecCostIntegral0; /* Backup for roll-backs due to events, used by cost integral */
21: PetscReal ptime0; /* Backup for ts->ptime, the start time of the current time step, used by TLM and cost integral */
22: PetscReal time_step0; /* Backup for ts->time_step, the step size of the current time step, used by TLM and cost integral */
24: /* context for sensitivity analysis */
25: PetscInt num_tlm; /* Total number of tangent linear equations */
26: Vec *VecsDeltaLam; /* Increment of the adjoint sensitivity w.r.t IC at stage */
27: Vec *VecsDeltaMu; /* Increment of the adjoint sensitivity w.r.t P at stage */
28: Vec *VecsSensiTemp; /* Vector to be multiplied with Jacobian transpose */
29: Mat MatDeltaFwdSensip; /* Increment of the forward sensitivity at stage */
30: Vec VecDeltaFwdSensipCol; /* Working vector for holding one column of the sensitivity matrix */
31: Mat MatFwdSensip0; /* backup for roll-backs due to events */
32: Mat MatIntegralSensipTemp; /* Working matrix for the forward integral sensitivity */
33: Mat MatIntegralSensip0; /* backup for roll-backs due to events */
34: Vec *VecsDeltaLam2; /* Increment of the 2nd-order adjoint sensitivity w.r.t IC at stage */
35: Vec *VecsDeltaMu2; /* Increment of the 2nd-order adjoint sensitivity w.r.t P at stage */
36: Vec *VecsSensi2Temp; /* Working vectors that hold the residual for the second-order adjoint */
37: Vec *VecsAffine; /* Working vectors to store residuals */
38: /* context for error estimation */
39: Vec vec_sol_prev;
40: Vec vec_lte_work;
41: } TS_Theta;
43: static PetscErrorCode TSThetaGetX0AndXdot(TS ts,DM dm,Vec *X0,Vec *Xdot)
44: {
45: TS_Theta *th = (TS_Theta*)ts->data;
49: if (X0) {
50: if (dm && dm != ts->dm) {
51: DMGetNamedGlobalVector(dm,"TSTheta_X0",X0);
52: } else *X0 = ts->vec_sol;
53: }
54: if (Xdot) {
55: if (dm && dm != ts->dm) {
56: DMGetNamedGlobalVector(dm,"TSTheta_Xdot",Xdot);
57: } else *Xdot = th->Xdot;
58: }
59: return(0);
60: }
62: static PetscErrorCode TSThetaRestoreX0AndXdot(TS ts,DM dm,Vec *X0,Vec *Xdot)
63: {
67: if (X0) {
68: if (dm && dm != ts->dm) {
69: DMRestoreNamedGlobalVector(dm,"TSTheta_X0",X0);
70: }
71: }
72: if (Xdot) {
73: if (dm && dm != ts->dm) {
74: DMRestoreNamedGlobalVector(dm,"TSTheta_Xdot",Xdot);
75: }
76: }
77: return(0);
78: }
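/*
  Editorial note (not part of the PETSc source): the Get/Restore pair above lets the method run on
  DMs other than ts->dm, e.g. coarse levels in FAS multigrid or subdomains in nonlinear domain
  decomposition.  For the primary DM the existing vectors ts->vec_sol and th->Xdot are returned
  directly; for any other DM, work vectors are attached to that DM by name through
  DMGetNamedGlobalVector()/DMRestoreNamedGlobalVector().  The hooks below keep those per-DM copies
  consistent under restriction and subdomain scatters.
*/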
80: static PetscErrorCode DMCoarsenHook_TSTheta(DM fine,DM coarse,void *ctx)
81: {
83: return(0);
84: }
86: static PetscErrorCode DMRestrictHook_TSTheta(DM fine,Mat restrct,Vec rscale,Mat inject,DM coarse,void *ctx)
87: {
88: TS ts = (TS)ctx;
90: Vec X0,Xdot,X0_c,Xdot_c;
93: TSThetaGetX0AndXdot(ts,fine,&X0,&Xdot);
94: TSThetaGetX0AndXdot(ts,coarse,&X0_c,&Xdot_c);
95: MatRestrict(restrct,X0,X0_c);
96: MatRestrict(restrct,Xdot,Xdot_c);
97: VecPointwiseMult(X0_c,rscale,X0_c);
98: VecPointwiseMult(Xdot_c,rscale,Xdot_c);
99: TSThetaRestoreX0AndXdot(ts,fine,&X0,&Xdot);
100: TSThetaRestoreX0AndXdot(ts,coarse,&X0_c,&Xdot_c);
101: return(0);
102: }
104: static PetscErrorCode DMSubDomainHook_TSTheta(DM dm,DM subdm,void *ctx)
105: {
107: return(0);
108: }
110: static PetscErrorCode DMSubDomainRestrictHook_TSTheta(DM dm,VecScatter gscat,VecScatter lscat,DM subdm,void *ctx)
111: {
112: TS ts = (TS)ctx;
114: Vec X0,Xdot,X0_sub,Xdot_sub;
117: TSThetaGetX0AndXdot(ts,dm,&X0,&Xdot);
118: TSThetaGetX0AndXdot(ts,subdm,&X0_sub,&Xdot_sub);
120: VecScatterBegin(gscat,X0,X0_sub,INSERT_VALUES,SCATTER_FORWARD);
121: VecScatterEnd(gscat,X0,X0_sub,INSERT_VALUES,SCATTER_FORWARD);
123: VecScatterBegin(gscat,Xdot,Xdot_sub,INSERT_VALUES,SCATTER_FORWARD);
124: VecScatterEnd(gscat,Xdot,Xdot_sub,INSERT_VALUES,SCATTER_FORWARD);
126: TSThetaRestoreX0AndXdot(ts,dm,&X0,&Xdot);
127: TSThetaRestoreX0AndXdot(ts,subdm,&X0_sub,&Xdot_sub);
128: return(0);
129: }
131: static PetscErrorCode TSThetaEvaluateCostIntegral(TS ts)
132: {
133: TS_Theta *th = (TS_Theta*)ts->data;
134: TS quadts = ts->quadraturets;
138: if (th->endpoint) {
139: /* Evolve the cost integral (quadts->vec_sol) to compute the integrals */
140: if (th->Theta!=1.0) {
141: TSComputeRHSFunction(quadts,th->ptime0,th->X0,ts->vec_costintegrand);
142: VecAXPY(quadts->vec_sol,th->time_step0*(1.0-th->Theta),ts->vec_costintegrand);
143: }
144: TSComputeRHSFunction(quadts,ts->ptime,ts->vec_sol,ts->vec_costintegrand);
145: VecAXPY(quadts->vec_sol,th->time_step0*th->Theta,ts->vec_costintegrand);
146: } else {
147: TSComputeRHSFunction(quadts,th->stage_time,th->X,ts->vec_costintegrand);
148: VecAXPY(quadts->vec_sol,th->time_step0,ts->vec_costintegrand);
149: }
150: return(0);
151: }
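/*
  Editorial note (not part of the PETSc source): TSThetaEvaluateCostIntegral() above advances the
  quadrature variable q = quadts->vec_sol with the same Theta rule used for the state,

    endpoint variant:  q_{n+1} = q_n + dt*( (1-Theta)*r(t_n,U_n) + Theta*r(t_{n+1},U_{n+1}) )
    one-stage variant: q_{n+1} = q_n + dt*r(t_s,X_s),  with t_s = t_n + Theta*dt,

  where r is the quadrature RHS and dt is th->time_step0; TSAdjointCostIntegral_Theta() resets
  ptime0 and time_step0 so the same formula can be reused during the adjoint sweep.
*/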
153: static PetscErrorCode TSForwardCostIntegral_Theta(TS ts)
154: {
155: TS_Theta *th = (TS_Theta*)ts->data;
156: TS quadts = ts->quadraturets;
160: /* backup cost integral */
161: VecCopy(quadts->vec_sol,th->VecCostIntegral0);
162: TSThetaEvaluateCostIntegral(ts);
163: return(0);
164: }
166: static PetscErrorCode TSAdjointCostIntegral_Theta(TS ts)
167: {
168: TS_Theta *th = (TS_Theta*)ts->data;
172: /* Like TSForwardCostIntegral(), the adjoint cost integral evaluation relies on ptime0 and time_step0. */
173: th->ptime0 = ts->ptime + ts->time_step;
174: th->time_step0 = -ts->time_step;
175: TSThetaEvaluateCostIntegral(ts);
176: return(0);
177: }
179: static PetscErrorCode TSTheta_SNESSolve(TS ts,Vec b,Vec x)
180: {
181: PetscInt nits,lits;
185: SNESSolve(ts->snes,b,x);
186: SNESGetIterationNumber(ts->snes,&nits);
187: SNESGetLinearSolveIterations(ts->snes,&lits);
188: ts->snes_its += nits; ts->ksp_its += lits;
189: return(0);
190: }
192: static PetscErrorCode TSStep_Theta(TS ts)
193: {
194: TS_Theta *th = (TS_Theta*)ts->data;
195: PetscInt rejections = 0;
196: PetscBool stageok,accept = PETSC_TRUE;
197: PetscReal next_time_step = ts->time_step;
201: if (!ts->steprollback) {
202: if (th->vec_sol_prev) { VecCopy(th->X0,th->vec_sol_prev); }
203: VecCopy(ts->vec_sol,th->X0);
204: }
206: th->status = TS_STEP_INCOMPLETE;
207: while (!ts->reason && th->status != TS_STEP_COMPLETE) {
208: th->shift = 1/(th->Theta*ts->time_step);
209: th->stage_time = ts->ptime + (th->endpoint ? (PetscReal)1 : th->Theta)*ts->time_step;
210: VecCopy(th->X0,th->X);
211: if (th->extrapolate && !ts->steprestart) {
212: VecAXPY(th->X,1/th->shift,th->Xdot);
213: }
214: if (th->endpoint) { /* This formulation assumes a linear, time-independent mass matrix */
215: if (!th->affine) {VecDuplicate(ts->vec_sol,&th->affine);}
216: VecZeroEntries(th->Xdot);
217: TSComputeIFunction(ts,ts->ptime,th->X0,th->Xdot,th->affine,PETSC_FALSE);
218: VecScale(th->affine,(th->Theta-1)/th->Theta);
219: } else if (th->affine) { /* Just in case th->endpoint is changed between calls to TSStep_Theta() */
220: VecZeroEntries(th->affine);
221: }
222: TSPreStage(ts,th->stage_time);
223: TSTheta_SNESSolve(ts,th->affine,th->X);
224: TSPostStage(ts,th->stage_time,0,&th->X);
225: TSAdaptCheckStage(ts->adapt,ts,th->stage_time,th->X,&stageok);
226: if (!stageok) goto reject_step;
228: th->status = TS_STEP_PENDING;
229: if (th->endpoint) {
230: VecCopy(th->X,ts->vec_sol);
231: } else {
232: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
233: VecAXPY(ts->vec_sol,ts->time_step,th->Xdot);
234: }
235: TSAdaptChoose(ts->adapt,ts,ts->time_step,NULL,&next_time_step,&accept);
236: th->status = accept ? TS_STEP_COMPLETE : TS_STEP_INCOMPLETE;
237: if (!accept) {
238: VecCopy(th->X0,ts->vec_sol);
239: ts->time_step = next_time_step;
240: goto reject_step;
241: }
243: if (ts->forward_solve || ts->costintegralfwd) { /* Save the info for later use in cost integral evaluation */
244: th->ptime0 = ts->ptime;
245: th->time_step0 = ts->time_step;
246: }
247: ts->ptime += ts->time_step;
248: ts->time_step = next_time_step;
249: break;
251: reject_step:
252: ts->reject++; accept = PETSC_FALSE;
253: if (!ts->reason && ++rejections > ts->max_reject && ts->max_reject >= 0) {
254: ts->reason = TS_DIVERGED_STEP_REJECTED;
255: PetscInfo2(ts,"Step=%D, step rejections %D greater than current TS allowed, stopping solve\n",ts->steps,rejections);
256: }
257: }
258: return(0);
259: }
261: static PetscErrorCode TSAdjointStepBEuler_Private(TS ts)
262: {
263: TS_Theta *th = (TS_Theta*)ts->data;
264: TS quadts = ts->quadraturets;
265: Vec *VecsDeltaLam = th->VecsDeltaLam,*VecsDeltaMu = th->VecsDeltaMu,*VecsSensiTemp = th->VecsSensiTemp;
266: Vec *VecsDeltaLam2 = th->VecsDeltaLam2,*VecsDeltaMu2 = th->VecsDeltaMu2,*VecsSensi2Temp = th->VecsSensi2Temp;
267: PetscInt nadj;
268: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
269: KSP ksp;
270: PetscScalar *xarr;
271: TSEquationType eqtype;
272: PetscBool isexplicitode = PETSC_FALSE;
273: PetscReal adjoint_time_step;
277: TSGetEquationType(ts,&eqtype);
278: if (eqtype == TS_EQ_ODE_EXPLICIT) {
279: isexplicitode = PETSC_TRUE;
280: VecsDeltaLam = ts->vecs_sensi;
281: VecsDeltaLam2 = ts->vecs_sensi2;
282: }
283: th->status = TS_STEP_INCOMPLETE;
284: SNESGetKSP(ts->snes,&ksp);
285: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
286: if (quadts) {
287: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
288: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
289: }
291: th->stage_time = ts->ptime;
292: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
294: /* Build RHS for first-order adjoint lambda_{n+1}/h + r_u^T(n+1) */
295: if (quadts) {
296: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
297: }
299: for (nadj=0; nadj<ts->numcost; nadj++) {
300: VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);
301: VecScale(VecsSensiTemp[nadj],1./adjoint_time_step); /* lambda_{n+1}/h */
302: if (quadJ) {
303: MatDenseGetColumn(quadJ,nadj,&xarr);
304: VecPlaceArray(ts->vec_drdu_col,xarr);
305: VecAXPY(VecsSensiTemp[nadj],1.,ts->vec_drdu_col);
306: VecResetArray(ts->vec_drdu_col);
307: MatDenseRestoreColumn(quadJ,&xarr);
308: }
309: }
311: /* Build LHS for first-order adjoint */
312: th->shift = 1./adjoint_time_step;
313: TSComputeSNESJacobian(ts,th->X,J,Jpre);
314: KSPSetOperators(ksp,J,Jpre);
316: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
317: for (nadj=0; nadj<ts->numcost; nadj++) {
318: KSPConvergedReason kspreason;
319: KSPSolveTranspose(ksp,VecsSensiTemp[nadj],VecsDeltaLam[nadj]);
320: KSPGetConvergedReason(ksp,&kspreason);
321: if (kspreason < 0) {
322: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
323: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n",ts->steps,nadj);
324: }
325: }
327: if (ts->vecs_sensi2) { /* U_{n+1} */
328: /* Get w1 at t_{n+1} from TLM matrix */
329: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
330: VecPlaceArray(ts->vec_sensip_col,xarr);
331: /* lambda_s^T F_UU w_1 */
332: TSComputeIHessianProductFunctionUU(ts,th->stage_time,th->X,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
333: /* lambda_s^T F_UP w_2 */
334: TSComputeIHessianProductFunctionUP(ts,th->stage_time,th->X,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
335: for (nadj=0; nadj<ts->numcost; nadj++) { /* compute the residual */
336: VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);
337: VecScale(VecsSensi2Temp[nadj],1./adjoint_time_step);
338: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fuu[nadj]);
339: if (ts->vecs_fup) {
340: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fup[nadj]);
341: }
342: }
343: /* Solve stage equation LHS X = RHS for second-order adjoint */
344: for (nadj=0; nadj<ts->numcost; nadj++) {
345: KSPConvergedReason kspreason;
346: KSPSolveTranspose(ksp,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj]);
347: KSPGetConvergedReason(ksp,&kspreason);
348: if (kspreason < 0) {
349: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
350: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n",ts->steps,nadj);
351: }
352: }
353: }
355: /* Update sensitivities and evaluate the integrals, if any */
356: if (!isexplicitode) {
357: th->shift = 0.0;
358: TSComputeSNESJacobian(ts,th->X,J,Jpre);
359: KSPSetOperators(ksp,J,Jpre);
360: MatScale(J,-1.);
361: for (nadj=0; nadj<ts->numcost; nadj++) {
362: /* Add f_U \lambda_s to the original RHS */
363: MatMultTransposeAdd(J,VecsDeltaLam[nadj],VecsSensiTemp[nadj],VecsSensiTemp[nadj]);
364: VecScale(VecsSensiTemp[nadj],adjoint_time_step);
365: VecCopy(VecsSensiTemp[nadj],ts->vecs_sensi[nadj]);
366: if (ts->vecs_sensi2) {
367: MatMultTransposeAdd(J,VecsDeltaLam2[nadj],VecsSensi2Temp[nadj],VecsSensi2Temp[nadj]);
368: VecScale(VecsSensi2Temp[nadj],adjoint_time_step);
369: VecCopy(VecsSensi2Temp[nadj],ts->vecs_sensi2[nadj]);
370: }
371: }
372: }
373: if (ts->vecs_sensip) {
374: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,1./adjoint_time_step,ts->Jacp,PETSC_FALSE); /* get -f_p */
375: if (quadts) {
376: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
377: }
378: if (ts->vecs_sensi2p) {
379: /* lambda_s^T F_PU w_1 */
380: TSComputeIHessianProductFunctionPU(ts,th->stage_time,th->X,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
381: /* lambda_s^T F_PP w_2 */
382: TSComputeIHessianProductFunctionPP(ts,th->stage_time,th->X,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
383: }
385: for (nadj=0; nadj<ts->numcost; nadj++) {
386: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
387: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step,VecsDeltaMu[nadj]);
388: if (quadJp) {
389: MatDenseGetColumn(quadJp,nadj,&xarr);
390: VecPlaceArray(ts->vec_drdp_col,xarr);
391: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step,ts->vec_drdp_col);
392: VecResetArray(ts->vec_drdp_col);
393: MatDenseRestoreColumn(quadJp,&xarr);
394: }
395: if (ts->vecs_sensi2p) {
396: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
397: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,VecsDeltaMu2[nadj]);
398: if (ts->vecs_fpu) {
399: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,ts->vecs_fpu[nadj]);
400: }
401: if (ts->vecs_fpp) {
402: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,ts->vecs_fpp[nadj]);
403: }
404: }
405: }
406: }
408: if (ts->vecs_sensi2) {
409: VecResetArray(ts->vec_sensip_col);
410: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
411: }
412: th->status = TS_STEP_COMPLETE;
413: return(0);
414: }
416: static PetscErrorCode TSAdjointStep_Theta(TS ts)
417: {
418: TS_Theta *th = (TS_Theta*)ts->data;
419: TS quadts = ts->quadraturets;
420: Vec *VecsDeltaLam = th->VecsDeltaLam,*VecsDeltaMu = th->VecsDeltaMu,*VecsSensiTemp = th->VecsSensiTemp;
421: Vec *VecsDeltaLam2 = th->VecsDeltaLam2,*VecsDeltaMu2 = th->VecsDeltaMu2,*VecsSensi2Temp = th->VecsSensi2Temp;
422: PetscInt nadj;
423: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
424: KSP ksp;
425: PetscScalar *xarr;
426: PetscReal adjoint_time_step;
427: PetscReal adjoint_ptime; /* end time of the adjoint time step (ts->ptime is the start time, usually ts->ptime is larger than adjoint_ptime) */
431: if (th->Theta == 1.) {
432: TSAdjointStepBEuler_Private(ts);
433: return(0);
434: }
435: th->status = TS_STEP_INCOMPLETE;
436: SNESGetKSP(ts->snes,&ksp);
437: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
438: if (quadts) {
439: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
440: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
441: }
442: /* If endpoint=1, ts->ptime and th->X0 will be used; if endpoint=0, th->stage_time and th->X will be used. */
443: th->stage_time = th->endpoint ? ts->ptime : (ts->ptime+(1.-th->Theta)*ts->time_step);
444: adjoint_ptime = ts->ptime + ts->time_step;
445: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
447: /* Build RHS for first-order adjoint */
448: /* Cost function has an integral term */
449: if (quadts) {
450: if (th->endpoint) {
451: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
452: } else {
453: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
454: }
455: }
457: for (nadj=0; nadj<ts->numcost; nadj++) {
458: VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);
459: VecScale(VecsSensiTemp[nadj],1./(th->Theta*adjoint_time_step));
460: if (quadJ) {
461: MatDenseGetColumn(quadJ,nadj,&xarr);
462: VecPlaceArray(ts->vec_drdu_col,xarr);
463: VecAXPY(VecsSensiTemp[nadj],1.,ts->vec_drdu_col);
464: VecResetArray(ts->vec_drdu_col);
465: MatDenseRestoreColumn(quadJ,&xarr);
466: }
467: }
469: /* Build LHS for first-order adjoint */
470: th->shift = 1./(th->Theta*adjoint_time_step);
471: if (th->endpoint) {
472: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
473: } else {
474: TSComputeSNESJacobian(ts,th->X,J,Jpre);
475: }
476: KSPSetOperators(ksp,J,Jpre);
478: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
479: for (nadj=0; nadj<ts->numcost; nadj++) {
480: KSPConvergedReason kspreason;
481: KSPSolveTranspose(ksp,VecsSensiTemp[nadj],VecsDeltaLam[nadj]);
482: KSPGetConvergedReason(ksp,&kspreason);
483: if (kspreason < 0) {
484: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
485: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n",ts->steps,nadj);
486: }
487: }
489: /* Second-order adjoint */
490: if (ts->vecs_sensi2) { /* U_{n+1} */
491: if (!th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"Operation not implemented in TS_Theta");
492: /* Get w1 at t_{n+1} from TLM matrix */
493: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
494: VecPlaceArray(ts->vec_sensip_col,xarr);
495: /* lambda_s^T F_UU w_1 */
496: TSComputeIHessianProductFunctionUU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
497: VecResetArray(ts->vec_sensip_col);
498: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
499: /* lambda_s^T F_UP w_2 */
500: TSComputeIHessianProductFunctionUP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
501: for (nadj=0; nadj<ts->numcost; nadj++) { /* compute the residual */
502: VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);
503: VecScale(VecsSensi2Temp[nadj],th->shift);
504: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fuu[nadj]);
505: if (ts->vecs_fup) {
506: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fup[nadj]);
507: }
508: }
509: /* Solve stage equation LHS X = RHS for second-order adjoint */
510: for (nadj=0; nadj<ts->numcost; nadj++) {
511: KSPConvergedReason kspreason;
512: KSPSolveTranspose(ksp,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj]);
513: KSPGetConvergedReason(ksp,&kspreason);
514: if (kspreason < 0) {
515: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
516: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n",ts->steps,nadj);
517: }
518: }
519: }
521: /* Update sensitivities and evaluate the integrals, if any */
522: if (th->endpoint) { /* two-stage Theta methods with th->Theta!=1; th->Theta==1 leads to backward Euler (BEuler) */
523: th->shift = 1./((th->Theta-1.)*adjoint_time_step);
524: th->stage_time = adjoint_ptime;
525: TSComputeSNESJacobian(ts,th->X0,J,Jpre);
526: KSPSetOperators(ksp,J,Jpre);
527: /* R_U at t_n */
528: if (quadts) {
529: TSComputeRHSJacobian(quadts,adjoint_ptime,th->X0,quadJ,NULL);
530: }
531: for (nadj=0; nadj<ts->numcost; nadj++) {
532: MatMultTranspose(J,VecsDeltaLam[nadj],ts->vecs_sensi[nadj]);
533: if (quadJ) {
534: MatDenseGetColumn(quadJ,nadj,&xarr);
535: VecPlaceArray(ts->vec_drdu_col,xarr);
536: VecAXPY(ts->vecs_sensi[nadj],-1.,ts->vec_drdu_col);
537: VecResetArray(ts->vec_drdu_col);
538: MatDenseRestoreColumn(quadJ,&xarr);
539: }
540: VecScale(ts->vecs_sensi[nadj],1./th->shift);
541: }
543: /* Second-order adjoint */
544: if (ts->vecs_sensi2) { /* U_n */
545: /* Get w1 at t_n from TLM matrix */
546: MatDenseGetColumn(th->MatFwdSensip0,0,&xarr);
547: VecPlaceArray(ts->vec_sensip_col,xarr);
548: /* lambda_s^T F_UU w_1 */
549: TSComputeIHessianProductFunctionUU(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
550: VecResetArray(ts->vec_sensip_col);
551: MatDenseRestoreColumn(th->MatFwdSensip0,&xarr);
552: /* lambda_s^T F_UP w_2 */
553: TSComputeIHessianProductFunctionUP(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
554: for (nadj=0; nadj<ts->numcost; nadj++) {
555: /* M^T Lambda_s + h(1-theta) F_U^T Lambda_s + h(1-theta) lambda_s^T F_UU w_1 + lambda_s^T F_UP w_2 */
556: MatMultTranspose(J,VecsDeltaLam2[nadj],ts->vecs_sensi2[nadj]);
557: VecAXPY(ts->vecs_sensi2[nadj],1.,ts->vecs_fuu[nadj]);
558: if (ts->vecs_fup) {
559: VecAXPY(ts->vecs_sensi2[nadj],1.,ts->vecs_fup[nadj]);
560: }
561: VecScale(ts->vecs_sensi2[nadj],1./th->shift);
562: }
563: }
565: th->stage_time = ts->ptime; /* recover the old value */
567: if (ts->vecs_sensip) { /* sensitivities wrt parameters */
568: /* U_{n+1} */
569: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,-1./(th->Theta*adjoint_time_step),ts->Jacp,PETSC_FALSE);
570: if (quadts) {
571: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
572: }
573: for (nadj=0; nadj<ts->numcost; nadj++) {
574: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
575: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step*th->Theta,VecsDeltaMu[nadj]);
576: if (quadJp) {
577: MatDenseGetColumn(quadJp,nadj,&xarr);
578: VecPlaceArray(ts->vec_drdp_col,xarr);
579: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step*th->Theta,ts->vec_drdp_col);
580: VecResetArray(ts->vec_drdp_col);
581: MatDenseRestoreColumn(quadJp,&xarr);
582: }
583: }
584: if (ts->vecs_sensi2p) { /* second-order */
585: /* Get w1 at t_{n+1} from TLM matrix */
586: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
587: VecPlaceArray(ts->vec_sensip_col,xarr);
588: /* lambda_s^T F_PU w_1 */
589: TSComputeIHessianProductFunctionPU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
590: VecResetArray(ts->vec_sensip_col);
591: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
593: /* lambda_s^T F_PP w_2 */
594: TSComputeIHessianProductFunctionPP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
595: for (nadj=0; nadj<ts->numcost; nadj++) {
596: /* Mu2 <- Mu2 + h theta F_P^T Lambda_s + h theta (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
597: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
598: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,VecsDeltaMu2[nadj]);
599: if (ts->vecs_fpu) {
600: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,ts->vecs_fpu[nadj]);
601: }
602: if (ts->vecs_fpp) {
603: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,ts->vecs_fpp[nadj]);
604: }
605: }
606: }
608: /* U_s */
609: TSComputeIJacobianP(ts,adjoint_ptime,th->X0,th->Xdot,1./((th->Theta-1.0)*adjoint_time_step),ts->Jacp,PETSC_FALSE);
610: if (quadts) {
611: TSComputeRHSJacobianP(quadts,adjoint_ptime,th->X0,quadJp);
612: }
613: for (nadj=0; nadj<ts->numcost; nadj++) {
614: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
615: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step*(1.0-th->Theta),VecsDeltaMu[nadj]);
616: if (quadJp) {
617: MatDenseGetColumn(quadJp,nadj,&xarr);
618: VecPlaceArray(ts->vec_drdp_col,xarr);
619: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step*(1.0-th->Theta),ts->vec_drdp_col);
620: VecResetArray(ts->vec_drdp_col);
621: MatDenseRestoreColumn(quadJp,&xarr);
622: }
623: if (ts->vecs_sensi2p) { /* second-order */
624: /* Get w1 at t_n from TLM matrix */
625: MatDenseGetColumn(th->MatFwdSensip0,0,&xarr);
626: VecPlaceArray(ts->vec_sensip_col,xarr);
627: /* lambda_s^T F_PU w_1 */
628: TSComputeIHessianProductFunctionPU(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
629: VecResetArray(ts->vec_sensip_col);
630: MatDenseRestoreColumn(th->MatFwdSensip0,&xarr);
631: /* lambda_s^T F_PP w_2 */
632: TSComputeIHessianProductFunctionPP(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
633: for (nadj=0; nadj<ts->numcost; nadj++) {
634: /* Mu2 <- Mu2 + h(1-theta) F_P^T Lambda_s + h(1-theta) (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
635: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
636: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),VecsDeltaMu2[nadj]);
637: if (ts->vecs_fpu) {
638: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),ts->vecs_fpu[nadj]);
639: }
640: if (ts->vecs_fpp) {
641: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),ts->vecs_fpp[nadj]);
642: }
643: }
644: }
645: }
646: }
647: } else { /* one-stage case */
648: th->shift = 0.0;
649: TSComputeSNESJacobian(ts,th->X,J,Jpre); /* get -f_y */
650: KSPSetOperators(ksp,J,Jpre);
651: if (quadts) {
652: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
653: }
654: for (nadj=0; nadj<ts->numcost; nadj++) {
655: MatMultTranspose(J,VecsDeltaLam[nadj],VecsSensiTemp[nadj]);
656: VecAXPY(ts->vecs_sensi[nadj],-adjoint_time_step,VecsSensiTemp[nadj]);
657: if (quadJ) {
658: MatDenseGetColumn(quadJ,nadj,&xarr);
659: VecPlaceArray(ts->vec_drdu_col,xarr);
660: VecAXPY(ts->vecs_sensi[nadj],adjoint_time_step,ts->vec_drdu_col);
661: VecResetArray(ts->vec_drdu_col);
662: MatDenseRestoreColumn(quadJ,&xarr);
663: }
664: }
665: if (ts->vecs_sensip) {
666: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
667: if (quadts) {
668: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
669: }
670: for (nadj=0; nadj<ts->numcost; nadj++) {
671: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
672: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step,VecsDeltaMu[nadj]);
673: if (quadJp) {
674: MatDenseGetColumn(quadJp,nadj,&xarr);
675: VecPlaceArray(ts->vec_drdp_col,xarr);
676: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step,ts->vec_drdp_col);
677: VecResetArray(ts->vec_drdp_col);
678: MatDenseRestoreColumn(quadJp,&xarr);
679: }
680: }
681: }
682: }
684: th->status = TS_STEP_COMPLETE;
685: return(0);
686: }
688: static PetscErrorCode TSInterpolate_Theta(TS ts,PetscReal t,Vec X)
689: {
690: TS_Theta *th = (TS_Theta*)ts->data;
691: PetscReal dt = t - ts->ptime;
695: VecCopy(ts->vec_sol,th->X);
696: if (th->endpoint) dt *= th->Theta;
697: VecWAXPY(X,dt,th->Xdot,th->X);
698: return(0);
699: }
701: static PetscErrorCode TSEvaluateWLTE_Theta(TS ts,NormType wnormtype,PetscInt *order,PetscReal *wlte)
702: {
703: TS_Theta *th = (TS_Theta*)ts->data;
704: Vec X = ts->vec_sol; /* X = solution */
705: Vec Y = th->vec_lte_work; /* Y = X + LTE */
706: PetscReal wltea,wlter;
710: if (!th->vec_sol_prev) {*wlte = -1; return(0);}
711: /* Cannot compute LTE in first step or in restart after event */
712: if (ts->steprestart) {*wlte = -1; return(0);}
713: /* Compute LTE using backward differences with non-constant time step */
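    /* Editorial note: with a = 1 + h_prev/h the combination below gives
         Y - X = X/a - X0/(a-1) + X_prev/(a*(a-1)) = h^2 * f[t_{n+1},t_n,t_{n-1}],
       i.e. h^2 times the second divided difference of the solution history, which approximates
       (h^2/2)*u''(t), the leading local truncation error of the first-order Theta step.  For a
       constant step size this reduces to (X - 2*X0 + X_prev)/2. */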
714: {
715: PetscReal h = ts->time_step, h_prev = ts->ptime - ts->ptime_prev;
716: PetscReal a = 1 + h_prev/h;
717: PetscScalar scal[3]; Vec vecs[3];
718: scal[0] = +1/a; scal[1] = -1/(a-1); scal[2] = +1/(a*(a-1));
719: vecs[0] = X; vecs[1] = th->X0; vecs[2] = th->vec_sol_prev;
720: VecCopy(X,Y);
721: VecMAXPY(Y,3,scal,vecs);
722: TSErrorWeightedNorm(ts,X,Y,wnormtype,wlte,&wltea,&wlter);
723: }
724: if (order) *order = 2;
725: return(0);
726: }
728: static PetscErrorCode TSRollBack_Theta(TS ts)
729: {
730: TS_Theta *th = (TS_Theta*)ts->data;
731: TS quadts = ts->quadraturets;
735: VecCopy(th->X0,ts->vec_sol);
736: if (quadts && ts->costintegralfwd) {
737: VecCopy(th->VecCostIntegral0,quadts->vec_sol);
738: }
739: th->status = TS_STEP_INCOMPLETE;
740: if (ts->mat_sensip) {
741: MatCopy(th->MatFwdSensip0,ts->mat_sensip,SAME_NONZERO_PATTERN);
742: }
743: if (quadts && quadts->mat_sensip) {
744: MatCopy(th->MatIntegralSensip0,quadts->mat_sensip,SAME_NONZERO_PATTERN);
745: }
746: return(0);
747: }
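/*
  Editorial note (not part of the PETSc source): TSForwardStep_Theta() below advances each column of
  the forward sensitivity matrix ts->mat_sensip by solving a tangent linear (TLM) system

    (shift*F_Udot + F_U)|_stage * s_new = rhs,

  where the left-hand side reuses the shifted Jacobian of the nonlinear stage equation and the
  right-hand side collects the Jacobian contribution at the previous state plus the parameter
  forcing F_p.  Quadrature sensitivities (quadts->mat_sensip) are accumulated with the same Theta
  weights as the cost integral.
*/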
749: static PetscErrorCode TSForwardStep_Theta(TS ts)
750: {
751: TS_Theta *th = (TS_Theta*)ts->data;
752: TS quadts = ts->quadraturets;
753: Mat MatDeltaFwdSensip = th->MatDeltaFwdSensip;
754: Vec VecDeltaFwdSensipCol = th->VecDeltaFwdSensipCol;
755: PetscInt ntlm;
756: KSP ksp;
757: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
758: PetscScalar *barr,*xarr;
759: PetscReal previous_shift;
763: previous_shift = th->shift;
764: MatCopy(ts->mat_sensip,th->MatFwdSensip0,SAME_NONZERO_PATTERN);
766: if (quadts && quadts->mat_sensip) {
767: MatCopy(quadts->mat_sensip,th->MatIntegralSensip0,SAME_NONZERO_PATTERN);
768: }
769: SNESGetKSP(ts->snes,&ksp);
770: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
771: if (quadts) {
772: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
773: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
774: }
776: /* Build RHS */
777: if (th->endpoint) { /* 2-stage method */
778: th->shift = 1./((th->Theta-1.)*th->time_step0);
779: TSComputeIJacobian(ts,th->ptime0,th->X0,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
780: MatMatMult(J,ts->mat_sensip,MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatDeltaFwdSensip);
781: MatScale(MatDeltaFwdSensip,(th->Theta-1.)/th->Theta);
783: /* Add the f_p forcing terms */
784: if (ts->Jacp) {
785: TSComputeIJacobianP(ts,th->ptime0,th->X0,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
786: MatAXPY(MatDeltaFwdSensip,(th->Theta-1.)/th->Theta,ts->Jacp,SUBSET_NONZERO_PATTERN);
787: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
788: MatAXPY(MatDeltaFwdSensip,-1.,ts->Jacp,SUBSET_NONZERO_PATTERN);
789: }
790: } else { /* 1-stage method */
791: th->shift = 0.0;
792: TSComputeIJacobian(ts,th->stage_time,th->X,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
793: MatMatMult(J,ts->mat_sensip,MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatDeltaFwdSensip);
794: MatScale(MatDeltaFwdSensip,-1.);
796: /* Add the f_p forcing terms */
797: if (ts->Jacp) {
798: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
799: MatAXPY(MatDeltaFwdSensip,-1.,ts->Jacp,SUBSET_NONZERO_PATTERN);
800: }
801: }
803: /* Build LHS */
804: th->shift = previous_shift; /* recover the previous shift used in TSStep_Theta() */
805: if (th->endpoint) {
806: TSComputeIJacobian(ts,th->stage_time,ts->vec_sol,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
807: } else {
808: TSComputeIJacobian(ts,th->stage_time,th->X,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
809: }
810: KSPSetOperators(ksp,J,Jpre);
812: /*
813: Evaluate the first stage of integral gradients with the 2-stage method:
814: drdu|t_n*S(t_n) + drdp|t_n
815: This is done before the linear solve because the sensitivity variable S(t_n) will be propagated to S(t_{n+1})
816: */
817: if (th->endpoint) { /* 2-stage method only */
818: if (quadts && quadts->mat_sensip) {
819: TSComputeRHSJacobian(quadts,th->ptime0,th->X0,quadJ,NULL);
820: TSComputeRHSJacobianP(quadts,th->ptime0,th->X0,quadJp);
821: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
822: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
823: MatAXPY(quadts->mat_sensip,th->time_step0*(1.-th->Theta),th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
824: }
825: }
827: /* Solve the tangent linear equation for forward sensitivities to parameters */
828: for (ntlm=0; ntlm<th->num_tlm; ntlm++) {
829: KSPConvergedReason kspreason;
830: MatDenseGetColumn(MatDeltaFwdSensip,ntlm,&barr);
831: VecPlaceArray(VecDeltaFwdSensipCol,barr);
832: if (th->endpoint) {
833: MatDenseGetColumn(ts->mat_sensip,ntlm,&xarr);
834: VecPlaceArray(ts->vec_sensip_col,xarr);
835: KSPSolve(ksp,VecDeltaFwdSensipCol,ts->vec_sensip_col);
836: VecResetArray(ts->vec_sensip_col);
837: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
838: } else {
839: KSPSolve(ksp,VecDeltaFwdSensipCol,VecDeltaFwdSensipCol);
840: }
841: KSPGetConvergedReason(ksp,&kspreason);
842: if (kspreason < 0) {
843: ts->reason = TSFORWARD_DIVERGED_LINEAR_SOLVE;
844: PetscInfo2(ts,"Step=%D, %Dth tangent linear solve, linear solve fails, stopping tangent linear solve\n",ts->steps,ntlm);
845: }
846: VecResetArray(VecDeltaFwdSensipCol);
847: MatDenseRestoreColumn(MatDeltaFwdSensip,&barr);
848: }
850: /*
851: Evaluate the second stage of integral gradients with the 2-stage method:
852: drdu|t_{n+1}*S(t_{n+1}) + drdp|t_{n+1}
853: */
854: if (quadts && quadts->mat_sensip) {
855: if (!th->endpoint) {
856: MatAXPY(ts->mat_sensip,1,MatDeltaFwdSensip,SAME_NONZERO_PATTERN); /* stage sensitivity */
857: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
858: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
859: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
860: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
861: MatAXPY(quadts->mat_sensip,th->time_step0,th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
862: MatAXPY(ts->mat_sensip,(1.-th->Theta)/th->Theta,MatDeltaFwdSensip,SAME_NONZERO_PATTERN);
863: } else {
864: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
865: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
866: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
867: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
868: MatAXPY(quadts->mat_sensip,th->time_step0*th->Theta,th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
869: }
870: } else {
871: if (!th->endpoint) {
872: MatAXPY(ts->mat_sensip,1./th->Theta,MatDeltaFwdSensip,SAME_NONZERO_PATTERN);
873: }
874: }
875: return(0);
876: }
878: static PetscErrorCode TSForwardGetStages_Theta(TS ts,PetscInt *ns,Mat **stagesensip)
879: {
880: TS_Theta *th = (TS_Theta*)ts->data;
883: if (ns) *ns = 1;
884: if (stagesensip) *stagesensip = th->endpoint ? &(th->MatFwdSensip0) : &(th->MatDeltaFwdSensip);
885: return(0);
886: }
888: /*------------------------------------------------------------*/
889: static PetscErrorCode TSReset_Theta(TS ts)
890: {
891: TS_Theta *th = (TS_Theta*)ts->data;
895: VecDestroy(&th->X);
896: VecDestroy(&th->Xdot);
897: VecDestroy(&th->X0);
898: VecDestroy(&th->affine);
900: VecDestroy(&th->vec_sol_prev);
901: VecDestroy(&th->vec_lte_work);
903: VecDestroy(&th->VecCostIntegral0);
904: return(0);
905: }
907: static PetscErrorCode TSAdjointReset_Theta(TS ts)
908: {
909: TS_Theta *th = (TS_Theta*)ts->data;
913: VecDestroyVecs(ts->numcost,&th->VecsDeltaLam);
914: VecDestroyVecs(ts->numcost,&th->VecsDeltaMu);
915: VecDestroyVecs(ts->numcost,&th->VecsDeltaLam2);
916: VecDestroyVecs(ts->numcost,&th->VecsDeltaMu2);
917: VecDestroyVecs(ts->numcost,&th->VecsSensiTemp);
918: VecDestroyVecs(ts->numcost,&th->VecsSensi2Temp);
919: return(0);
920: }
922: static PetscErrorCode TSDestroy_Theta(TS ts)
923: {
927: TSReset_Theta(ts);
928: if (ts->dm) {
929: DMCoarsenHookRemove(ts->dm,DMCoarsenHook_TSTheta,DMRestrictHook_TSTheta,ts);
930: DMSubDomainHookRemove(ts->dm,DMSubDomainHook_TSTheta,DMSubDomainRestrictHook_TSTheta,ts);
931: }
932: PetscFree(ts->data);
933: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetTheta_C",NULL);
934: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetTheta_C",NULL);
935: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetEndpoint_C",NULL);
936: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetEndpoint_C",NULL);
937: return(0);
938: }
940: /*
941: This defines the nonlinear equation that is to be solved with SNES
942: G(U) = F[t0+Theta*dt, U, (U-U0)*shift] = 0
944: Note that U here is the stage argument. This means that U = U_{n+1} only if endpoint = true,
945: otherwise U = theta U_{n+1} + (1 - theta) U0, which for the case of implicit midpoint is
946: U = (U_{n+1} + U0)/2
947: */
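/*
  Editorial note (not part of the PETSc source): for the ODE case u_t = f(t,u), i.e.
  F(t,U,Udot) = Udot - f(t,U), the step produced by TSStep_Theta() is

    one-stage (theta) variant:  X = U0 + Theta*dt*f(t0+Theta*dt,X),  U_{n+1} = U0 + dt*f(t0+Theta*dt,X)
    endpoint variant:           U_{n+1} = U0 + dt*( (1-Theta)*f(t0,U0) + Theta*f(t0+dt,U_{n+1}) )

  and the SNES Jacobian assembled in SNESTSFormJacobian_Theta() below is
  dG/dU = shift*F_Udot + F_U with shift = 1/(Theta*dt), matching the shift set in TSStep_Theta().
*/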
948: static PetscErrorCode SNESTSFormFunction_Theta(SNES snes,Vec x,Vec y,TS ts)
949: {
950: TS_Theta *th = (TS_Theta*)ts->data;
952: Vec X0,Xdot;
953: DM dm,dmsave;
954: PetscReal shift = th->shift;
957: SNESGetDM(snes,&dm);
958: /* When using the endpoint variant, this is actually 1/Theta * Xdot */
959: TSThetaGetX0AndXdot(ts,dm,&X0,&Xdot);
960: if (x != X0) {
961: VecAXPBYPCZ(Xdot,-shift,shift,0,X0,x);
962: } else {
963: VecZeroEntries(Xdot);
964: }
965: /* DM monkey-business allows user code to call TSGetDM() inside of functions evaluated on levels of FAS */
966: dmsave = ts->dm;
967: ts->dm = dm;
968: TSComputeIFunction(ts,th->stage_time,x,Xdot,y,PETSC_FALSE);
969: ts->dm = dmsave;
970: TSThetaRestoreX0AndXdot(ts,dm,&X0,&Xdot);
971: return(0);
972: }
974: static PetscErrorCode SNESTSFormJacobian_Theta(SNES snes,Vec x,Mat A,Mat B,TS ts)
975: {
976: TS_Theta *th = (TS_Theta*)ts->data;
978: Vec Xdot;
979: DM dm,dmsave;
980: PetscReal shift = th->shift;
983: SNESGetDM(snes,&dm);
984: /* Xdot has already been computed in SNESTSFormFunction_Theta (SNES guarantees this) */
985: TSThetaGetX0AndXdot(ts,dm,NULL,&Xdot);
987: dmsave = ts->dm;
988: ts->dm = dm;
989: TSComputeIJacobian(ts,th->stage_time,x,Xdot,shift,A,B,PETSC_FALSE);
990: ts->dm = dmsave;
991: TSThetaRestoreX0AndXdot(ts,dm,NULL,&Xdot);
992: return(0);
993: }
995: static PetscErrorCode TSForwardSetUp_Theta(TS ts)
996: {
997: TS_Theta *th = (TS_Theta*)ts->data;
998: TS quadts = ts->quadraturets;
1002: /* combine sensitivities to parameters and sensitivities to initial values into one array */
1003: th->num_tlm = ts->num_parameters;
1004: MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatDeltaFwdSensip);
1005: if (quadts && quadts->mat_sensip) {
1006: MatDuplicate(quadts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatIntegralSensipTemp);
1007: MatDuplicate(quadts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatIntegralSensip0);
1008: }
1009: /* backup sensitivity results for roll-backs */
1010: MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatFwdSensip0);
1012: VecDuplicate(ts->vec_sol,&th->VecDeltaFwdSensipCol);
1013: return(0);
1014: }
1016: static PetscErrorCode TSForwardReset_Theta(TS ts)
1017: {
1018: TS_Theta *th = (TS_Theta*)ts->data;
1019: TS quadts = ts->quadraturets;
1023: if (quadts && quadts->mat_sensip) {
1024: MatDestroy(&th->MatIntegralSensipTemp);
1025: MatDestroy(&th->MatIntegralSensip0);
1026: }
1027: VecDestroy(&th->VecDeltaFwdSensipCol);
1028: MatDestroy(&th->MatDeltaFwdSensip);
1029: MatDestroy(&th->MatFwdSensip0);
1030: return(0);
1031: }
1033: static PetscErrorCode TSSetUp_Theta(TS ts)
1034: {
1035: TS_Theta *th = (TS_Theta*)ts->data;
1036: TS quadts = ts->quadraturets;
1037: PetscBool match;
1041: if (!th->VecCostIntegral0 && quadts && ts->costintegralfwd) { /* back up cost integral */
1042: VecDuplicate(quadts->vec_sol,&th->VecCostIntegral0);
1043: }
1044: if (!th->X) {
1045: VecDuplicate(ts->vec_sol,&th->X);
1046: }
1047: if (!th->Xdot) {
1048: VecDuplicate(ts->vec_sol,&th->Xdot);
1049: }
1050: if (!th->X0) {
1051: VecDuplicate(ts->vec_sol,&th->X0);
1052: }
1053: if (th->endpoint) {
1054: VecDuplicate(ts->vec_sol,&th->affine);
1055: }
1057: th->order = (th->Theta == 0.5) ? 2 : 1;
1058: th->shift = 1/(th->Theta*ts->time_step);
1060: TSGetDM(ts,&ts->dm);
1061: DMCoarsenHookAdd(ts->dm,DMCoarsenHook_TSTheta,DMRestrictHook_TSTheta,ts);
1062: DMSubDomainHookAdd(ts->dm,DMSubDomainHook_TSTheta,DMSubDomainRestrictHook_TSTheta,ts);
1064: TSGetAdapt(ts,&ts->adapt);
1065: TSAdaptCandidatesClear(ts->adapt);
1066: PetscObjectTypeCompare((PetscObject)ts->adapt,TSADAPTNONE,&match);
1067: if (!match) {
1068: VecDuplicate(ts->vec_sol,&th->vec_sol_prev);
1069: VecDuplicate(ts->vec_sol,&th->vec_lte_work);
1070: }
1071: TSGetSNES(ts,&ts->snes);
1072: return(0);
1073: }
1075: /*------------------------------------------------------------*/
1077: static PetscErrorCode TSAdjointSetUp_Theta(TS ts)
1078: {
1079: TS_Theta *th = (TS_Theta*)ts->data;
1083: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsDeltaLam);
1084: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsSensiTemp);
1085: if (ts->vecs_sensip) {
1086: VecDuplicateVecs(ts->vecs_sensip[0],ts->numcost,&th->VecsDeltaMu);
1087: }
1088: if (ts->vecs_sensi2) {
1089: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsDeltaLam2);
1090: VecDuplicateVecs(ts->vecs_sensi2[0],ts->numcost,&th->VecsSensi2Temp);
1091: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1092: if (!ts->ihessianproduct_fuu) ts->vecs_fuu = ts->vecs_guu;
1093: if (!ts->ihessianproduct_fup) ts->vecs_fup = ts->vecs_gup;
1094: }
1095: if (ts->vecs_sensi2p) {
1096: VecDuplicateVecs(ts->vecs_sensi2p[0],ts->numcost,&th->VecsDeltaMu2);
1097: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1098: if (!ts->ihessianproduct_fpu) ts->vecs_fpu = ts->vecs_gpu;
1099: if (!ts->ihessianproduct_fpp) ts->vecs_fpp = ts->vecs_gpp;
1100: }
1101: return(0);
1102: }
1104: static PetscErrorCode TSSetFromOptions_Theta(PetscOptionItems *PetscOptionsObject,TS ts)
1105: {
1106: TS_Theta *th = (TS_Theta*)ts->data;
1110: PetscOptionsHead(PetscOptionsObject,"Theta ODE solver options");
1111: {
1112: PetscOptionsReal("-ts_theta_theta","Location of stage (0<Theta<=1)","TSThetaSetTheta",th->Theta,&th->Theta,NULL);
1113: PetscOptionsBool("-ts_theta_endpoint","Use the endpoint instead of midpoint form of the Theta method","TSThetaSetEndpoint",th->endpoint,&th->endpoint,NULL);
1114: PetscOptionsBool("-ts_theta_initial_guess_extrapolate","Extrapolate stage initial guess from previous solution (sometimes unstable)","TSThetaSetExtrapolate",th->extrapolate,&th->extrapolate,NULL);
1115: }
1116: PetscOptionsTail();
1117: return(0);
1118: }
1120: static PetscErrorCode TSView_Theta(TS ts,PetscViewer viewer)
1121: {
1122: TS_Theta *th = (TS_Theta*)ts->data;
1123: PetscBool iascii;
1127: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1128: if (iascii) {
1129: PetscViewerASCIIPrintf(viewer," Theta=%g\n",(double)th->Theta);
1130: PetscViewerASCIIPrintf(viewer," Extrapolation=%s\n",th->extrapolate ? "yes" : "no");
1131: }
1132: return(0);
1133: }
1135: static PetscErrorCode TSThetaGetTheta_Theta(TS ts,PetscReal *theta)
1136: {
1137: TS_Theta *th = (TS_Theta*)ts->data;
1140: *theta = th->Theta;
1141: return(0);
1142: }
1144: static PetscErrorCode TSThetaSetTheta_Theta(TS ts,PetscReal theta)
1145: {
1146: TS_Theta *th = (TS_Theta*)ts->data;
1149: if (theta <= 0 || 1 < theta) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_OUTOFRANGE,"Theta %g not in range (0,1]",(double)theta);
1150: th->Theta = theta;
1151: th->order = (th->Theta == 0.5) ? 2 : 1;
1152: return(0);
1153: }
1155: static PetscErrorCode TSThetaGetEndpoint_Theta(TS ts,PetscBool *endpoint)
1156: {
1157: TS_Theta *th = (TS_Theta*)ts->data;
1160: *endpoint = th->endpoint;
1161: return(0);
1162: }
1164: static PetscErrorCode TSThetaSetEndpoint_Theta(TS ts,PetscBool flg)
1165: {
1166: TS_Theta *th = (TS_Theta*)ts->data;
1169: th->endpoint = flg;
1170: return(0);
1171: }
1173: #if defined(PETSC_HAVE_COMPLEX)
1174: static PetscErrorCode TSComputeLinearStability_Theta(TS ts,PetscReal xr,PetscReal xi,PetscReal *yr,PetscReal *yi)
1175: {
1176: PetscComplex z = xr + xi*PETSC_i,f;
1177: TS_Theta *th = (TS_Theta*)ts->data;
1178: const PetscReal one = 1.0;
1181: f = (one + (one - th->Theta)*z)/(one - th->Theta*z);
1182: *yr = PetscRealPartComplex(f);
1183: *yi = PetscImaginaryPartComplex(f);
1184: return(0);
1185: }
1186: #endif
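/*
  Editorial note (not part of the PETSc source): the stability function computed above is
  R(z) = (1 + (1-Theta)*z)/(1 - Theta*z).  Theta = 1 gives R(z) = 1/(1-z) (backward Euler, L-stable)
  and Theta = 0.5 gives R(z) = (1+z/2)/(1-z/2) (implicit midpoint/trapezoid, A-stable); the method
  is A-stable for Theta >= 1/2.
*/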
1188: static PetscErrorCode TSGetStages_Theta(TS ts,PetscInt *ns,Vec **Y)
1189: {
1190: TS_Theta *th = (TS_Theta*)ts->data;
1193: if (ns) *ns = 1;
1194: if (Y) *Y = th->endpoint ? &(th->X0) : &(th->X);
1195: return(0);
1196: }
1198: /* ------------------------------------------------------------ */
1199: /*MC
1200: TSTHETA - DAE solver using the implicit Theta method
1202: Level: beginner
1204: Options Database:
1205: + -ts_theta_theta <Theta> - Location of stage (0<Theta<=1)
1206: . -ts_theta_endpoint <flag> - Use the endpoint (like Crank-Nicolson) instead of the midpoint form of the Theta method
1207: - -ts_theta_initial_guess_extrapolate <flg> - Extrapolate stage initial guess from previous solution (sometimes unstable)
1209: Notes:
1210: $ -ts_type theta -ts_theta_theta 1.0 corresponds to backward Euler (TSBEULER)
1211: $ -ts_type theta -ts_theta_theta 0.5 corresponds to the implicit midpoint rule
1212: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint corresponds to Crank-Nicolson (TSCN)
1214: This method can be applied to DAEs.
1216: This method is cast as a 1-stage implicit Runge-Kutta method.
1218: .vb
1219: Theta | Theta
1220: -------------
1221: | 1
1222: .ve
1224: For the default Theta=0.5, this is also known as the implicit midpoint rule.
1226: When the endpoint variant is chosen, the method becomes a 2-stage method with first stage explicit:
1228: .vb
1229: 0 | 0 0
1230: 1 | 1-Theta Theta
1231: -------------------
1232: | 1-Theta Theta
1233: .ve
1235: For the default Theta=0.5, this is the trapezoid rule (also known as Crank-Nicolson, see TSCN).
1237: To apply a diagonally implicit RK method to a DAE, the stage formula
1239: $ Y_i = X + h sum_j a_ij Y'_j
1241: is interpreted as a formula for Y'_i in terms of Y_i and known values (Y'_j, j<i)
1243: .seealso: TSCreate(), TS, TSSetType(), TSCN, TSBEULER, TSThetaSetTheta(), TSThetaSetEndpoint()
1245: M*/
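/*
  Usage sketch (editorial note, not part of the PETSc source): a minimal, hedged example of selecting
  this integrator from application code.  FormIFunction, the user context "user" and the solution
  Vec "u" are assumed to be provided by the application; error checking is omitted for brevity.

    TS ts;
    TSCreate(PETSC_COMM_WORLD,&ts);
    TSSetIFunction(ts,NULL,FormIFunction,&user);
    TSSetType(ts,TSTHETA);
    TSThetaSetTheta(ts,0.5);               (implicit midpoint, the default)
    TSThetaSetEndpoint(ts,PETSC_TRUE);     (switch to the trapezoid/Crank-Nicolson form)
    TSSetFromOptions(ts);                  (honor -ts_theta_theta and -ts_theta_endpoint)
    TSSolve(ts,u);
    TSDestroy(&ts);
*/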
1246: PETSC_EXTERN PetscErrorCode TSCreate_Theta(TS ts)
1247: {
1248: TS_Theta *th;
1252: ts->ops->reset = TSReset_Theta;
1253: ts->ops->adjointreset = TSAdjointReset_Theta;
1254: ts->ops->destroy = TSDestroy_Theta;
1255: ts->ops->view = TSView_Theta;
1256: ts->ops->setup = TSSetUp_Theta;
1257: ts->ops->adjointsetup = TSAdjointSetUp_Theta;
1258: ts->ops->adjointreset = TSAdjointReset_Theta;
1259: ts->ops->step = TSStep_Theta;
1260: ts->ops->interpolate = TSInterpolate_Theta;
1261: ts->ops->evaluatewlte = TSEvaluateWLTE_Theta;
1262: ts->ops->rollback = TSRollBack_Theta;
1263: ts->ops->setfromoptions = TSSetFromOptions_Theta;
1264: ts->ops->snesfunction = SNESTSFormFunction_Theta;
1265: ts->ops->snesjacobian = SNESTSFormJacobian_Theta;
1266: #if defined(PETSC_HAVE_COMPLEX)
1267: ts->ops->linearstability = TSComputeLinearStability_Theta;
1268: #endif
1269: ts->ops->getstages = TSGetStages_Theta;
1270: ts->ops->adjointstep = TSAdjointStep_Theta;
1271: ts->ops->adjointintegral = TSAdjointCostIntegral_Theta;
1272: ts->ops->forwardintegral = TSForwardCostIntegral_Theta;
1273: ts->default_adapt_type = TSADAPTNONE;
1275: ts->ops->forwardsetup = TSForwardSetUp_Theta;
1276: ts->ops->forwardreset = TSForwardReset_Theta;
1277: ts->ops->forwardstep = TSForwardStep_Theta;
1278: ts->ops->forwardgetstages = TSForwardGetStages_Theta;
1280: ts->usessnes = PETSC_TRUE;
1282: PetscNewLog(ts,&th);
1283: ts->data = (void*)th;
1285: th->VecsDeltaLam = NULL;
1286: th->VecsDeltaMu = NULL;
1287: th->VecsSensiTemp = NULL;
1288: th->VecsSensi2Temp = NULL;
1290: th->extrapolate = PETSC_FALSE;
1291: th->Theta = 0.5;
1292: th->order = 2;
1293: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetTheta_C",TSThetaGetTheta_Theta);
1294: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetTheta_C",TSThetaSetTheta_Theta);
1295: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetEndpoint_C",TSThetaGetEndpoint_Theta);
1296: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetEndpoint_C",TSThetaSetEndpoint_Theta);
1297: return(0);
1298: }
1300: /*@
1301: TSThetaGetTheta - Get the abscissa of the stage in (0,1].
1303: Not Collective
1305: Input Parameter:
1306: . ts - timestepping context
1308: Output Parameter:
1309: . theta - stage abscissa
1311: Note:
1312: Use of this function is normally only required to hack TSTHETA to use a modified integration scheme.
1314: Level: Advanced
1316: .seealso: TSThetaSetTheta()
1317: @*/
1318: PetscErrorCode TSThetaGetTheta(TS ts,PetscReal *theta)
1319: {
1325: PetscUseMethod(ts,"TSThetaGetTheta_C",(TS,PetscReal*),(ts,theta));
1326: return(0);
1327: }
1329: /*@
1330: TSThetaSetTheta - Set the abscissa of the stage in (0,1].
1332: Not Collective
1334: Input Parameters:
1335: + ts - timestepping context
1336: - theta - stage abscissa
1338: Options Database:
1339: . -ts_theta_theta <theta>
1341: Level: Intermediate
1343: .seealso: TSThetaGetTheta()
1344: @*/
1345: PetscErrorCode TSThetaSetTheta(TS ts,PetscReal theta)
1346: {
1351: PetscTryMethod(ts,"TSThetaSetTheta_C",(TS,PetscReal),(ts,theta));
1352: return(0);
1353: }
1355: /*@
1356: TSThetaGetEndpoint - Gets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule).
1358: Not Collective
1360: Input Parameter:
1361: . ts - timestepping context
1363: Output Parameter:
1364: . endpoint - PETSC_TRUE when using the endpoint variant
1366: Level: Advanced
1368: .seealso: TSThetaSetEndpoint(), TSTHETA, TSCN
1369: @*/
1370: PetscErrorCode TSThetaGetEndpoint(TS ts,PetscBool *endpoint)
1371: {
1377: PetscUseMethod(ts,"TSThetaGetEndpoint_C",(TS,PetscBool*),(ts,endpoint));
1378: return(0);
1379: }
1381: /*@
1382: TSThetaSetEndpoint - Sets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule).
1384: Not Collective
1386: Input Parameters:
1387: + ts - timestepping context
1388: - flg - PETSC_TRUE to use the endpoint variant
1390: Options Database:
1391: . -ts_theta_endpoint <flg>
1393: Level: Intermediate
1395: .seealso: TSTHETA, TSCN
1396: @*/
1397: PetscErrorCode TSThetaSetEndpoint(TS ts,PetscBool flg)
1398: {
1403: PetscTryMethod(ts,"TSThetaSetEndpoint_C",(TS,PetscBool),(ts,flg));
1404: return(0);
1405: }
1407: /*
1408: * TSBEULER and TSCN are straightforward specializations of TSTHETA.
1409: * The creation functions for these specializations are below.
1410: */
1412: static PetscErrorCode TSSetUp_BEuler(TS ts)
1413: {
1414: TS_Theta *th = (TS_Theta*)ts->data;
1418: if (th->Theta != 1.0) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change the default value (1) of theta when using backward Euler\n");
1419: if (th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change to the endpoint form of the Theta methods when using backward Euler\n");
1420: TSSetUp_Theta(ts);
1421: return(0);
1422: }
1424: static PetscErrorCode TSView_BEuler(TS ts,PetscViewer viewer)
1425: {
1427: return(0);
1428: }
1430: /*MC
1431: TSBEULER - ODE solver using the implicit backward Euler method
1433: Level: beginner
1435: Notes:
1436: TSBEULER is equivalent to TSTHETA with Theta=1.0
1438: $ -ts_type theta -ts_theta_theta 1.0
1440: .seealso: TSCreate(), TS, TSSetType(), TSEULER, TSCN, TSTHETA
1442: M*/
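/*
  Editorial note: per the manual page above, the following selections are equivalent (sketch):

    TSSetType(ts,TSBEULER);
      -- or --
    TSSetType(ts,TSTHETA);
    TSThetaSetTheta(ts,1.0);
*/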
1443: PETSC_EXTERN PetscErrorCode TSCreate_BEuler(TS ts)
1444: {
1448: TSCreate_Theta(ts);
1449: TSThetaSetTheta(ts,1.0);
1450: TSThetaSetEndpoint(ts,PETSC_FALSE);
1451: ts->ops->setup = TSSetUp_BEuler;
1452: ts->ops->view = TSView_BEuler;
1453: return(0);
1454: }
1456: static PetscErrorCode TSSetUp_CN(TS ts)
1457: {
1458: TS_Theta *th = (TS_Theta*)ts->data;
1462: if (th->Theta != 0.5) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change the default value (0.5) of theta when using Crank-Nicolson\n");
1463: if (!th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change to the midpoint form of the Theta methods when using Crank-Nicolson\n");
1464: TSSetUp_Theta(ts);
1465: return(0);
1466: }
1468: static PetscErrorCode TSView_CN(TS ts,PetscViewer viewer)
1469: {
1471: return(0);
1472: }
1474: /*MC
1475: TSCN - ODE solver using the implicit Crank-Nicolson method.
1477: Level: beginner
1479: Notes:
1480: TSCN is equivalent to TSTHETA with Theta=0.5 and the "endpoint" option set. I.e.
1482: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint
1484: .seealso: TSCreate(), TS, TSSetType(), TSBEULER, TSTHETA
1486: M*/
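/*
  Editorial note: per the manual page above, the following selections are equivalent (sketch):

    TSSetType(ts,TSCN);
      -- or --
    TSSetType(ts,TSTHETA);
    TSThetaSetTheta(ts,0.5);
    TSThetaSetEndpoint(ts,PETSC_TRUE);
*/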
1487: PETSC_EXTERN PetscErrorCode TSCreate_CN(TS ts)
1488: {
1492: TSCreate_Theta(ts);
1493: TSThetaSetTheta(ts,0.5);
1494: TSThetaSetEndpoint(ts,PETSC_TRUE);
1495: ts->ops->setup = TSSetUp_CN;
1496: ts->ops->view = TSView_CN;
1497: return(0);
1498: }