Actual source code: theta.c
1: /*
2: Code for timestepping with implicit Theta method
3: */
4: #include <petsc/private/tsimpl.h>
5: #include <petscsnes.h>
6: #include <petscdm.h>
7: #include <petscmat.h>
9: typedef struct {
10: /* context for time stepping */
11: PetscReal stage_time;
12: Vec X0,X,Xdot; /* Storage for stage solution, u^n + dt a_{11} k_1, and time derivative u^{n+1}_t */
13: Vec affine; /* Affine vector needed for residual at beginning of step in endpoint formulation */
14: PetscReal Theta;
15: PetscReal shift; /* Shift parameter for SNES Jacobian, used by forward, TLM and adjoint */
16: PetscInt order;
17: PetscBool endpoint;
18: PetscBool extrapolate;
19: TSStepStatus status;
20: Vec VecCostIntegral0; /* Backup for roll-backs due to events, used by cost integral */
21: PetscReal ptime0; /* Backup for ts->ptime, the start time of current time step, used by TLM and cost integral */
22: PetscReal time_step0; /* Backup for ts->time_step, the step size of the current time step, used by TLM and cost integral */
24: /* context for sensitivity analysis */
25: PetscInt num_tlm; /* Total number of tangent linear equations */
26: Vec *VecsDeltaLam; /* Increment of the adjoint sensitivity w.r.t IC at stage */
27: Vec *VecsDeltaMu; /* Increment of the adjoint sensitivity w.r.t P at stage */
28: Vec *VecsSensiTemp; /* Vector to be multiplied with Jacobian transpose */
29: Mat MatDeltaFwdSensip; /* Increment of the forward sensitivity at stage */
30: Vec VecDeltaFwdSensipCol; /* Working vector for holding one column of the sensitivity matrix */
31: Mat MatFwdSensip0; /* backup for roll-backs due to events */
32: Mat MatIntegralSensipTemp; /* Working matrix for forward integral sensitivity */
33: Mat MatIntegralSensip0; /* backup for roll-backs due to events */
34: Vec *VecsDeltaLam2; /* Increment of the 2nd-order adjoint sensitivity w.r.t IC at stage */
35: Vec *VecsDeltaMu2; /* Increment of the 2nd-order adjoint sensitivity w.r.t P at stage */
36: Vec *VecsSensi2Temp; /* Working vectors that hold the residual for the second-order adjoint */
37: Vec *VecsAffine; /* Working vectors to store residuals */
38: /* context for error estimation */
39: Vec vec_sol_prev;
40: Vec vec_lte_work;
41: } TS_Theta;
43: static PetscErrorCode TSThetaGetX0AndXdot(TS ts,DM dm,Vec *X0,Vec *Xdot)
44: {
45: TS_Theta *th = (TS_Theta*)ts->data;
49: if (X0) {
50: if (dm && dm != ts->dm) {
51: DMGetNamedGlobalVector(dm,"TSTheta_X0",X0);
52: } else *X0 = ts->vec_sol;
53: }
54: if (Xdot) {
55: if (dm && dm != ts->dm) {
56: DMGetNamedGlobalVector(dm,"TSTheta_Xdot",Xdot);
57: } else *Xdot = th->Xdot;
58: }
59: return(0);
60: }
62: static PetscErrorCode TSThetaRestoreX0AndXdot(TS ts,DM dm,Vec *X0,Vec *Xdot)
63: {
67: if (X0) {
68: if (dm && dm != ts->dm) {
69: DMRestoreNamedGlobalVector(dm,"TSTheta_X0",X0);
70: }
71: }
72: if (Xdot) {
73: if (dm && dm != ts->dm) {
74: DMRestoreNamedGlobalVector(dm,"TSTheta_Xdot",Xdot);
75: }
76: }
77: return(0);
78: }
80: static PetscErrorCode DMCoarsenHook_TSTheta(DM fine,DM coarse,void *ctx)
81: {
83: return(0);
84: }
86: static PetscErrorCode DMRestrictHook_TSTheta(DM fine,Mat restrct,Vec rscale,Mat inject,DM coarse,void *ctx)
87: {
88: TS ts = (TS)ctx;
90: Vec X0,Xdot,X0_c,Xdot_c;
93: TSThetaGetX0AndXdot(ts,fine,&X0,&Xdot);
94: TSThetaGetX0AndXdot(ts,coarse,&X0_c,&Xdot_c);
95: MatRestrict(restrct,X0,X0_c);
96: MatRestrict(restrct,Xdot,Xdot_c);
97: VecPointwiseMult(X0_c,rscale,X0_c);
98: VecPointwiseMult(Xdot_c,rscale,Xdot_c);
99: TSThetaRestoreX0AndXdot(ts,fine,&X0,&Xdot);
100: TSThetaRestoreX0AndXdot(ts,coarse,&X0_c,&Xdot_c);
101: return(0);
102: }
104: static PetscErrorCode DMSubDomainHook_TSTheta(DM dm,DM subdm,void *ctx)
105: {
107: return(0);
108: }
110: static PetscErrorCode DMSubDomainRestrictHook_TSTheta(DM dm,VecScatter gscat,VecScatter lscat,DM subdm,void *ctx)
111: {
112: TS ts = (TS)ctx;
114: Vec X0,Xdot,X0_sub,Xdot_sub;
117: TSThetaGetX0AndXdot(ts,dm,&X0,&Xdot);
118: TSThetaGetX0AndXdot(ts,subdm,&X0_sub,&Xdot_sub);
120: VecScatterBegin(gscat,X0,X0_sub,INSERT_VALUES,SCATTER_FORWARD);
121: VecScatterEnd(gscat,X0,X0_sub,INSERT_VALUES,SCATTER_FORWARD);
123: VecScatterBegin(gscat,Xdot,Xdot_sub,INSERT_VALUES,SCATTER_FORWARD);
124: VecScatterEnd(gscat,Xdot,Xdot_sub,INSERT_VALUES,SCATTER_FORWARD);
126: TSThetaRestoreX0AndXdot(ts,dm,&X0,&Xdot);
127: TSThetaRestoreX0AndXdot(ts,subdm,&X0_sub,&Xdot_sub);
128: return(0);
129: }
131: static PetscErrorCode TSThetaEvaluateCostIntegral(TS ts)
132: {
133: TS_Theta *th = (TS_Theta*)ts->data;
134: TS quadts = ts->quadraturets;
138: if (th->endpoint) {
139: /* Evolve ts->vec_costintegral to compute integrals */
140: if (th->Theta!=1.0) {
141: TSComputeRHSFunction(quadts,th->ptime0,th->X0,ts->vec_costintegrand);
142: VecAXPY(quadts->vec_sol,th->time_step0*(1.0-th->Theta),ts->vec_costintegrand);
143: }
144: TSComputeRHSFunction(quadts,ts->ptime,ts->vec_sol,ts->vec_costintegrand);
145: VecAXPY(quadts->vec_sol,th->time_step0*th->Theta,ts->vec_costintegrand);
146: } else {
147: TSComputeRHSFunction(quadts,th->stage_time,th->X,ts->vec_costintegrand);
148: VecAXPY(quadts->vec_sol,th->time_step0,ts->vec_costintegrand);
149: }
150: return(0);
151: }
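/*
   Schematically (a sketch, assuming the quadrature TS provides an integrand r(t,u)), the routine
   above advances the cost integral q with the same theta rule as the state:

     endpoint form:  q_{n+1} = q_n + h*[(1-Theta)*r(t_n,u_n) + Theta*r(t_{n+1},u_{n+1})]
     midpoint form:  q_{n+1} = q_n + h*r(t_n + Theta*h, X_stage)

   where h is th->time_step0 and X_stage is th->X.
*/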
153: static PetscErrorCode TSForwardCostIntegral_Theta(TS ts)
154: {
155: TS_Theta *th = (TS_Theta*)ts->data;
156: TS quadts = ts->quadraturets;
160: /* backup cost integral */
161: VecCopy(quadts->vec_sol,th->VecCostIntegral0);
162: TSThetaEvaluateCostIntegral(ts);
163: return(0);
164: }
166: static PetscErrorCode TSAdjointCostIntegral_Theta(TS ts)
167: {
168: TS_Theta *th = (TS_Theta*)ts->data;
172: /* Like TSForwardCostIntegral(), the adjoint cost integral evaluation relies on ptime0 and time_step0. */
173: th->ptime0 = ts->ptime + ts->time_step;
174: th->time_step0 = -ts->time_step;
175: TSThetaEvaluateCostIntegral(ts);
176: return(0);
177: }
179: static PetscErrorCode TSTheta_SNESSolve(TS ts,Vec b,Vec x)
180: {
181: PetscInt nits,lits;
185: SNESSolve(ts->snes,b,x);
186: SNESGetIterationNumber(ts->snes,&nits);
187: SNESGetLinearSolveIterations(ts->snes,&lits);
188: ts->snes_its += nits; ts->ksp_its += lits;
189: return(0);
190: }
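/*
   A rough sketch of the update performed by TSStep_Theta() below, assuming the common implicit
   form F(t,u,u') = M u' - f(t,u) with a constant mass matrix M and step size h:

     non-endpoint (stage) form, stage value X, shift = 1/(Theta*h):
       solve    F(t_n + Theta*h, X, shift*(X - u_n)) = 0           for X
       Xdot   = shift*(X - u_n)
       u_{n+1} = u_n + h*Xdot

     endpoint form (e.g. Crank-Nicolson for Theta = 1/2):
       solve    F(t_{n+1}, u_{n+1}, shift*(u_{n+1} - u_n)) = ((Theta-1)/Theta)*F(t_n, u_n, 0)
       which for M u' = f(t,u) reduces to
       M*(u_{n+1} - u_n) = h*[ Theta*f(t_{n+1},u_{n+1}) + (1-Theta)*f(t_n,u_n) ]
*/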
192: static PetscErrorCode TSStep_Theta(TS ts)
193: {
194: TS_Theta *th = (TS_Theta*)ts->data;
195: PetscInt rejections = 0;
196: PetscBool stageok,accept = PETSC_TRUE;
197: PetscReal next_time_step = ts->time_step;
201: if (!ts->steprollback) {
202: if (th->vec_sol_prev) { VecCopy(th->X0,th->vec_sol_prev); }
203: VecCopy(ts->vec_sol,th->X0);
204: }
206: th->status = TS_STEP_INCOMPLETE;
207: while (!ts->reason && th->status != TS_STEP_COMPLETE) {
208: th->shift = 1/(th->Theta*ts->time_step);
209: th->stage_time = ts->ptime + (th->endpoint ? (PetscReal)1 : th->Theta)*ts->time_step;
210: VecCopy(th->X0,th->X);
211: if (th->extrapolate && !ts->steprestart) {
212: VecAXPY(th->X,1/th->shift,th->Xdot);
213: }
214: if (th->endpoint) { /* This formulation assumes a linear, time-independent mass matrix */
215: if (!th->affine) {VecDuplicate(ts->vec_sol,&th->affine);}
216: VecZeroEntries(th->Xdot);
217: TSComputeIFunction(ts,ts->ptime,th->X0,th->Xdot,th->affine,PETSC_FALSE);
218: VecScale(th->affine,(th->Theta-1)/th->Theta);
219: } else if (th->affine) { /* Just in case th->endpoint is changed between calls to TSStep_Theta() */
220: VecZeroEntries(th->affine);
221: }
222: TSPreStage(ts,th->stage_time);
223: TSTheta_SNESSolve(ts,th->affine,th->X);
224: TSPostStage(ts,th->stage_time,0,&th->X);
225: TSAdaptCheckStage(ts->adapt,ts,th->stage_time,th->X,&stageok);
226: if (!stageok) goto reject_step;
228: th->status = TS_STEP_PENDING;
229: if (th->endpoint) {
230: VecCopy(th->X,ts->vec_sol);
231: } else {
232: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
233: VecAXPY(ts->vec_sol,ts->time_step,th->Xdot);
234: }
235: TSAdaptChoose(ts->adapt,ts,ts->time_step,NULL,&next_time_step,&accept);
236: th->status = accept ? TS_STEP_COMPLETE : TS_STEP_INCOMPLETE;
237: if (!accept) {
238: VecCopy(th->X0,ts->vec_sol);
239: ts->time_step = next_time_step;
240: goto reject_step;
241: }
243: if (ts->forward_solve || ts->costintegralfwd) { /* Save the info for the later use in cost integral evaluation */
244: th->ptime0 = ts->ptime;
245: th->time_step0 = ts->time_step;
246: }
247: ts->ptime += ts->time_step;
248: ts->time_step = next_time_step;
249: break;
251: reject_step:
252: ts->reject++; accept = PETSC_FALSE;
253: if (!ts->reason && ++rejections > ts->max_reject && ts->max_reject >= 0) {
254: ts->reason = TS_DIVERGED_STEP_REJECTED;
255: PetscInfo2(ts,"Step=%D, step rejections %D greater than current TS allowed, stopping solve\n",ts->steps,rejections);
256: }
257: }
258: return(0);
259: }
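/*
   A sketch of the discrete adjoint step implemented by TSAdjointStepBEuler_Private() below,
   assuming an ODE/DAE in implicit form F(t,u,u') = 0, a quadrature integrand r(t,u,p),
   h = adjoint_time_step and shift = 1/h:

     solve    (shift*F_u' + F_u)^T lambda_s = lambda_{n+1}/h + r_u^T        (one solve per cost function)
     lambda_n = h*( lambda_{n+1}/h + r_u^T - F_u^T lambda_s )
     mu_n     = mu_{n+1} - h*F_p^T lambda_s + h*r_p^T

   For an explicit-form ODE u' = f(u), F_u' = I and F_u = -f_u, which recovers the familiar
   adjoint of backward Euler.
*/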
261: static PetscErrorCode TSAdjointStepBEuler_Private(TS ts)
262: {
263: TS_Theta *th = (TS_Theta*)ts->data;
264: TS quadts = ts->quadraturets;
265: Vec *VecsDeltaLam = th->VecsDeltaLam,*VecsDeltaMu = th->VecsDeltaMu,*VecsSensiTemp = th->VecsSensiTemp;
266: Vec *VecsDeltaLam2 = th->VecsDeltaLam2,*VecsDeltaMu2 = th->VecsDeltaMu2,*VecsSensi2Temp = th->VecsSensi2Temp;
267: PetscInt nadj;
268: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
269: KSP ksp;
270: PetscScalar *xarr;
271: TSEquationType eqtype;
272: PetscBool isexplicitode = PETSC_FALSE;
273: PetscReal adjoint_time_step;
277: TSGetEquationType(ts,&eqtype);
278: if (eqtype == TS_EQ_ODE_EXPLICIT) {
279: isexplicitode = PETSC_TRUE;
280: VecsDeltaLam = ts->vecs_sensi;
281: VecsDeltaLam2 = ts->vecs_sensi2;
282: }
283: th->status = TS_STEP_INCOMPLETE;
284: SNESGetKSP(ts->snes,&ksp);
285: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
286: if (quadts) {
287: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
288: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
289: }
291: th->stage_time = ts->ptime;
292: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
294: /* Build RHS for first-order adjoint lambda_{n+1}/h + r_u^T(n+1) */
295: if (quadts) {
296: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
297: }
299: for (nadj=0; nadj<ts->numcost; nadj++) {
300: VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);
301: VecScale(VecsSensiTemp[nadj],1./adjoint_time_step); /* lambda_{n+1}/h */
302: if (quadJ) {
303: MatDenseGetColumn(quadJ,nadj,&xarr);
304: VecPlaceArray(ts->vec_drdu_col,xarr);
305: VecAXPY(VecsSensiTemp[nadj],1.,ts->vec_drdu_col);
306: VecResetArray(ts->vec_drdu_col);
307: MatDenseRestoreColumn(quadJ,&xarr);
308: }
309: }
311: /* Build LHS for first-order adjoint */
312: th->shift = 1./adjoint_time_step;
313: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
314: KSPSetOperators(ksp,J,Jpre);
316: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
317: for (nadj=0; nadj<ts->numcost; nadj++) {
318: KSPConvergedReason kspreason;
319: KSPSolveTranspose(ksp,VecsSensiTemp[nadj],VecsDeltaLam[nadj]);
320: KSPGetConvergedReason(ksp,&kspreason);
321: if (kspreason < 0) {
322: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
323: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n",ts->steps,nadj);
324: }
325: }
327: if (ts->vecs_sensi2) { /* U_{n+1} */
328: /* Get w1 at t_{n+1} from TLM matrix */
329: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
330: VecPlaceArray(ts->vec_sensip_col,xarr);
331: /* lambda_s^T F_UU w_1 */
332: TSComputeIHessianProductFunctionUU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
333: /* lambda_s^T F_UP w_2 */
334: TSComputeIHessianProductFunctionUP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
335: for (nadj=0; nadj<ts->numcost; nadj++) { /* compute the residual */
336: VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);
337: VecScale(VecsSensi2Temp[nadj],1./adjoint_time_step);
338: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fuu[nadj]);
339: if (ts->vecs_fup) {
340: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fup[nadj]);
341: }
342: }
343: /* Solve stage equation LHS X = RHS for second-order adjoint */
344: for (nadj=0; nadj<ts->numcost; nadj++) {
345: KSPConvergedReason kspreason;
346: KSPSolveTranspose(ksp,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj]);
347: KSPGetConvergedReason(ksp,&kspreason);
348: if (kspreason < 0) {
349: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
350: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n",ts->steps,nadj);
351: }
352: }
353: }
355: /* Update sensitivities, and evaluate integrals if there are any */
356: if (!isexplicitode) {
357: th->shift = 0.0;
358: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
359: KSPSetOperators(ksp,J,Jpre);
360: MatScale(J,-1.);
361: for (nadj=0; nadj<ts->numcost; nadj++) {
362: /* Add f_U \lambda_s to the original RHS */
363: MatMultTransposeAdd(J,VecsDeltaLam[nadj],VecsSensiTemp[nadj],VecsSensiTemp[nadj]);
364: VecScale(VecsSensiTemp[nadj],adjoint_time_step);
365: VecCopy(VecsSensiTemp[nadj],ts->vecs_sensi[nadj]);
366: if (ts->vecs_sensi2) {
367: MatMultTransposeAdd(J,VecsDeltaLam2[nadj],VecsSensi2Temp[nadj],VecsSensi2Temp[nadj]);
368: VecScale(VecsSensi2Temp[nadj],adjoint_time_step);
369: VecCopy(VecsSensi2Temp[nadj],ts->vecs_sensi2[nadj]);
370: }
371: }
372: }
373: if (ts->vecs_sensip) {
374: VecAXPBYPCZ(th->Xdot,-1./adjoint_time_step,1.0/adjoint_time_step,0,th->X0,ts->vec_sol);
375: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,1./adjoint_time_step,ts->Jacp,PETSC_FALSE); /* get -f_p */
376: if (quadts) {
377: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
378: }
379: if (ts->vecs_sensi2p) {
380: /* lambda_s^T F_PU w_1 */
381: TSComputeIHessianProductFunctionPU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
382: /* lambda_s^T F_PP w_2 */
383: TSComputeIHessianProductFunctionPP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
384: }
386: for (nadj=0; nadj<ts->numcost; nadj++) {
387: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
388: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step,VecsDeltaMu[nadj]);
389: if (quadJp) {
390: MatDenseGetColumn(quadJp,nadj,&xarr);
391: VecPlaceArray(ts->vec_drdp_col,xarr);
392: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step,ts->vec_drdp_col);
393: VecResetArray(ts->vec_drdp_col);
394: MatDenseRestoreColumn(quadJp,&xarr);
395: }
396: if (ts->vecs_sensi2p) {
397: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
398: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,VecsDeltaMu2[nadj]);
399: if (ts->vecs_fpu) {
400: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,ts->vecs_fpu[nadj]);
401: }
402: if (ts->vecs_fpp) {
403: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,ts->vecs_fpp[nadj]);
404: }
405: }
406: }
407: }
409: if (ts->vecs_sensi2) {
410: VecResetArray(ts->vec_sensip_col);
411: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
412: }
413: th->status = TS_STEP_COMPLETE;
414: return(0);
415: }
417: static PetscErrorCode TSAdjointStep_Theta(TS ts)
418: {
419: TS_Theta *th = (TS_Theta*)ts->data;
420: TS quadts = ts->quadraturets;
421: Vec *VecsDeltaLam = th->VecsDeltaLam,*VecsDeltaMu = th->VecsDeltaMu,*VecsSensiTemp = th->VecsSensiTemp;
422: Vec *VecsDeltaLam2 = th->VecsDeltaLam2,*VecsDeltaMu2 = th->VecsDeltaMu2,*VecsSensi2Temp = th->VecsSensi2Temp;
423: PetscInt nadj;
424: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
425: KSP ksp;
426: PetscScalar *xarr;
427: PetscReal adjoint_time_step;
428: PetscReal adjoint_ptime; /* end time of the adjoint time step (ts->ptime is the start time, usually ts->ptime is larger than adjoint_ptime) */
432: if (th->Theta == 1.) {
433: TSAdjointStepBEuler_Private(ts);
434: return(0);
435: }
436: th->status = TS_STEP_INCOMPLETE;
437: SNESGetKSP(ts->snes,&ksp);
438: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
439: if (quadts) {
440: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
441: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
442: }
443: /* If endpoint=1, th->ptime and th->X0 will be used; if endpoint=0, th->stage_time and th->X will be used. */
444: th->stage_time = th->endpoint ? ts->ptime : (ts->ptime+(1.-th->Theta)*ts->time_step);
445: adjoint_ptime = ts->ptime + ts->time_step;
446: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
448: if (!th->endpoint) {
449: /* recover th->X0 using vec_sol and the stage value th->X */
450: VecAXPBYPCZ(th->X0,1.0/(1.0-th->Theta),th->Theta/(th->Theta-1.0),0,th->X,ts->vec_sol);
451: }
453: /* Build RHS for first-order adjoint */
454: /* Cost function has an integral term */
455: if (quadts) {
456: if (th->endpoint) {
457: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
458: } else {
459: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
460: }
461: }
463: for (nadj=0; nadj<ts->numcost; nadj++) {
464: VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);
465: VecScale(VecsSensiTemp[nadj],1./(th->Theta*adjoint_time_step));
466: if (quadJ) {
467: MatDenseGetColumn(quadJ,nadj,&xarr);
468: VecPlaceArray(ts->vec_drdu_col,xarr);
469: VecAXPY(VecsSensiTemp[nadj],1.,ts->vec_drdu_col);
470: VecResetArray(ts->vec_drdu_col);
471: MatDenseRestoreColumn(quadJ,&xarr);
472: }
473: }
475: /* Build LHS for first-order adjoint */
476: th->shift = 1./(th->Theta*adjoint_time_step);
477: if (th->endpoint) {
478: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
479: } else {
480: TSComputeSNESJacobian(ts,th->X,J,Jpre);
481: }
482: KSPSetOperators(ksp,J,Jpre);
484: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
485: for (nadj=0; nadj<ts->numcost; nadj++) {
486: KSPConvergedReason kspreason;
487: KSPSolveTranspose(ksp,VecsSensiTemp[nadj],VecsDeltaLam[nadj]);
488: KSPGetConvergedReason(ksp,&kspreason);
489: if (kspreason < 0) {
490: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
491: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n",ts->steps,nadj);
492: }
493: }
495: /* Second-order adjoint */
496: if (ts->vecs_sensi2) { /* U_{n+1} */
497: if (!th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"Operation not implemented in TS_Theta");
498: /* Get w1 at t_{n+1} from TLM matrix */
499: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
500: VecPlaceArray(ts->vec_sensip_col,xarr);
501: /* lambda_s^T F_UU w_1 */
502: TSComputeIHessianProductFunctionUU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
503: VecResetArray(ts->vec_sensip_col);
504: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
505: /* lambda_s^T F_UP w_2 */
506: TSComputeIHessianProductFunctionUP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
507: for (nadj=0; nadj<ts->numcost; nadj++) { /* compute the residual */
508: VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);
509: VecScale(VecsSensi2Temp[nadj],th->shift);
510: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fuu[nadj]);
511: if (ts->vecs_fup) {
512: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fup[nadj]);
513: }
514: }
515: /* Solve stage equation LHS X = RHS for second-order adjoint */
516: for (nadj=0; nadj<ts->numcost; nadj++) {
517: KSPConvergedReason kspreason;
518: KSPSolveTranspose(ksp,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj]);
519: KSPGetConvergedReason(ksp,&kspreason);
520: if (kspreason < 0) {
521: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
522: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n",ts->steps,nadj);
523: }
524: }
525: }
527: /* Update sensitivities, and evaluate integrals if there are any */
528: if (th->endpoint) { /* two-stage Theta methods with th->Theta!=1, th->Theta==1 leads to BEuler */
529: th->shift = 1./((th->Theta-1.)*adjoint_time_step);
530: th->stage_time = adjoint_ptime;
531: TSComputeSNESJacobian(ts,th->X0,J,Jpre);
532: KSPSetOperators(ksp,J,Jpre);
533: /* R_U at t_n */
534: if (quadts) {
535: TSComputeRHSJacobian(quadts,adjoint_ptime,th->X0,quadJ,NULL);
536: }
537: for (nadj=0; nadj<ts->numcost; nadj++) {
538: MatMultTranspose(J,VecsDeltaLam[nadj],ts->vecs_sensi[nadj]);
539: if (quadJ) {
540: MatDenseGetColumn(quadJ,nadj,&xarr);
541: VecPlaceArray(ts->vec_drdu_col,xarr);
542: VecAXPY(ts->vecs_sensi[nadj],-1.,ts->vec_drdu_col);
543: VecResetArray(ts->vec_drdu_col);
544: MatDenseRestoreColumn(quadJ,&xarr);
545: }
546: VecScale(ts->vecs_sensi[nadj],1./th->shift);
547: }
549: /* Second-order adjoint */
550: if (ts->vecs_sensi2) { /* U_n */
551: /* Get w1 at t_n from TLM matrix */
552: MatDenseGetColumn(th->MatFwdSensip0,0,&xarr);
553: VecPlaceArray(ts->vec_sensip_col,xarr);
554: /* lambda_s^T F_UU w_1 */
555: TSComputeIHessianProductFunctionUU(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
556: VecResetArray(ts->vec_sensip_col);
557: MatDenseRestoreColumn(th->MatFwdSensip0,&xarr);
558: /* lambda_s^T F_UP w_2 */
559: TSComputeIHessianProductFunctionUP(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
560: for (nadj=0; nadj<ts->numcost; nadj++) {
561: /* M^T Lambda_s + h(1-theta) F_U^T Lambda_s + h(1-theta) lambda_s^T F_UU w_1 + lambda_s^T F_UP w_2 */
562: MatMultTranspose(J,VecsDeltaLam2[nadj],ts->vecs_sensi2[nadj]);
563: VecAXPY(ts->vecs_sensi2[nadj],1.,ts->vecs_fuu[nadj]);
564: if (ts->vecs_fup) {
565: VecAXPY(ts->vecs_sensi2[nadj],1.,ts->vecs_fup[nadj]);
566: }
567: VecScale(ts->vecs_sensi2[nadj],1./th->shift);
568: }
569: }
571: th->stage_time = ts->ptime; /* recover the old value */
573: if (ts->vecs_sensip) { /* sensitivities wrt parameters */
574: /* U_{n+1} */
575: th->shift = 1.0/(adjoint_time_step*th->Theta);
576: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,ts->vec_sol);
577: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,-1./(th->Theta*adjoint_time_step),ts->Jacp,PETSC_FALSE);
578: if (quadts) {
579: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
580: }
581: for (nadj=0; nadj<ts->numcost; nadj++) {
582: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
583: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step*th->Theta,VecsDeltaMu[nadj]);
584: if (quadJp) {
585: MatDenseGetColumn(quadJp,nadj,&xarr);
586: VecPlaceArray(ts->vec_drdp_col,xarr);
587: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step*th->Theta,ts->vec_drdp_col);
588: VecResetArray(ts->vec_drdp_col);
589: MatDenseRestoreColumn(quadJp,&xarr);
590: }
591: }
592: if (ts->vecs_sensi2p) { /* second-order */
593: /* Get w1 at t_{n+1} from TLM matrix */
594: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
595: VecPlaceArray(ts->vec_sensip_col,xarr);
596: /* lambda_s^T F_PU w_1 */
597: TSComputeIHessianProductFunctionPU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
598: VecResetArray(ts->vec_sensip_col);
599: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
601: /* lambda_s^T F_PP w_2 */
602: TSComputeIHessianProductFunctionPP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
603: for (nadj=0; nadj<ts->numcost; nadj++) {
604: /* Mu2 <- Mu2 + h theta F_P^T Lambda_s + h theta (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
605: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
606: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,VecsDeltaMu2[nadj]);
607: if (ts->vecs_fpu) {
608: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,ts->vecs_fpu[nadj]);
609: }
610: if (ts->vecs_fpp) {
611: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,ts->vecs_fpp[nadj]);
612: }
613: }
614: }
616: /* U_s */
617: VecZeroEntries(th->Xdot);
618: TSComputeIJacobianP(ts,adjoint_ptime,th->X0,th->Xdot,1./((th->Theta-1.0)*adjoint_time_step),ts->Jacp,PETSC_FALSE);
619: if (quadts) {
620: TSComputeRHSJacobianP(quadts,adjoint_ptime,th->X0,quadJp);
621: }
622: for (nadj=0; nadj<ts->numcost; nadj++) {
623: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
624: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step*(1.0-th->Theta),VecsDeltaMu[nadj]);
625: if (quadJp) {
626: MatDenseGetColumn(quadJp,nadj,&xarr);
627: VecPlaceArray(ts->vec_drdp_col,xarr);
628: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step*(1.0-th->Theta),ts->vec_drdp_col);
629: VecResetArray(ts->vec_drdp_col);
630: MatDenseRestoreColumn(quadJp,&xarr);
631: }
632: if (ts->vecs_sensi2p) { /* second-order */
633: /* Get w1 at t_n from TLM matrix */
634: MatDenseGetColumn(th->MatFwdSensip0,0,&xarr);
635: VecPlaceArray(ts->vec_sensip_col,xarr);
636: /* lambda_s^T F_PU w_1 */
637: TSComputeIHessianProductFunctionPU(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
638: VecResetArray(ts->vec_sensip_col);
639: MatDenseRestoreColumn(th->MatFwdSensip0,&xarr);
640: /* lambda_s^T F_PP w_2 */
641: TSComputeIHessianProductFunctionPP(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
642: for (nadj=0; nadj<ts->numcost; nadj++) {
643: /* Mu2 <- Mu2 + h(1-theta) F_P^T Lambda_s + h(1-theta) (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
644: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
645: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),VecsDeltaMu2[nadj]);
646: if (ts->vecs_fpu) {
647: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),ts->vecs_fpu[nadj]);
648: }
649: if (ts->vecs_fpp) {
650: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),ts->vecs_fpp[nadj]);
651: }
652: }
653: }
654: }
655: }
656: } else { /* one-stage case */
657: th->shift = 0.0;
658: TSComputeSNESJacobian(ts,th->X,J,Jpre); /* get -f_y */
659: KSPSetOperators(ksp,J,Jpre);
660: if (quadts) {
661: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
662: }
663: for (nadj=0; nadj<ts->numcost; nadj++) {
664: MatMultTranspose(J,VecsDeltaLam[nadj],VecsSensiTemp[nadj]);
665: VecAXPY(ts->vecs_sensi[nadj],-adjoint_time_step,VecsSensiTemp[nadj]);
666: if (quadJ) {
667: MatDenseGetColumn(quadJ,nadj,&xarr);
668: VecPlaceArray(ts->vec_drdu_col,xarr);
669: VecAXPY(ts->vecs_sensi[nadj],adjoint_time_step,ts->vec_drdu_col);
670: VecResetArray(ts->vec_drdu_col);
671: MatDenseRestoreColumn(quadJ,&xarr);
672: }
673: }
674: if (ts->vecs_sensip) {
675: th->shift = 1.0/(adjoint_time_step*th->Theta);
676: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
677: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
678: if (quadts) {
679: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
680: }
681: for (nadj=0; nadj<ts->numcost; nadj++) {
682: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
683: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step,VecsDeltaMu[nadj]);
684: if (quadJp) {
685: MatDenseGetColumn(quadJp,nadj,&xarr);
686: VecPlaceArray(ts->vec_drdp_col,xarr);
687: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step,ts->vec_drdp_col);
688: VecResetArray(ts->vec_drdp_col);
689: MatDenseRestoreColumn(quadJp,&xarr);
690: }
691: }
692: }
693: }
695: th->status = TS_STEP_COMPLETE;
696: return(0);
697: }
699: static PetscErrorCode TSInterpolate_Theta(TS ts,PetscReal t,Vec X)
700: {
701: TS_Theta *th = (TS_Theta*)ts->data;
702: PetscReal dt = t - ts->ptime;
706: VecCopy(ts->vec_sol,th->X);
707: if (th->endpoint) dt *= th->Theta;
708: VecWAXPY(X,dt,th->Xdot,th->X);
709: return(0);
710: }
712: static PetscErrorCode TSEvaluateWLTE_Theta(TS ts,NormType wnormtype,PetscInt *order,PetscReal *wlte)
713: {
714: TS_Theta *th = (TS_Theta*)ts->data;
715: Vec X = ts->vec_sol; /* X = solution */
716: Vec Y = th->vec_lte_work; /* Y = X + LTE */
717: PetscReal wltea,wlter;
721: if (!th->vec_sol_prev) {*wlte = -1; return(0);}
722: /* Cannot compute LTE in first step or in restart after event */
723: if (ts->steprestart) {*wlte = -1; return(0);}
724: /* Compute LTE using backward differences with non-constant time step */
725: {
726: PetscReal h = ts->time_step, h_prev = ts->ptime - ts->ptime_prev;
727: PetscReal a = 1 + h_prev/h;
728: PetscScalar scal[3]; Vec vecs[3];
729: scal[0] = +1/a; scal[1] = -1/(a-1); scal[2] = +1/(a*(a-1));
730: vecs[0] = X; vecs[1] = th->X0; vecs[2] = th->vec_sol_prev;
731: VecCopy(X,Y);
732: VecMAXPY(Y,3,scal,vecs);
733: TSErrorWeightedNorm(ts,X,Y,wnormtype,wlte,&wltea,&wlter);
734: }
735: if (order) *order = 2;
736: return(0);
737: }
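/*
   Rationale for the weights used above (a sketch): with a = 1 + h_prev/h, the combination
     X/a - X0/(a-1) + vec_sol_prev/(a*(a-1))
   equals h^2 times the second divided difference of the last three solutions, i.e. roughly
   (h^2/2)*u''(t_{n+1}), the leading local truncation error of the first-order theta schemes.
   Hence Y = X + LTE, and the call to TSErrorWeightedNorm() measures that difference in the
   adaptive tolerances.
*/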
739: static PetscErrorCode TSRollBack_Theta(TS ts)
740: {
741: TS_Theta *th = (TS_Theta*)ts->data;
742: TS quadts = ts->quadraturets;
746: VecCopy(th->X0,ts->vec_sol);
747: if (quadts && ts->costintegralfwd) {
748: VecCopy(th->VecCostIntegral0,quadts->vec_sol);
749: }
750: th->status = TS_STEP_INCOMPLETE;
751: if (ts->mat_sensip) {
752: MatCopy(th->MatFwdSensip0,ts->mat_sensip,SAME_NONZERO_PATTERN);
753: }
754: if (quadts && quadts->mat_sensip) {
755: MatCopy(th->MatIntegralSensip0,quadts->mat_sensip,SAME_NONZERO_PATTERN);
756: }
757: return(0);
758: }
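/*
   A sketch of what TSForwardStep_Theta() below does: each column S of the forward sensitivity
   matrix ts->mat_sensip (the tangent linear model) is advanced by first assembling a right-hand
   side in MatDeltaFwdSensip (the old-stage IJacobian applied to the current S, plus the
   IJacobianP parameter-forcing terms), and then solving the stage linear system
       (shift*F_u' + F_u)|_{new stage}  S_new = RHS
   column by column with the KSP taken from the step's SNES, i.e. with the same shifted Jacobian
   that the nonlinear solve in TSStep_Theta() used.
*/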
760: static PetscErrorCode TSForwardStep_Theta(TS ts)
761: {
762: TS_Theta *th = (TS_Theta*)ts->data;
763: TS quadts = ts->quadraturets;
764: Mat MatDeltaFwdSensip = th->MatDeltaFwdSensip;
765: Vec VecDeltaFwdSensipCol = th->VecDeltaFwdSensipCol;
766: PetscInt ntlm;
767: KSP ksp;
768: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
769: PetscScalar *barr,*xarr;
770: PetscReal previous_shift;
774: previous_shift = th->shift;
775: MatCopy(ts->mat_sensip,th->MatFwdSensip0,SAME_NONZERO_PATTERN);
777: if (quadts && quadts->mat_sensip) {
778: MatCopy(quadts->mat_sensip,th->MatIntegralSensip0,SAME_NONZERO_PATTERN);
779: }
780: SNESGetKSP(ts->snes,&ksp);
781: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
782: if (quadts) {
783: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
784: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
785: }
787: /* Build RHS */
788: if (th->endpoint) { /* 2-stage method*/
789: th->shift = 1./((th->Theta-1.)*th->time_step0);
790: TSComputeIJacobian(ts,th->ptime0,th->X0,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
791: MatMatMult(J,ts->mat_sensip,MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatDeltaFwdSensip);
792: MatScale(MatDeltaFwdSensip,(th->Theta-1.)/th->Theta);
794: /* Add the f_p forcing terms */
795: if (ts->Jacp) {
796: VecZeroEntries(th->Xdot);
797: TSComputeIJacobianP(ts,th->ptime0,th->X0,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
798: MatAXPY(MatDeltaFwdSensip,(th->Theta-1.)/th->Theta,ts->Jacp,SUBSET_NONZERO_PATTERN);
799: th->shift = previous_shift;
800: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,ts->vec_sol);
801: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
802: MatAXPY(MatDeltaFwdSensip,-1.,ts->Jacp,SUBSET_NONZERO_PATTERN);
803: }
804: } else { /* 1-stage method */
805: th->shift = 0.0;
806: TSComputeIJacobian(ts,th->stage_time,th->X,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
807: MatMatMult(J,ts->mat_sensip,MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatDeltaFwdSensip);
808: MatScale(MatDeltaFwdSensip,-1.);
810: /* Add the f_p forcing terms */
811: if (ts->Jacp) {
812: th->shift = previous_shift;
813: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
814: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
815: MatAXPY(MatDeltaFwdSensip,-1.,ts->Jacp,SUBSET_NONZERO_PATTERN);
816: }
817: }
819: /* Build LHS */
820: th->shift = previous_shift; /* recover the previous shift used in TSStep_Theta() */
821: if (th->endpoint) {
822: TSComputeIJacobian(ts,th->stage_time,ts->vec_sol,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
823: } else {
824: TSComputeIJacobian(ts,th->stage_time,th->X,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
825: }
826: KSPSetOperators(ksp,J,Jpre);
828: /*
829: Evaluate the first stage of integral gradients with the 2-stage method:
830: drdu|t_n*S(t_n) + drdp|t_n
831: This is done before the linear solve because the sensitivity variable S(t_n) will be propagated to S(t_{n+1})
832: */
833: if (th->endpoint) { /* 2-stage method only */
834: if (quadts && quadts->mat_sensip) {
835: TSComputeRHSJacobian(quadts,th->ptime0,th->X0,quadJ,NULL);
836: TSComputeRHSJacobianP(quadts,th->ptime0,th->X0,quadJp);
837: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
838: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
839: MatAXPY(quadts->mat_sensip,th->time_step0*(1.-th->Theta),th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
840: }
841: }
843: /* Solve the tangent linear equation for forward sensitivities to parameters */
844: for (ntlm=0; ntlm<th->num_tlm; ntlm++) {
845: KSPConvergedReason kspreason;
846: MatDenseGetColumn(MatDeltaFwdSensip,ntlm,&barr);
847: VecPlaceArray(VecDeltaFwdSensipCol,barr);
848: if (th->endpoint) {
849: MatDenseGetColumn(ts->mat_sensip,ntlm,&xarr);
850: VecPlaceArray(ts->vec_sensip_col,xarr);
851: KSPSolve(ksp,VecDeltaFwdSensipCol,ts->vec_sensip_col);
852: VecResetArray(ts->vec_sensip_col);
853: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
854: } else {
855: KSPSolve(ksp,VecDeltaFwdSensipCol,VecDeltaFwdSensipCol);
856: }
857: KSPGetConvergedReason(ksp,&kspreason);
858: if (kspreason < 0) {
859: ts->reason = TSFORWARD_DIVERGED_LINEAR_SOLVE;
860: PetscInfo2(ts,"Step=%D, %Dth tangent linear solve, linear solve fails, stopping tangent linear solve\n",ts->steps,ntlm);
861: }
862: VecResetArray(VecDeltaFwdSensipCol);
863: MatDenseRestoreColumn(MatDeltaFwdSensip,&barr);
864: }
866: /*
867: Evaluate the second stage of integral gradients with the 2-stage method:
868: drdu|t_{n+1}*S(t_{n+1}) + drdp|t_{n+1}
869: */
870: if (quadts && quadts->mat_sensip) {
871: if (!th->endpoint) {
872: MatAXPY(ts->mat_sensip,1,MatDeltaFwdSensip,SAME_NONZERO_PATTERN); /* stage sensitivity */
873: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
874: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
875: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
876: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
877: MatAXPY(quadts->mat_sensip,th->time_step0,th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
878: MatAXPY(ts->mat_sensip,(1.-th->Theta)/th->Theta,MatDeltaFwdSensip,SAME_NONZERO_PATTERN);
879: } else {
880: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
881: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
882: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
883: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
884: MatAXPY(quadts->mat_sensip,th->time_step0*th->Theta,th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
885: }
886: } else {
887: if (!th->endpoint) {
888: MatAXPY(ts->mat_sensip,1./th->Theta,MatDeltaFwdSensip,SAME_NONZERO_PATTERN);
889: }
890: }
891: return(0);
892: }
894: static PetscErrorCode TSForwardGetStages_Theta(TS ts,PetscInt *ns,Mat **stagesensip)
895: {
896: TS_Theta *th = (TS_Theta*)ts->data;
899: if (ns) *ns = 1;
900: if (stagesensip) {
901: *stagesensip = (!th->endpoint && th->Theta != 1.0) ? &(th->MatDeltaFwdSensip) : &(th->MatFwdSensip0);
902: }
903: return(0);
904: }
906: /*------------------------------------------------------------*/
907: static PetscErrorCode TSReset_Theta(TS ts)
908: {
909: TS_Theta *th = (TS_Theta*)ts->data;
913: VecDestroy(&th->X);
914: VecDestroy(&th->Xdot);
915: VecDestroy(&th->X0);
916: VecDestroy(&th->affine);
918: VecDestroy(&th->vec_sol_prev);
919: VecDestroy(&th->vec_lte_work);
921: VecDestroy(&th->VecCostIntegral0);
922: return(0);
923: }
925: static PetscErrorCode TSAdjointReset_Theta(TS ts)
926: {
927: TS_Theta *th = (TS_Theta*)ts->data;
931: VecDestroyVecs(ts->numcost,&th->VecsDeltaLam);
932: VecDestroyVecs(ts->numcost,&th->VecsDeltaMu);
933: VecDestroyVecs(ts->numcost,&th->VecsDeltaLam2);
934: VecDestroyVecs(ts->numcost,&th->VecsDeltaMu2);
935: VecDestroyVecs(ts->numcost,&th->VecsSensiTemp);
936: VecDestroyVecs(ts->numcost,&th->VecsSensi2Temp);
937: return(0);
938: }
940: static PetscErrorCode TSDestroy_Theta(TS ts)
941: {
945: TSReset_Theta(ts);
946: if (ts->dm) {
947: DMCoarsenHookRemove(ts->dm,DMCoarsenHook_TSTheta,DMRestrictHook_TSTheta,ts);
948: DMSubDomainHookRemove(ts->dm,DMSubDomainHook_TSTheta,DMSubDomainRestrictHook_TSTheta,ts);
949: }
950: PetscFree(ts->data);
951: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetTheta_C",NULL);
952: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetTheta_C",NULL);
953: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetEndpoint_C",NULL);
954: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetEndpoint_C",NULL);
955: return(0);
956: }
958: /*
959: This defines the nonlinear equation that is to be solved with SNES
960: G(U) = F[t0+Theta*dt, U, (U-U0)*shift] = 0
962: Note that U here is the stage argument. This means that U = U_{n+1} only if endpoint = true,
963: otherwise U = theta U_{n+1} + (1 - theta) U0, which for the case of implicit midpoint is
964: U = (U_{n+1} + U0)/2
965: */
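/*
   For example (a sketch, assuming the common explicit-mass case F(t,u,u') = u' - f(t,u)):
   the SNES residual evaluated below is

     y = shift*(U - U0) - f(t0 + Theta*dt, U),      shift = 1/(Theta*dt),

   and for the implicit midpoint rule (Theta = 1/2, endpoint = false) the stage argument is
   U = (U0 + U_{n+1})/2 with shift = 2/dt.
*/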
966: static PetscErrorCode SNESTSFormFunction_Theta(SNES snes,Vec x,Vec y,TS ts)
967: {
968: TS_Theta *th = (TS_Theta*)ts->data;
970: Vec X0,Xdot;
971: DM dm,dmsave;
972: PetscReal shift = th->shift;
975: SNESGetDM(snes,&dm);
976: /* When using the endpoint variant, this is actually 1/Theta * Xdot */
977: TSThetaGetX0AndXdot(ts,dm,&X0,&Xdot);
978: if (x != X0) {
979: VecAXPBYPCZ(Xdot,-shift,shift,0,X0,x);
980: } else {
981: VecZeroEntries(Xdot);
982: }
983: /* DM monkey-business allows user code to call TSGetDM() inside of functions evaluated on levels of FAS */
984: dmsave = ts->dm;
985: ts->dm = dm;
986: TSComputeIFunction(ts,th->stage_time,x,Xdot,y,PETSC_FALSE);
987: ts->dm = dmsave;
988: TSThetaRestoreX0AndXdot(ts,dm,&X0,&Xdot);
989: return(0);
990: }
992: static PetscErrorCode SNESTSFormJacobian_Theta(SNES snes,Vec x,Mat A,Mat B,TS ts)
993: {
994: TS_Theta *th = (TS_Theta*)ts->data;
996: Vec Xdot;
997: DM dm,dmsave;
998: PetscReal shift = th->shift;
1001: SNESGetDM(snes,&dm);
1002: /* Xdot has already been computed in SNESTSFormFunction_Theta (SNES guarantees this) */
1003: TSThetaGetX0AndXdot(ts,dm,NULL,&Xdot);
1005: dmsave = ts->dm;
1006: ts->dm = dm;
1007: TSComputeIJacobian(ts,th->stage_time,x,Xdot,shift,A,B,PETSC_FALSE);
1008: ts->dm = dmsave;
1009: TSThetaRestoreX0AndXdot(ts,dm,NULL,&Xdot);
1010: return(0);
1011: }
1013: static PetscErrorCode TSForwardSetUp_Theta(TS ts)
1014: {
1015: TS_Theta *th = (TS_Theta*)ts->data;
1016: TS quadts = ts->quadraturets;
1020: /* combine sensitivities to parameters and sensitivities to initial values into one array */
1021: th->num_tlm = ts->num_parameters;
1022: MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatDeltaFwdSensip);
1023: if (quadts && quadts->mat_sensip) {
1024: MatDuplicate(quadts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatIntegralSensipTemp);
1025: MatDuplicate(quadts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatIntegralSensip0);
1026: }
1027: /* backup sensitivity results for roll-backs */
1028: MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatFwdSensip0);
1030: VecDuplicate(ts->vec_sol,&th->VecDeltaFwdSensipCol);
1031: return(0);
1032: }
1034: static PetscErrorCode TSForwardReset_Theta(TS ts)
1035: {
1036: TS_Theta *th = (TS_Theta*)ts->data;
1037: TS quadts = ts->quadraturets;
1041: if (quadts && quadts->mat_sensip) {
1042: MatDestroy(&th->MatIntegralSensipTemp);
1043: MatDestroy(&th->MatIntegralSensip0);
1044: }
1045: VecDestroy(&th->VecDeltaFwdSensipCol);
1046: MatDestroy(&th->MatDeltaFwdSensip);
1047: MatDestroy(&th->MatFwdSensip0);
1048: return(0);
1049: }
1051: static PetscErrorCode TSSetUp_Theta(TS ts)
1052: {
1053: TS_Theta *th = (TS_Theta*)ts->data;
1054: TS quadts = ts->quadraturets;
1055: PetscBool match;
1059: if (!th->VecCostIntegral0 && quadts && ts->costintegralfwd) { /* back up cost integral */
1060: VecDuplicate(quadts->vec_sol,&th->VecCostIntegral0);
1061: }
1062: if (!th->X) {
1063: VecDuplicate(ts->vec_sol,&th->X);
1064: }
1065: if (!th->Xdot) {
1066: VecDuplicate(ts->vec_sol,&th->Xdot);
1067: }
1068: if (!th->X0) {
1069: VecDuplicate(ts->vec_sol,&th->X0);
1070: }
1071: if (th->endpoint) {
1072: VecDuplicate(ts->vec_sol,&th->affine);
1073: }
1075: th->order = (th->Theta == 0.5) ? 2 : 1;
1076: th->shift = 1/(th->Theta*ts->time_step);
1078: TSGetDM(ts,&ts->dm);
1079: DMCoarsenHookAdd(ts->dm,DMCoarsenHook_TSTheta,DMRestrictHook_TSTheta,ts);
1080: DMSubDomainHookAdd(ts->dm,DMSubDomainHook_TSTheta,DMSubDomainRestrictHook_TSTheta,ts);
1082: TSGetAdapt(ts,&ts->adapt);
1083: TSAdaptCandidatesClear(ts->adapt);
1084: PetscObjectTypeCompare((PetscObject)ts->adapt,TSADAPTNONE,&match);
1085: if (!match) {
1086: VecDuplicate(ts->vec_sol,&th->vec_sol_prev);
1087: VecDuplicate(ts->vec_sol,&th->vec_lte_work);
1088: }
1089: TSGetSNES(ts,&ts->snes);
1090: return(0);
1091: }
1093: /*------------------------------------------------------------*/
1095: static PetscErrorCode TSAdjointSetUp_Theta(TS ts)
1096: {
1097: TS_Theta *th = (TS_Theta*)ts->data;
1101: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsDeltaLam);
1102: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsSensiTemp);
1103: if (ts->vecs_sensip) {
1104: VecDuplicateVecs(ts->vecs_sensip[0],ts->numcost,&th->VecsDeltaMu);
1105: }
1106: if (ts->vecs_sensi2) {
1107: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsDeltaLam2);
1108: VecDuplicateVecs(ts->vecs_sensi2[0],ts->numcost,&th->VecsSensi2Temp);
1109: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1110: if (!ts->ihessianproduct_fuu) ts->vecs_fuu = ts->vecs_guu;
1111: if (!ts->ihessianproduct_fup) ts->vecs_fup = ts->vecs_gup;
1112: }
1113: if (ts->vecs_sensi2p) {
1114: VecDuplicateVecs(ts->vecs_sensi2p[0],ts->numcost,&th->VecsDeltaMu2);
1115: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1116: if (!ts->ihessianproduct_fpu) ts->vecs_fpu = ts->vecs_gpu;
1117: if (!ts->ihessianproduct_fpp) ts->vecs_fpp = ts->vecs_gpp;
1118: }
1119: return(0);
1120: }
1122: static PetscErrorCode TSSetFromOptions_Theta(PetscOptionItems *PetscOptionsObject,TS ts)
1123: {
1124: TS_Theta *th = (TS_Theta*)ts->data;
1128: PetscOptionsHead(PetscOptionsObject,"Theta ODE solver options");
1129: {
1130: PetscOptionsReal("-ts_theta_theta","Location of stage (0<Theta<=1)","TSThetaSetTheta",th->Theta,&th->Theta,NULL);
1131: PetscOptionsBool("-ts_theta_endpoint","Use the endpoint instead of midpoint form of the Theta method","TSThetaSetEndpoint",th->endpoint,&th->endpoint,NULL);
1132: PetscOptionsBool("-ts_theta_initial_guess_extrapolate","Extrapolate stage initial guess from previous solution (sometimes unstable)","TSThetaSetExtrapolate",th->extrapolate,&th->extrapolate,NULL);
1133: }
1134: PetscOptionsTail();
1135: return(0);
1136: }
1138: static PetscErrorCode TSView_Theta(TS ts,PetscViewer viewer)
1139: {
1140: TS_Theta *th = (TS_Theta*)ts->data;
1141: PetscBool iascii;
1145: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1146: if (iascii) {
1147: PetscViewerASCIIPrintf(viewer," Theta=%g\n",(double)th->Theta);
1148: PetscViewerASCIIPrintf(viewer," Extrapolation=%s\n",th->extrapolate ? "yes" : "no");
1149: }
1150: return(0);
1151: }
1153: static PetscErrorCode TSThetaGetTheta_Theta(TS ts,PetscReal *theta)
1154: {
1155: TS_Theta *th = (TS_Theta*)ts->data;
1158: *theta = th->Theta;
1159: return(0);
1160: }
1162: static PetscErrorCode TSThetaSetTheta_Theta(TS ts,PetscReal theta)
1163: {
1164: TS_Theta *th = (TS_Theta*)ts->data;
1167: if (theta <= 0 || 1 < theta) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_OUTOFRANGE,"Theta %g not in range (0,1]",(double)theta);
1168: th->Theta = theta;
1169: th->order = (th->Theta == 0.5) ? 2 : 1;
1170: return(0);
1171: }
1173: static PetscErrorCode TSThetaGetEndpoint_Theta(TS ts,PetscBool *endpoint)
1174: {
1175: TS_Theta *th = (TS_Theta*)ts->data;
1178: *endpoint = th->endpoint;
1179: return(0);
1180: }
1182: static PetscErrorCode TSThetaSetEndpoint_Theta(TS ts,PetscBool flg)
1183: {
1184: TS_Theta *th = (TS_Theta*)ts->data;
1187: th->endpoint = flg;
1188: return(0);
1189: }
1191: #if defined(PETSC_HAVE_COMPLEX)
1192: static PetscErrorCode TSComputeLinearStability_Theta(TS ts,PetscReal xr,PetscReal xi,PetscReal *yr,PetscReal *yi)
1193: {
1194: PetscComplex z = xr + xi*PETSC_i,f;
1195: TS_Theta *th = (TS_Theta*)ts->data;
1196: const PetscReal one = 1.0;
1199: f = (one + (one - th->Theta)*z)/(one - th->Theta*z);
1200: *yr = PetscRealPartComplex(f);
1201: *yi = PetscImaginaryPartComplex(f);
1202: return(0);
1203: }
1204: #endif
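/*
   The function above evaluates the stability function of the theta method,
   R(z) = (1 + (1-Theta)*z)/(1 - Theta*z).  For reference: |R(z)| <= 1 on the left half-plane
   whenever Theta >= 1/2 (A-stability), and R(z) -> 0 as |z| -> infinity only for Theta = 1
   (backward Euler), which is therefore L-stable.
*/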
1206: static PetscErrorCode TSGetStages_Theta(TS ts,PetscInt *ns,Vec **Y)
1207: {
1208: TS_Theta *th = (TS_Theta*)ts->data;
1211: if (ns) *ns = 1;
1212: if (Y) {
1213: *Y = (!th->endpoint && th->Theta != 1.0) ? &(th->X) : &(th->X0);
1214: }
1215: return(0);
1216: }
1218: /* ------------------------------------------------------------ */
1219: /*MC
1220: TSTHETA - DAE solver using the implicit Theta method
1222: Level: beginner
1224: Options Database:
1225: + -ts_theta_theta <Theta> - Location of stage (0<Theta<=1)
1226: . -ts_theta_endpoint <flag> - Use the endpoint (like Crank-Nicolson) instead of midpoint form of the Theta method
1227: - -ts_theta_initial_guess_extrapolate <flg> - Extrapolate stage initial guess from previous solution (sometimes unstable)
1229: Notes:
1230: $ -ts_type theta -ts_theta_theta 1.0 corresponds to backward Euler (TSBEULER)
1231: $ -ts_type theta -ts_theta_theta 0.5 corresponds to the implicit midpoint rule
1232: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint corresponds to Crank-Nicolson (TSCN)
1234: This method can be applied to DAE.
1236: This method is cast as a 1-stage implicit Runge-Kutta method.
1238: .vb
1239: Theta | Theta
1240: -------------
1241: | 1
1242: .ve
1244: For the default Theta=0.5, this is also known as the implicit midpoint rule.
1246: When the endpoint variant is chosen, the method becomes a 2-stage method with first stage explicit:
1248: .vb
1249: 0 | 0 0
1250: 1 | 1-Theta Theta
1251: -------------------
1252: | 1-Theta Theta
1253: .ve
1255: For the default Theta=0.5, this is the trapezoid rule (also known as Crank-Nicolson, see TSCN).
1257: To apply a diagonally implicit RK method to DAE, the stage formula
1259: $ Y_i = X + h sum_j a_ij Y'_j
1261: is interpreted as a formula for Y'_i in terms of Y_i and known values (Y'_j, j<i)
1263: .seealso: TSCreate(), TS, TSSetType(), TSCN, TSBEULER, TSThetaSetTheta(), TSThetaSetEndpoint()
1265: M*/
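/*
   A minimal usage sketch; FormIFunction, the user context 'user', and the solution Vec 'u' are
   placeholders for application-provided objects, not part of this file:

     TS ts;
     TSCreate(PETSC_COMM_WORLD,&ts);
     TSSetIFunction(ts,NULL,FormIFunction,&user);
     TSSetSolution(ts,u);
     TSSetType(ts,TSTHETA);
     TSThetaSetTheta(ts,0.5);              - implicit midpoint (the default)
     TSThetaSetEndpoint(ts,PETSC_TRUE);    - switch to the trapezoid (Crank-Nicolson) form
     TSSetFromOptions(ts);                 - or select at run time: -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint
     TSSolve(ts,u);
     TSDestroy(&ts);
*/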
1266: PETSC_EXTERN PetscErrorCode TSCreate_Theta(TS ts)
1267: {
1268: TS_Theta *th;
1272: ts->ops->reset = TSReset_Theta;
1273: ts->ops->adjointreset = TSAdjointReset_Theta;
1274: ts->ops->destroy = TSDestroy_Theta;
1275: ts->ops->view = TSView_Theta;
1276: ts->ops->setup = TSSetUp_Theta;
1277: ts->ops->adjointsetup = TSAdjointSetUp_Theta;
1278: ts->ops->adjointreset = TSAdjointReset_Theta;
1279: ts->ops->step = TSStep_Theta;
1280: ts->ops->interpolate = TSInterpolate_Theta;
1281: ts->ops->evaluatewlte = TSEvaluateWLTE_Theta;
1282: ts->ops->rollback = TSRollBack_Theta;
1283: ts->ops->setfromoptions = TSSetFromOptions_Theta;
1284: ts->ops->snesfunction = SNESTSFormFunction_Theta;
1285: ts->ops->snesjacobian = SNESTSFormJacobian_Theta;
1286: #if defined(PETSC_HAVE_COMPLEX)
1287: ts->ops->linearstability = TSComputeLinearStability_Theta;
1288: #endif
1289: ts->ops->getstages = TSGetStages_Theta;
1290: ts->ops->adjointstep = TSAdjointStep_Theta;
1291: ts->ops->adjointintegral = TSAdjointCostIntegral_Theta;
1292: ts->ops->forwardintegral = TSForwardCostIntegral_Theta;
1293: ts->default_adapt_type = TSADAPTNONE;
1295: ts->ops->forwardsetup = TSForwardSetUp_Theta;
1296: ts->ops->forwardreset = TSForwardReset_Theta;
1297: ts->ops->forwardstep = TSForwardStep_Theta;
1298: ts->ops->forwardgetstages = TSForwardGetStages_Theta;
1300: ts->usessnes = PETSC_TRUE;
1302: PetscNewLog(ts,&th);
1303: ts->data = (void*)th;
1305: th->VecsDeltaLam = NULL;
1306: th->VecsDeltaMu = NULL;
1307: th->VecsSensiTemp = NULL;
1308: th->VecsSensi2Temp = NULL;
1310: th->extrapolate = PETSC_FALSE;
1311: th->Theta = 0.5;
1312: th->order = 2;
1313: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetTheta_C",TSThetaGetTheta_Theta);
1314: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetTheta_C",TSThetaSetTheta_Theta);
1315: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetEndpoint_C",TSThetaGetEndpoint_Theta);
1316: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetEndpoint_C",TSThetaSetEndpoint_Theta);
1317: return(0);
1318: }
1320: /*@
1321: TSThetaGetTheta - Get the abscissa of the stage in (0,1].
1323: Not Collective
1325: Input Parameter:
1326: . ts - timestepping context
1328: Output Parameter:
1329: . theta - stage abscissa
1331: Note:
1332: Use of this function is normally only required to hack TSTHETA to use a modified integration scheme.
1334: Level: advanced
1336: .seealso: TSThetaSetTheta()
1337: @*/
1338: PetscErrorCode TSThetaGetTheta(TS ts,PetscReal *theta)
1339: {
1345: PetscUseMethod(ts,"TSThetaGetTheta_C",(TS,PetscReal*),(ts,theta));
1346: return(0);
1347: }
1349: /*@
1350: TSThetaSetTheta - Set the abscissa of the stage in (0,1].
1352: Not Collective
1354: Input Parameters:
1355: + ts - timestepping context
1356: - theta - stage abscissa
1358: Options Database:
1359: . -ts_theta_theta <theta>
1361: Level: intermediate
1363: .seealso: TSThetaGetTheta()
1364: @*/
1365: PetscErrorCode TSThetaSetTheta(TS ts,PetscReal theta)
1366: {
1371: PetscTryMethod(ts,"TSThetaSetTheta_C",(TS,PetscReal),(ts,theta));
1372: return(0);
1373: }
1375: /*@
1376: TSThetaGetEndpoint - Gets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule).
1378: Not Collective
1380: Input Parameter:
1381: . ts - timestepping context
1383: Output Parameter:
1384: . endpoint - PETSC_TRUE when using the endpoint variant
1386: Level: advanced
1388: .seealso: TSThetaSetEndpoint(), TSTHETA, TSCN
1389: @*/
1390: PetscErrorCode TSThetaGetEndpoint(TS ts,PetscBool *endpoint)
1391: {
1397: PetscUseMethod(ts,"TSThetaGetEndpoint_C",(TS,PetscBool*),(ts,endpoint));
1398: return(0);
1399: }
1401: /*@
1402: TSThetaSetEndpoint - Sets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule).
1404: Not Collective
1406: Input Parameters:
1407: + ts - timestepping context
1408: - flg - PETSC_TRUE to use the endpoint variant
1410: Options Database:
1411: . -ts_theta_endpoint <flg>
1413: Level: intermediate
1415: .seealso: TSTHETA, TSCN
1416: @*/
1417: PetscErrorCode TSThetaSetEndpoint(TS ts,PetscBool flg)
1418: {
1423: PetscTryMethod(ts,"TSThetaSetEndpoint_C",(TS,PetscBool),(ts,flg));
1424: return(0);
1425: }
1427: /*
1428: * TSBEULER and TSCN are straightforward specializations of TSTHETA.
1429: * The creation functions for these specializations are below.
1430: */
1432: static PetscErrorCode TSSetUp_BEuler(TS ts)
1433: {
1434: TS_Theta *th = (TS_Theta*)ts->data;
1438: if (th->Theta != 1.0) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change the default value (1) of theta when using backward Euler\n");
1439: if (th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change to the endpoint form of the Theta methods when using backward Euler\n");
1440: TSSetUp_Theta(ts);
1441: return(0);
1442: }
1444: static PetscErrorCode TSView_BEuler(TS ts,PetscViewer viewer)
1445: {
1447: return(0);
1448: }
1450: /*MC
1451: TSBEULER - ODE solver using the implicit backward Euler method
1453: Level: beginner
1455: Notes:
1456: TSBEULER is equivalent to TSTHETA with Theta=1.0
1458: $ -ts_type theta -ts_theta_theta 1.0
1460: .seealso: TSCreate(), TS, TSSetType(), TSEULER, TSCN, TSTHETA
1462: M*/
1463: PETSC_EXTERN PetscErrorCode TSCreate_BEuler(TS ts)
1464: {
1468: TSCreate_Theta(ts);
1469: TSThetaSetTheta(ts,1.0);
1470: TSThetaSetEndpoint(ts,PETSC_FALSE);
1471: ts->ops->setup = TSSetUp_BEuler;
1472: ts->ops->view = TSView_BEuler;
1473: return(0);
1474: }
1476: static PetscErrorCode TSSetUp_CN(TS ts)
1477: {
1478: TS_Theta *th = (TS_Theta*)ts->data;
1482: if (th->Theta != 0.5) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change the default value (0.5) of theta when using Crank-Nicolson\n");
1483: if (!th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Can not change to the midpoint form of the Theta methods when using Crank-Nicolson\n");
1484: TSSetUp_Theta(ts);
1485: return(0);
1486: }
1488: static PetscErrorCode TSView_CN(TS ts,PetscViewer viewer)
1489: {
1491: return(0);
1492: }
1494: /*MC
1495: TSCN - ODE solver using the implicit Crank-Nicolson method.
1497: Level: beginner
1499: Notes:
1500: TSCN is equivalent to TSTHETA with Theta=0.5 and the "endpoint" option set. I.e.
1502: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint
1504: .seealso: TSCreate(), TS, TSSetType(), TSBEULER, TSTHETA
1506: M*/
1507: PETSC_EXTERN PetscErrorCode TSCreate_CN(TS ts)
1508: {
1512: TSCreate_Theta(ts);
1513: TSThetaSetTheta(ts,0.5);
1514: TSThetaSetEndpoint(ts,PETSC_TRUE);
1515: ts->ops->setup = TSSetUp_CN;
1516: ts->ops->view = TSView_CN;
1517: return(0);
1518: }