Actual source code: tssen.c

  1: #include <petsc/private/tsimpl.h>
  2: #include <petscdraw.h>

  4: PetscLogEvent TS_AdjointStep,TS_ForwardStep,TS_JacobianPEval;

  6: /* #define TSADJOINT_STAGE */

  8: /* ------------------------ Sensitivity Context ---------------------------*/

 10: /*@C
 11:   TSSetRHSJacobianP - Sets the function that computes the Jacobian of G w.r.t. the parameters P where U_t = G(U,P,t), as well as the location to store the matrix.

 13:   Logically Collective on TS

 15:   Input Parameters:
 16: + ts - TS context obtained from TSCreate()
 17: . Amat - JacobianP matrix
 18: . func - function
 19: - ctx - [optional] user-defined function context

 21:   Calling sequence of func:
 22: $ func (TS ts,PetscReal t,Vec U,Mat A,void *ctx);
 23: +   t - current timestep
 24: .   U - input vector (current ODE solution)
 25: .   A - output matrix
 26: -   ctx - [optional] user-defined function context

 28:   Level: intermediate

 30:   Notes:
 31:     Amat has the same number of rows and the same row parallel layout as u; it has the same number of columns and parallel layout as p

 33: .seealso: TSGetRHSJacobianP()
 34: @*/
 35: PetscErrorCode TSSetRHSJacobianP(TS ts,Mat Amat,PetscErrorCode (*func)(TS,PetscReal,Vec,Mat,void*),void *ctx)
 36: {

 40:   ts->rhsjacobianp    = func;
 41:   ts->rhsjacobianpctx = ctx;
 42:   if (Amat) {
 43:     PetscObjectReference((PetscObject)Amat);
 44:     MatDestroy(&ts->Jacprhs);
 45:     ts->Jacprhs = Amat;
 46:   }
 47:   return 0;
 48: }
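
/*
   A minimal usage sketch for TSSetRHSJacobianP() (not part of tssen.c; the callback name RHSJacobianP,
   the struct AppCtx, and the sizes n and np are illustrative):

     typedef struct { PetscInt np; } AppCtx;

     static PetscErrorCode RHSJacobianP(TS ts, PetscReal t, Vec U, Mat A, void *ctx)
     {
       AppCtx *user = (AppCtx*)ctx;
       // fill column k of A with dG/dp_k evaluated at (t,U), for k = 0,...,user->np-1,
       // e.g. with MatSetValue()/MatSetValues(), then assemble
       MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
       MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
       return 0;
     }

   and, during setup, a dense n x np matrix whose row layout matches the solution vector:

     Mat Jacp;
     MatCreateDense(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, n, np, NULL, &Jacp);
     TSSetRHSJacobianP(ts, Jacp, RHSJacobianP, &user);
*/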

 50: /*@C
 51:   TSGetRHSJacobianP - Gets the function that computes the Jacobian of G w.r.t. the parameters P where U_t = G(U,P,t), as well as the location to store the matrix.

 53:   Logically Collective on TS

 55:   Input Parameter:
 56: . ts - TS context obtained from TSCreate()

 58:   Output Parameters:
 59: + Amat - JacobianP matrix
 60: . func - function
 61: - ctx - [optional] user-defined function context

 63:   Calling sequence of func:
 64: $ func (TS ts,PetscReal t,Vec U,Mat A,void *ctx);
 65: +   t - current timestep
 66: .   U - input vector (current ODE solution)
 67: .   A - output matrix
 68: -   ctx - [optional] user-defined function context

 70:   Level: intermediate

 72:   Notes:
 73:     Amat has the same number of rows and the same row parallel layout as u; it has the same number of columns and parallel layout as p

 75: .seealso: TSSetRHSJacobianP()
 76: @*/
 77: PetscErrorCode TSGetRHSJacobianP(TS ts,Mat *Amat,PetscErrorCode (**func)(TS,PetscReal,Vec,Mat,void*),void **ctx)
 78: {
 79:   if (func) *func = ts->rhsjacobianp;
 80:   if (ctx) *ctx  = ts->rhsjacobianpctx;
 81:   if (Amat) *Amat = ts->Jacprhs;
 82:   return 0;
 83: }

 85: /*@C
 86:   TSComputeRHSJacobianP - Runs the user-defined JacobianP function.

 88:   Collective on TS

 90:   Input Parameters:
 91: . ts   - The TS context obtained from TSCreate()

 93:   Level: developer

 95: .seealso: TSSetRHSJacobianP()
 96: @*/
 97: PetscErrorCode TSComputeRHSJacobianP(TS ts,PetscReal t,Vec U,Mat Amat)
 98: {
 99:   if (!Amat) return 0;

103:   PetscStackPush("TS user JacobianP function for sensitivity analysis");
104:   (*ts->rhsjacobianp)(ts,t,U,Amat,ts->rhsjacobianpctx);
105:   PetscStackPop;
106:   return 0;
107: }

109: /*@C
110:   TSSetIJacobianP - Sets the function that computes the Jacobian of F w.r.t. the parameters P where F(Udot,U,t) = G(U,P,t), as well as the location to store the matrix.

112:   Logically Collective on TS

114:   Input Parameters:
115: + ts - TS context obtained from TSCreate()
116: . Amat - JacobianP matrix
117: . func - function
118: - ctx - [optional] user-defined function context

120:   Calling sequence of func:
121: $ func (TS ts,PetscReal t,Vec U,Vec Udot,PetscReal shift,Mat A,void *ctx);
122: +   t - current timestep
123: .   U - input vector (current ODE solution)
124: .   Udot - time derivative of state vector
125: .   shift - shift to apply, as in TSSetIJacobian()
126: .   A - output matrix
127: -   ctx - [optional] user-defined function context

129:   Level: intermediate

131:   Notes:
132:     Amat has the same number of rows and the same row parallel layout as u; it has the same number of columns and parallel layout as p

134: .seealso:
135: @*/
136: PetscErrorCode TSSetIJacobianP(TS ts,Mat Amat,PetscErrorCode (*func)(TS,PetscReal,Vec,Vec,PetscReal,Mat,void*),void *ctx)
137: {

141:   ts->ijacobianp    = func;
142:   ts->ijacobianpctx = ctx;
143:   if (Amat) {
144:     PetscObjectReference((PetscObject)Amat);
145:     MatDestroy(&ts->Jacp);
146:     ts->Jacp = Amat;
147:   }
148:   return 0;
149: }
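
/*
   A minimal registration sketch for TSSetIJacobianP() (not part of tssen.c; IJacobianP, Jacp and user
   are illustrative names):

     static PetscErrorCode IJacobianP(TS ts, PetscReal t, Vec U, Vec Udot, PetscReal shift, Mat A, void *ctx)
     {
       // fill A = dF/dp evaluated at (t,U,Udot); shift plays the same role as in TSSetIJacobian()
       MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
       MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
       return 0;
     }

     TSSetIJacobianP(ts, Jacp, IJacobianP, &user);
*/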

151: /*@C
152:   TSComputeIJacobianP - Runs the user-defined IJacobianP function.

154:   Collective on TS

156:   Input Parameters:
157: + ts - the TS context
158: . t - current timestep
159: . U - state vector
160: . Udot - time derivative of state vector
161: . shift - shift to apply, as in TSSetIJacobian()
162: - imex - flag indicating whether the method is IMEX so that the RHSJacobian should be kept separate

164:   Output Parameters:
165: . A - Jacobian matrix

167:   Level: developer

169: .seealso: TSSetIJacobianP()
170: @*/
171: PetscErrorCode TSComputeIJacobianP(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal shift,Mat Amat,PetscBool imex)
172: {
173:   if (!Amat) return 0;

178:   PetscLogEventBegin(TS_JacobianPEval,ts,U,Amat,0);
179:   if (ts->ijacobianp) {
180:     PetscStackPush("TS user JacobianP function for sensitivity analysis");
181:     (*ts->ijacobianp)(ts,t,U,Udot,shift,Amat,ts->ijacobianpctx);
182:     PetscStackPop;
183:   }
184:   if (imex) {
185:     if (!ts->ijacobianp) {  /* system was written as Udot = G(t,U) */
186:       PetscBool assembled;
187:       MatZeroEntries(Amat);
188:       MatAssembled(Amat,&assembled);
189:       if (!assembled) {
190:         MatAssemblyBegin(Amat,MAT_FINAL_ASSEMBLY);
191:         MatAssemblyEnd(Amat,MAT_FINAL_ASSEMBLY);
192:       }
193:     }
194:   } else {
195:     if (ts->rhsjacobianp) {
196:       TSComputeRHSJacobianP(ts,t,U,ts->Jacprhs);
197:     }
198:     if (ts->Jacprhs == Amat) { /* No IJacobian, so we only have the RHS matrix */
199:       MatScale(Amat,-1);
200:     } else if (ts->Jacprhs) { /* Both IJacobian and RHSJacobian */
201:       MatStructure axpy = DIFFERENT_NONZERO_PATTERN;
202:       if (!ts->ijacobianp) { /* No IJacobianp provided, but we have a separate RHS matrix */
203:         MatZeroEntries(Amat);
204:       }
205:       MatAXPY(Amat,-1,ts->Jacprhs,axpy);
206:     }
207:   }
208:   PetscLogEventEnd(TS_JacobianPEval,ts,U,Amat,0);
209:   return 0;
210: }

212: /*@C
213:     TSSetCostIntegrand - Sets the routine for evaluating the integral term in one or more cost functions

215:     Logically Collective on TS

217:     Input Parameters:
218: +   ts - the TS context obtained from TSCreate()
219: .   numcost - number of gradients to be computed, this is the number of cost functions
220: .   costintegral - vector that stores the integral values
221: .   rf - routine for evaluating the integrand function
222: .   drduf - function that computes the gradients of the r's with respect to u
223: .   drdpf - function that computes the gradients of the r's with respect to p, can be NULL if parametric sensitivity is not desired (mu=NULL)
224: .   fwd - flag indicating whether to evaluate cost integral in the forward run or the adjoint run
225: -   ctx - [optional] user-defined context for private data for the function evaluation routine (may be NULL)

227:     Calling sequence of rf:
228: $   PetscErrorCode rf(TS ts,PetscReal t,Vec U,Vec F,void *ctx);

230:     Calling sequence of drduf:
231: $   PetscErrorCode drduf(TS ts,PetscReal t,Vec U,Vec *dRdU,void *ctx);

233:     Calling sequence of drdpf:
234: $   PetscErrorCode drdpf(TS ts,PetscReal t,Vec U,Vec *dRdP,void *ctx);

236:     Level: deprecated

238:     Notes:
239:     For optimization there is usually a single cost function (numcost = 1). For sensitivities there may be multiple cost functions

241: .seealso: TSSetRHSJacobianP(), TSGetCostGradients(), TSSetCostGradients()
242: @*/
243: PetscErrorCode TSSetCostIntegrand(TS ts,PetscInt numcost,Vec costintegral,PetscErrorCode (*rf)(TS,PetscReal,Vec,Vec,void*),
244:                                                           PetscErrorCode (*drduf)(TS,PetscReal,Vec,Vec*,void*),
245:                                                           PetscErrorCode (*drdpf)(TS,PetscReal,Vec,Vec*,void*),
246:                                                           PetscBool fwd,void *ctx)
247: {
251:   if (!ts->numcost) ts->numcost=numcost;

253:   if (costintegral) {
254:     PetscObjectReference((PetscObject)costintegral);
255:     VecDestroy(&ts->vec_costintegral);
256:     ts->vec_costintegral = costintegral;
257:   } else {
258:     if (!ts->vec_costintegral) { /* Create a seq vec if user does not provide one */
259:       VecCreateSeq(PETSC_COMM_SELF,numcost,&ts->vec_costintegral);
260:     } else {
261:       VecSet(ts->vec_costintegral,0.0);
262:     }
263:   }
264:   if (!ts->vec_costintegrand) {
265:     VecDuplicate(ts->vec_costintegral,&ts->vec_costintegrand);
266:   } else {
267:     VecSet(ts->vec_costintegrand,0.0);
268:   }
269:   ts->costintegralfwd  = fwd; /* Evaluate the cost integral in forward run if fwd is true */
270:   ts->costintegrand    = rf;
271:   ts->costintegrandctx = ctx;
272:   ts->drdufunction     = drduf;
273:   ts->drdpfunction     = drdpf;
274:   return 0;
275: }

277: /*@C
278:    TSGetCostIntegral - Returns the values of the integral term in the cost functions.
279:    It is valid to call the routine after a backward run.

281:    Not Collective

283:    Input Parameter:
284: .  ts - the TS context obtained from TSCreate()

286:    Output Parameter:
287: .  v - the vector containing the integrals for each cost function

289:    Level: intermediate

291: .seealso: TSSetCostIntegrand()

293: @*/
294: PetscErrorCode  TSGetCostIntegral(TS ts,Vec *v)
295: {
296:   TS             quadts;

300:   TSGetQuadratureTS(ts,NULL,&quadts);
301:   *v = quadts->vec_sol;
302:   return 0;
303: }
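
/*
   For example, after a run that accumulates cost integrals, the values can be inspected with
   (a sketch, not part of tssen.c):

     Vec q;
     TSGetCostIntegral(ts, &q);
     VecView(q, PETSC_VIEWER_STDOUT_WORLD);
*/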

305: /*@C
306:    TSComputeCostIntegrand - Evaluates the integral function in the cost functions.

308:    Input Parameters:
309: +  ts - the TS context
310: .  t - current time
311: -  U - state vector, i.e. current solution

313:    Output Parameter:
314: .  Q - vector of size numcost to hold the outputs

316:    Notes:
317:    Most users should not need to explicitly call this routine, as it
318:    is used internally within the sensitivity analysis context.

320:    Level: deprecated

322: .seealso: TSSetCostIntegrand()
323: @*/
324: PetscErrorCode TSComputeCostIntegrand(TS ts,PetscReal t,Vec U,Vec Q)
325: {

330:   PetscLogEventBegin(TS_FunctionEval,ts,U,Q,0);
331:   if (ts->costintegrand) {
332:     PetscStackPush("TS user integrand in the cost function");
333:     (*ts->costintegrand)(ts,t,U,Q,ts->costintegrandctx);
334:     PetscStackPop;
335:   } else {
336:     VecZeroEntries(Q);
337:   }

339:   PetscLogEventEnd(TS_FunctionEval,ts,U,Q,0);
340:   return 0;
341: }

343: /*@C
344:   TSComputeDRDUFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobian()

346:   Level: deprecated

348: @*/
349: PetscErrorCode TSComputeDRDUFunction(TS ts,PetscReal t,Vec U,Vec *DRDU)
350: {
351:   if (!DRDU) return 0;

355:   PetscStackPush("TS user DRDU function for sensitivity analysis");
356:   (*ts->drdufunction)(ts,t,U,DRDU,ts->costintegrandctx);
357:   PetscStackPop;
358:   return 0;
359: }

361: /*@C
362:   TSComputeDRDPFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobianP()

364:   Level: deprecated

366: @*/
367: PetscErrorCode TSComputeDRDPFunction(TS ts,PetscReal t,Vec U,Vec *DRDP)
368: {
369:   if (!DRDP) return 0;

373:   PetscStackPush("TS user DRDP function for sensitivity analysis");
374:   (*ts->drdpfunction)(ts,t,U,DRDP,ts->costintegrandctx);
375:   PetscStackPop;
376:   return 0;
377: }

379: /*@C
380:   TSSetIHessianProduct - Sets the function that computes the vector-Hessian-vector product. The Hessian is the second-order derivative of F (IFunction) w.r.t. the state variable.

382:   Logically Collective on TS

384:   Input Parameters:
385: + ts - TS context obtained from TSCreate()
386: . ihp1 - an array of vectors storing the result of vector-Hessian-vector product for F_UU
387: . hessianproductfunc1 - vector-Hessian-vector product function for F_UU
388: . ihp2 - an array of vectors storing the result of vector-Hessian-vector product for F_UP
389: . hessianproductfunc2 - vector-Hessian-vector product function for F_UP
390: . ihp3 - an array of vectors storing the result of vector-Hessian-vector product for F_PU
391: . hessianproductfunc3 - vector-Hessian-vector product function for F_PU
392: . ihp4 - an array of vectors storing the result of vector-Hessian-vector product for F_PP
393: - hessianproductfunc4 - vector-Hessian-vector product function for F_PP

395:   Calling sequence of ihessianproductfunc:
396: $ ihessianproductfunc (TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV,void *ctx);
397: +   t - current timestep
398: .   U - input vector (current ODE solution)
399: .   Vl - an array of input vectors to be left-multiplied with the Hessian
400: .   Vr - input vector to be right-multiplied with the Hessian
401: .   VHV - an array of output vectors for vector-Hessian-vector product
402: -   ctx - [optional] user-defined function context

404:   Level: intermediate

406:   Notes:
407:   The first Hessian function and the working array are required.
408:   As an example of how to implement the callback functions, the second callback function calculates the vector-Hessian-vector product
409:   $ Vl_n^T*F_UP*Vr
410:   where the vector Vl_n (n-th element in the array Vl) and Vr are of size N and M respectively, and the Hessian F_UP is of size N x N x M.
411:   Each entry of F_UP corresponds to the derivative
412:   $ F_UP[i][j][k] = \frac{\partial^2 F[i]}{\partial U[j] \partial P[k]}.
413:   The result of the vector-Hessian-vector product for Vl_n needs to be stored in vector VHV_n with the j-th entry being
414:   $ VHV_n[j] = \sum_i \sum_k {Vl_n[i] * F_UP[i][j][k] * Vr[k]}
415:   If the cost function is a scalar, there will be only one vector in Vl and VHV.

417: .seealso:
418: @*/
419: PetscErrorCode TSSetIHessianProduct(TS ts,Vec *ihp1,PetscErrorCode (*ihessianproductfunc1)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
420:                                           Vec *ihp2,PetscErrorCode (*ihessianproductfunc2)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
421:                                           Vec *ihp3,PetscErrorCode (*ihessianproductfunc3)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
422:                                           Vec *ihp4,PetscErrorCode (*ihessianproductfunc4)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
423:                                     void *ctx)
424: {

428:   ts->ihessianproductctx = ctx;
429:   if (ihp1) ts->vecs_fuu = ihp1;
430:   if (ihp2) ts->vecs_fup = ihp2;
431:   if (ihp3) ts->vecs_fpu = ihp3;
432:   if (ihp4) ts->vecs_fpp = ihp4;
433:   ts->ihessianproduct_fuu = ihessianproductfunc1;
434:   ts->ihessianproduct_fup = ihessianproductfunc2;
435:   ts->ihessianproduct_fpu = ihessianproductfunc3;
436:   ts->ihessianproduct_fpp = ihessianproductfunc4;
437:   return 0;
438: }
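
/*
   A minimal registration sketch for TSSetIHessianProduct(), supplying only the F_UU term (not part of
   tssen.c; IHessianProductUU, vhv_uu, numcost and user are illustrative names):

     static PetscErrorCode IHessianProductUU(TS ts, PetscReal t, Vec U, Vec *Vl, Vec Vr, Vec *VHV, void *ctx)
     {
       // for each cost function n, set VHV[n][j] = sum_i sum_k Vl[n][i]*F_UU[i][j][k]*Vr[k]
       return 0;
     }

     Vec *vhv_uu;
     VecDuplicateVecs(U, numcost, &vhv_uu);   // required working array, one vector per cost function
     TSSetIHessianProduct(ts, vhv_uu, IHessianProductUU, NULL, NULL, NULL, NULL, NULL, NULL, &user);
*/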

440: /*@C
441:   TSComputeIHessianProductFunctionUU - Runs the user-defined vector-Hessian-vector product function for Fuu.

443:   Collective on TS

445:   Input Parameters:
446: . ts   - The TS context obtained from TSCreate()

448:   Notes:
449:   TSComputeIHessianProductFunctionUU() is typically used for sensitivity implementation,
450:   so most users would not generally call this routine themselves.

452:   Level: developer

454: .seealso: TSSetIHessianProduct()
455: @*/
456: PetscErrorCode TSComputeIHessianProductFunctionUU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
457: {
458:   if (!VHV) return 0;

462:   if (ts->ihessianproduct_fuu) {
463:     PetscStackPush("TS user IHessianProduct function 1 for sensitivity analysis");
464:     (*ts->ihessianproduct_fuu)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
465:     PetscStackPop;
466:   }
467:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
468:   if (ts->rhshessianproduct_guu) {
469:     PetscInt nadj;
470:     TSComputeRHSHessianProductFunctionUU(ts,t,U,Vl,Vr,VHV);
471:     for (nadj=0; nadj<ts->numcost; nadj++) {
472:       VecScale(VHV[nadj],-1);
473:     }
474:   }
475:   return 0;
476: }

478: /*@C
479:   TSComputeIHessianProductFunctionUP - Runs the user-defined vector-Hessian-vector product function for Fup.

481:   Collective on TS

483:   Input Parameters:
484: . ts   - The TS context obtained from TSCreate()

486:   Notes:
487:   TSComputeIHessianProductFunctionUP() is typically used for sensitivity implementation,
488:   so most users would not generally call this routine themselves.

490:   Level: developer

492: .seealso: TSSetIHessianProduct()
493: @*/
494: PetscErrorCode TSComputeIHessianProductFunctionUP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
495: {
496:   if (!VHV) return 0;

500:   if (ts->ihessianproduct_fup) {
501:     PetscStackPush("TS user IHessianProduct function 2 for sensitivity analysis");
502:     (*ts->ihessianproduct_fup)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
503:     PetscStackPop;
504:   }
505:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
506:   if (ts->rhshessianproduct_gup) {
507:     PetscInt nadj;
508:     TSComputeRHSHessianProductFunctionUP(ts,t,U,Vl,Vr,VHV);
509:     for (nadj=0; nadj<ts->numcost; nadj++) {
510:       VecScale(VHV[nadj],-1);
511:     }
512:   }
513:   return 0;
514: }

516: /*@C
517:   TSComputeIHessianProductFunctionPU - Runs the user-defined vector-Hessian-vector product function for Fpu.

519:   Collective on TS

521:   Input Parameters:
522: . ts   - The TS context obtained from TSCreate()

524:   Notes:
525:   TSComputeIHessianProductFunctionPU() is typically used for sensitivity implementation,
526:   so most users would not generally call this routine themselves.

528:   Level: developer

530: .seealso: TSSetIHessianProduct()
531: @*/
532: PetscErrorCode TSComputeIHessianProductFunctionPU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
533: {
534:   if (!VHV) return 0;

538:   if (ts->ihessianproduct_fpu) {
539:     PetscStackPush("TS user IHessianProduct function 3 for sensitivity analysis");
540:     (*ts->ihessianproduct_fpu)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
541:     PetscStackPop;
542:   }
543:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
544:   if (ts->rhshessianproduct_gpu) {
545:     PetscInt nadj;
546:     TSComputeRHSHessianProductFunctionPU(ts,t,U,Vl,Vr,VHV);
547:     for (nadj=0; nadj<ts->numcost; nadj++) {
548:       VecScale(VHV[nadj],-1);
549:     }
550:   }
551:   return 0;
552: }

554: /*@C
555:   TSComputeIHessianProductFunctionPP - Runs the user-defined vector-Hessian-vector product function for Fpp.

557:   Collective on TS

559:   Input Parameters:
560: . ts   - The TS context obtained from TSCreate()

562:   Notes:
563:   TSComputeIHessianProductFunctionPP() is typically used for sensitivity implementation,
564:   so most users would not generally call this routine themselves.

566:   Level: developer

568: .seealso: TSSetIHessianProduct()
569: @*/
570: PetscErrorCode TSComputeIHessianProductFunctionPP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
571: {
572:   if (!VHV) return 0;

576:   if (ts->ihessianproduct_fpp) {
577:     PetscStackPush("TS user IHessianProduct function 4 for sensitivity analysis");
578:     (*ts->ihessianproduct_fpp)(ts,t,U,Vl,Vr,VHV,ts->ihessianproductctx);
579:     PetscStackPop;
580:   }
581:   /* does not consider IMEX for now, so either IHessian or RHSHessian will be calculated, using the same output VHV */
582:   if (ts->rhshessianproduct_gpp) {
583:     PetscInt nadj;
584:     TSComputeRHSHessianProductFunctionPP(ts,t,U,Vl,Vr,VHV);
585:     for (nadj=0; nadj<ts->numcost; nadj++) {
586:       VecScale(VHV[nadj],-1);
587:     }
588:   }
589:   return 0;
590: }

592: /*@C
593:   TSSetRHSHessianProduct - Sets the function that computes the vector-Hessian-vector product. The Hessian is the second-order derivative of G (RHSFunction) w.r.t. the state variable.

595:   Logically Collective on TS

597:   Input Parameters:
598: + ts - TS context obtained from TSCreate()
599: . rhshp1 - an array of vectors storing the result of vector-Hessian-vector product for G_UU
600: . hessianproductfunc1 - vector-Hessian-vector product function for G_UU
601: . rhshp2 - an array of vectors storing the result of vector-Hessian-vector product for G_UP
602: . hessianproductfunc2 - vector-Hessian-vector product function for G_UP
603: . rhshp3 - an array of vectors storing the result of vector-Hessian-vector product for G_PU
604: . hessianproductfunc3 - vector-Hessian-vector product function for G_PU
605: . rhshp4 - an array of vectors storing the result of vector-Hessian-vector product for G_PP
606: - hessianproductfunc4 - vector-Hessian-vector product function for G_PP

608:   Calling sequence of ihessianproductfunc:
609: $ rhshessianproductfunc (TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV,void *ctx);
610: +   t - current timestep
611: .   U - input vector (current ODE solution)
612: .   Vl - an array of input vectors to be left-multiplied with the Hessian
613: .   Vr - input vector to be right-multiplied with the Hessian
614: .   VHV - an array of output vectors for vector-Hessian-vector product
615: -   ctx - [optional] user-defined function context

617:   Level: intermediate

619:   Notes:
620:   The first Hessian function and the working array are required.
621:   As an example of how to implement the callback functions, the second callback function calculates the vector-Hessian-vector product
622:   $ Vl_n^T*G_UP*Vr
623:   where the vector Vl_n (n-th element in the array Vl) and Vr are of size N and M respectively, and the Hessian G_UP is of size N x N x M.
624:   Each entry of G_UP corresponds to the derivative
625:   $ G_UP[i][j][k] = \frac{\partial^2 G[i]}{\partial U[j] \partial P[k]}.
626:   The result of the vector-Hessian-vector product for Vl_n needs to be stored in vector VHV_n with the j-th entry being
627:   $ VHV_n[j] = \sum_i \sum_k {Vl_n[i] * G_UP[i][j][k] * Vr[k]}
628:   If the cost function is a scalar, there will be only one vector in Vl and VHV.

630: .seealso:
631: @*/
632: PetscErrorCode TSSetRHSHessianProduct(TS ts,Vec *rhshp1,PetscErrorCode (*rhshessianproductfunc1)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
633:                                           Vec *rhshp2,PetscErrorCode (*rhshessianproductfunc2)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
634:                                           Vec *rhshp3,PetscErrorCode (*rhshessianproductfunc3)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
635:                                           Vec *rhshp4,PetscErrorCode (*rhshessianproductfunc4)(TS,PetscReal,Vec,Vec*,Vec,Vec*,void*),
636:                                     void *ctx)
637: {

641:   ts->rhshessianproductctx = ctx;
642:   if (rhshp1) ts->vecs_guu = rhshp1;
643:   if (rhshp2) ts->vecs_gup = rhshp2;
644:   if (rhshp3) ts->vecs_gpu = rhshp3;
645:   if (rhshp4) ts->vecs_gpp = rhshp4;
646:   ts->rhshessianproduct_guu = rhshessianproductfunc1;
647:   ts->rhshessianproduct_gup = rhshessianproductfunc2;
648:   ts->rhshessianproduct_gpu = rhshessianproductfunc3;
649:   ts->rhshessianproduct_gpp = rhshessianproductfunc4;
650:   return 0;
651: }

653: /*@C
654:   TSComputeRHSHessianProductFunctionUU - Runs the user-defined vector-Hessian-vector product function for Guu.

656:   Collective on TS

658:   Input Parameters:
659: . ts   - The TS context obtained from TSCreate()

661:   Notes:
662:   TSComputeRHSHessianProductFunctionUU() is typically used for sensitivity implementation,
663:   so most users would not generally call this routine themselves.

665:   Level: developer

667: .seealso: TSSetRHSHessianProduct()
668: @*/
669: PetscErrorCode TSComputeRHSHessianProductFunctionUU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
670: {
671:   if (!VHV) return 0;

675:   PetscStackPush("TS user RHSHessianProduct function 1 for sensitivity analysis");
676:   (*ts->rhshessianproduct_guu)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
677:   PetscStackPop;
678:   return 0;
679: }

681: /*@C
682:   TSComputeRHSHessianProductFunctionUP - Runs the user-defined vector-Hessian-vector product function for Gup.

684:   Collective on TS

686:   Input Parameters:
687: . ts   - The TS context obtained from TSCreate()

689:   Notes:
690:   TSComputeRHSHessianProductFunctionUP() is typically used for sensitivity implementation,
691:   so most users would not generally call this routine themselves.

693:   Level: developer

695: .seealso: TSSetRHSHessianProduct()
696: @*/
697: PetscErrorCode TSComputeRHSHessianProductFunctionUP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
698: {
699:   if (!VHV) return 0;

703:   PetscStackPush("TS user RHSHessianProduct function 2 for sensitivity analysis");
704:   (*ts->rhshessianproduct_gup)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
705:   PetscStackPop;
706:   return 0;
707: }

709: /*@C
710:   TSComputeRHSHessianProductFunctionPU - Runs the user-defined vector-Hessian-vector product function for Gpu.

712:   Collective on TS

714:   Input Parameters:
715: . ts   - The TS context obtained from TSCreate()

717:   Notes:
718:   TSComputeRHSHessianProductFunctionPU() is typically used for sensitivity implementation,
719:   so most users would not generally call this routine themselves.

721:   Level: developer

723: .seealso: TSSetRHSHessianProduct()
724: @*/
725: PetscErrorCode TSComputeRHSHessianProductFunctionPU(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
726: {
727:   if (!VHV) return 0;

731:   PetscStackPush("TS user RHSHessianProduct function 3 for sensitivity analysis");
732:   (*ts->rhshessianproduct_gpu)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
733:   PetscStackPop;
734:   return 0;
735: }

737: /*@C
738:   TSComputeRHSHessianProductFunctionPP - Runs the user-defined vector-Hessian-vector product function for Gpp.

740:   Collective on TS

742:   Input Parameters:
743: . ts   - The TS context obtained from TSCreate()

745:   Notes:
746:   TSComputeRHSHessianProductFunctionPP() is typically used for sensitivity implementation,
747:   so most users would not generally call this routine themselves.

749:   Level: developer

751: .seealso: TSSetRHSHessianProduct()
752: @*/
753: PetscErrorCode TSComputeRHSHessianProductFunctionPP(TS ts,PetscReal t,Vec U,Vec *Vl,Vec Vr,Vec *VHV)
754: {
755:   if (!VHV) return 0;

759:   PetscStackPush("TS user RHSHessianProduct function 4 for sensitivity analysis");
760:   (*ts->rhshessianproduct_gpp)(ts,t,U,Vl,Vr,VHV,ts->rhshessianproductctx);
761:   PetscStackPop;
762:   return 0;
763: }

765: /* --------------------------- Adjoint sensitivity ---------------------------*/

767: /*@
768:    TSSetCostGradients - Sets the initial value of the gradients of the cost function w.r.t. initial values and w.r.t. the problem parameters
769:       for use by the TSAdjoint routines.

771:    Logically Collective on TS

773:    Input Parameters:
774: +  ts - the TS context obtained from TSCreate()
775: .  numcost - number of gradients to be computed, this is the number of cost functions
776: .  lambda - gradients with respect to the initial condition variables, the dimension and parallel layout of these vectors is the same as the ODE solution vector
777: -  mu - gradients with respect to the parameters, the number of entries in these vectors is the same as the number of parameters

779:    Level: beginner

781:    Notes:
782:     The entries in these vectors must be correctly initialized with the values lambda_i = df/dy|finaltime and mu_i = df/dp|finaltime

784:    After TSAdjointSolve() is called, lambda and mu contain the computed sensitivities

786: .seealso: TSGetCostGradients()
787: @*/
788: PetscErrorCode TSSetCostGradients(TS ts,PetscInt numcost,Vec *lambda,Vec *mu)
789: {
792:   ts->vecs_sensi  = lambda;
793:   ts->vecs_sensip = mu;
795:   ts->numcost  = numcost;
796:   return 0;
797: }
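
/*
   A minimal sketch for a single cost function (not part of tssen.c; np is the illustrative number of
   parameters and U is the solution vector):

     Vec lambda[1], mu[1];
     VecDuplicate(U, &lambda[0]);                  // same layout as the ODE solution
     VecCreateSeq(PETSC_COMM_SELF, np, &mu[0]);    // one entry per parameter
     // fill lambda[0] with df/dy and mu[0] with df/dp evaluated at the final time
     TSSetCostGradients(ts, 1, lambda, mu);
     TSAdjointSolve(ts);                           // lambda[0] and mu[0] now hold the sensitivities
*/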

799: /*@
800:    TSGetCostGradients - Returns the gradients from the TSAdjointSolve()

802:    Not Collective, but Vec returned is parallel if TS is parallel

804:    Input Parameter:
805: .  ts - the TS context obtained from TSCreate()

807:    Output Parameters:
808: +  numcost - size of returned arrays
809: .  lambda - vectors containing the gradients of the cost functions with respect to the ODE/DAE solution variables
810: -  mu - vectors containing the gradients of the cost functions with respect to the problem parameters

812:    Level: intermediate

814: .seealso: TSSetCostGradients()
815: @*/
816: PetscErrorCode TSGetCostGradients(TS ts,PetscInt *numcost,Vec **lambda,Vec **mu)
817: {
819:   if (numcost) *numcost = ts->numcost;
820:   if (lambda)  *lambda  = ts->vecs_sensi;
821:   if (mu)      *mu      = ts->vecs_sensip;
822:   return 0;
823: }

825: /*@
826:    TSSetCostHessianProducts - Sets the initial value of the Hessian-vector products of the cost function w.r.t. initial values and w.r.t. the problem parameters
827:       for use by the TSAdjoint routines.

829:    Logically Collective on TS

831:    Input Parameters:
832: +  ts - the TS context obtained from TSCreate()
833: .  numcost - number of cost functions
834: .  lambda2 - Hessian-vector product with respect to the initial condition variables, the dimension and parallel layout of these vectors is the same as the ODE solution vector
835: .  mu2 - Hessian-vector product with respect to the parameters, the number of entries in these vectors is the same as the number of parameters
836: -  dir - the direction vector that is multiplied with the Hessian of the cost functions

838:    Level: beginner

840:    Notes: The Hessian of the cost function is completely different from the Hessian of the ODE/DAE system

842:    For second-order adjoint, one needs to call this function and then TSAdjointSetForward() before TSSolve().

844:    After TSAdjointSolve() is called, lambda2 and mu2 will contain the computed second-order adjoint sensitivities, which can be used to produce a Hessian-vector product (not the full Hessian matrix). Users must provide a direction vector; it is usually generated by an optimization solver.

846:    Passing NULL for lambda2 disables the second-order calculation.
847: .seealso: TSAdjointSetForward()
848: @*/
849: PetscErrorCode TSSetCostHessianProducts(TS ts,PetscInt numcost,Vec *lambda2,Vec *mu2,Vec dir)
850: {
853:   ts->numcost       = numcost;
854:   ts->vecs_sensi2   = lambda2;
855:   ts->vecs_sensi2p  = mu2;
856:   ts->vec_dir       = dir;
857:   return 0;
858: }
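
/*
   A sketch of the second-order adjoint call sequence (not part of tssen.c; lambda, mu, lambda2, mu2 and
   dir are illustrative vectors, dir typically being supplied by an optimization solver):

     TSSetCostHessianProducts(ts, 1, lambda2, mu2, dir);
     TSAdjointSetForward(ts, NULL);           // NULL didp: products w.r.t. the initial conditions
     TSSolve(ts, U);                          // forward run carries the tangent linear variables
     TSSetCostGradients(ts, 1, lambda, mu);   // terminal conditions for the first-order adjoint
     TSAdjointSolve(ts);                      // lambda2[0] and mu2[0] now hold the Hessian-vector products
     TSAdjointResetForward(ts);
*/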

860: /*@
861:    TSGetCostHessianProducts - Returns the Hessian-vector products computed by TSAdjointSolve()

863:    Not Collective, but Vec returned is parallel if TS is parallel

865:    Input Parameter:
866: .  ts - the TS context obtained from TSCreate()

868:    Output Parameters:
869: +  numcost - number of cost functions
870: .  lambda2 - Hessian-vector product with respect to the initial condition variables, the dimension and parallel layout of these vectors is the same as the ODE solution vector
871: .  mu2 - Hessian-vector product with respect to the parameters, the number of entries in these vectors is the same as the number of parameters
872: -  dir - the direction vector that is multiplied with the Hessian of the cost functions

874:    Level: intermediate

876: .seealso: TSSetCostHessianProducts()
877: @*/
878: PetscErrorCode TSGetCostHessianProducts(TS ts,PetscInt *numcost,Vec **lambda2,Vec **mu2, Vec *dir)
879: {
881:   if (numcost) *numcost = ts->numcost;
882:   if (lambda2) *lambda2 = ts->vecs_sensi2;
883:   if (mu2)     *mu2     = ts->vecs_sensi2p;
884:   if (dir)     *dir     = ts->vec_dir;
885:   return 0;
886: }

888: /*@
889:   TSAdjointSetForward - Triggers the tangent linear solver and initializes the forward sensitivities

891:   Logically Collective on TS

893:   Input Parameters:
894: +  ts - the TS context obtained from TSCreate()
895: -  didp - the derivative of initial values w.r.t. parameters

897:   Level: intermediate

899:   Notes: When computing sensitivities w.r.t. the initial condition, set didp to NULL so that the solver treats it mathematically as an identity matrix. TSAdjoint does not reset the tangent linear solver automatically; TSAdjointResetForward() should be called to reset it.

901: .seealso: TSSetCostHessianProducts(), TSAdjointResetForward()
902: @*/
903: PetscErrorCode TSAdjointSetForward(TS ts,Mat didp)
904: {
905:   Mat            A;
906:   Vec            sp;
907:   PetscScalar    *xarr;
908:   PetscInt       lsize;

910:   ts->forward_solve = PETSC_TRUE; /* turn on tangent linear mode */
913:   /* create a single-column dense matrix */
914:   VecGetLocalSize(ts->vec_sol,&lsize);
915:   MatCreateDense(PetscObjectComm((PetscObject)ts),lsize,PETSC_DECIDE,PETSC_DECIDE,1,NULL,&A);

917:   VecDuplicate(ts->vec_sol,&sp);
918:   MatDenseGetColumn(A,0,&xarr);
919:   VecPlaceArray(sp,xarr);
920:   if (ts->vecs_sensi2p) { /* tangent linear variable initialized as 2*dIdP*dir */
921:     if (didp) {
922:       MatMult(didp,ts->vec_dir,sp);
923:       VecScale(sp,2.);
924:     } else {
925:       VecZeroEntries(sp);
926:     }
927:   } else { /* tangent linear variable initialized as dir */
928:     VecCopy(ts->vec_dir,sp);
929:   }
930:   VecResetArray(sp);
931:   MatDenseRestoreColumn(A,&xarr);
932:   VecDestroy(&sp);

934:   TSForwardSetInitialSensitivities(ts,A); /* if didp is NULL, identity matrix is assumed */

936:   MatDestroy(&A);
937:   return 0;
938: }

940: /*@
941:   TSAdjointResetForward - Resets the tangent linear solver and destroys the tangent linear context

943:   Logically Collective on TS

945:   Input Parameter:
946: .  ts - the TS context obtained from TSCreate()

948:   Level: intermediate

950: .seealso: TSAdjointSetForward()
951: @*/
952: PetscErrorCode TSAdjointResetForward(TS ts)
953: {
954:   ts->forward_solve = PETSC_FALSE; /* turn off tangent linear mode */
955:   TSForwardReset(ts);
956:   return 0;
957: }

959: /*@
960:    TSAdjointSetUp - Sets up the internal data structures for the later use
961:    of an adjoint solver

963:    Collective on TS

965:    Input Parameter:
966: .  ts - the TS context obtained from TSCreate()

968:    Level: advanced

970: .seealso: TSCreate(), TSAdjointStep(), TSSetCostGradients()
971: @*/
972: PetscErrorCode TSAdjointSetUp(TS ts)
973: {
974:   TSTrajectory     tj;
975:   PetscBool        match;

978:   if (ts->adjointsetupcalled) return 0;
981:   TSGetTrajectory(ts,&tj);
982:   PetscObjectTypeCompare((PetscObject)tj,TSTRAJECTORYBASIC,&match);
983:   if (match) {
984:     PetscBool solution_only;
985:     TSTrajectoryGetSolutionOnly(tj,&solution_only);
987:   }
988:   TSTrajectorySetUseHistory(tj,PETSC_FALSE); /* do not use TSHistory */

990:   if (ts->quadraturets) { /* if there is integral in the cost function */
991:     VecDuplicate(ts->vecs_sensi[0],&ts->vec_drdu_col);
992:     if (ts->vecs_sensip) {
993:       VecDuplicate(ts->vecs_sensip[0],&ts->vec_drdp_col);
994:     }
995:   }

997:   if (ts->ops->adjointsetup) {
998:     (*ts->ops->adjointsetup)(ts);
999:   }
1000:   ts->adjointsetupcalled = PETSC_TRUE;
1001:   return 0;
1002: }

1004: /*@
1005:    TSAdjointReset - Resets a TSAdjoint context and removes any allocated Vecs and Mats.

1007:    Collective on TS

1009:    Input Parameter:
1010: .  ts - the TS context obtained from TSCreate()

1012:    Level: beginner

1014: .seealso: TSCreate(), TSAdjointSetUp(), TSDestroy()
1015: @*/
1016: PetscErrorCode TSAdjointReset(TS ts)
1017: {
1019:   if (ts->ops->adjointreset) {
1020:     (*ts->ops->adjointreset)(ts);
1021:   }
1022:   if (ts->quadraturets) { /* if there is integral in the cost function */
1023:     VecDestroy(&ts->vec_drdu_col);
1024:     if (ts->vecs_sensip) {
1025:       VecDestroy(&ts->vec_drdp_col);
1026:     }
1027:   }
1028:   ts->vecs_sensi         = NULL;
1029:   ts->vecs_sensip        = NULL;
1030:   ts->vecs_sensi2        = NULL;
1031:   ts->vecs_sensi2p       = NULL;
1032:   ts->vec_dir            = NULL;
1033:   ts->adjointsetupcalled = PETSC_FALSE;
1034:   return 0;
1035: }

1037: /*@
1038:    TSAdjointSetSteps - Sets the number of steps the adjoint solver should take backward in time

1040:    Logically Collective on TS

1042:    Input Parameters:
1043: +  ts - the TS context obtained from TSCreate()
1044: -  steps - number of steps to use

1046:    Level: intermediate

1048:    Notes:
1049:     Normally one does not call this, and TSAdjointSolve() integrates back to the original time step. One can call this
1050:           in order to integrate back only part of the way, stopping before the original time step is reached

1052: .seealso: TSSetExactFinalTime()
1053: @*/
1054: PetscErrorCode TSAdjointSetSteps(TS ts,PetscInt steps)
1055: {
1060:   ts->adjoint_max_steps = steps;
1061:   return 0;
1062: }

1064: /*@C
1065:   TSAdjointSetRHSJacobian - Deprecated, use TSSetRHSJacobianP()

1067:   Level: deprecated

1069: @*/
1070: PetscErrorCode TSAdjointSetRHSJacobian(TS ts,Mat Amat,PetscErrorCode (*func)(TS,PetscReal,Vec,Mat,void*),void *ctx)
1071: {

1075:   ts->rhsjacobianp    = func;
1076:   ts->rhsjacobianpctx = ctx;
1077:   if (Amat) {
1078:     PetscObjectReference((PetscObject)Amat);
1079:     MatDestroy(&ts->Jacp);
1080:     ts->Jacp = Amat;
1081:   }
1082:   return 0;
1083: }

1085: /*@C
1086:   TSAdjointComputeRHSJacobian - Deprecated, use TSComputeRHSJacobianP()

1088:   Level: deprecated

1090: @*/
1091: PetscErrorCode TSAdjointComputeRHSJacobian(TS ts,PetscReal t,Vec U,Mat Amat)
1092: {

1097:   PetscStackPush("TS user JacobianP function for sensitivity analysis");
1098:   (*ts->rhsjacobianp)(ts,t,U,Amat,ts->rhsjacobianpctx);
1099:   PetscStackPop;
1100:   return 0;
1101: }

1103: /*@
1104:   TSAdjointComputeDRDYFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobian()

1106:   Level: deprecated

1108: @*/
1109: PetscErrorCode TSAdjointComputeDRDYFunction(TS ts,PetscReal t,Vec U,Vec *DRDU)
1110: {

1114:   PetscStackPush("TS user DRDY function for sensitivity analysis");
1115:   (*ts->drdufunction)(ts,t,U,DRDU,ts->costintegrandctx);
1116:   PetscStackPop;
1117:   return 0;
1118: }

1120: /*@
1121:   TSAdjointComputeDRDPFunction - Deprecated, use TSGetQuadratureTS() then TSComputeRHSJacobianP()

1123:   Level: deprecated

1125: @*/
1126: PetscErrorCode TSAdjointComputeDRDPFunction(TS ts,PetscReal t,Vec U,Vec *DRDP)
1127: {

1131:   PetscStackPush("TS user DRDP function for sensitivity analysis");
1132:   (*ts->drdpfunction)(ts,t,U,DRDP,ts->costintegrandctx);
1133:   PetscStackPop;
1134:   return 0;
1135: }

1137: /*@C
1138:    TSAdjointMonitorSensi - Monitors the first lambda sensitivity

1140:    Level: intermediate

1142: .seealso: TSAdjointMonitorSet()
1143: @*/
1144: PetscErrorCode TSAdjointMonitorSensi(TS ts,PetscInt step,PetscReal ptime,Vec v,PetscInt numcost,Vec *lambda,Vec *mu,PetscViewerAndFormat *vf)
1145: {
1146:   PetscViewer    viewer = vf->viewer;

1149:   PetscViewerPushFormat(viewer,vf->format);
1150:   VecView(lambda[0],viewer);
1151:   PetscViewerPopFormat(viewer);
1152:   return 0;
1153: }

1155: /*@C
1156:    TSAdjointMonitorSetFromOptions - Sets a monitor function and viewer appropriate for the type indicated by the user

1158:    Collective on TS

1160:    Input Parameters:
1161: +  ts - TS object you wish to monitor
1162: .  name - the monitor type one is seeking
1163: .  help - message indicating what monitoring is done
1164: .  manual - manual page for the monitor
1165: .  monitor - the monitor function
1166: -  monitorsetup - a function that is called once, only if the user selected this monitor, and that may set additional features of the TS or PetscViewer objects

1168:    Level: developer

1170: .seealso: PetscOptionsGetViewer(), PetscOptionsGetReal(), PetscOptionsHasName(), PetscOptionsGetString(),
1171:           PetscOptionsGetIntArray(), PetscOptionsGetRealArray(), PetscOptionsBool()
1172:           PetscOptionsInt(), PetscOptionsString(), PetscOptionsReal(), PetscOptionsBool(),
1173:           PetscOptionsName(), PetscOptionsBegin(), PetscOptionsEnd(), PetscOptionsHead(),
1174:           PetscOptionsStringArray(),PetscOptionsRealArray(), PetscOptionsScalar(),
1175:           PetscOptionsBoolGroupBegin(), PetscOptionsBoolGroup(), PetscOptionsBoolGroupEnd(),
1176:           PetscOptionsFList(), PetscOptionsEList()
1177: @*/
1178: PetscErrorCode TSAdjointMonitorSetFromOptions(TS ts,const char name[],const char help[], const char manual[],PetscErrorCode (*monitor)(TS,PetscInt,PetscReal,Vec,PetscInt,Vec*,Vec*,PetscViewerAndFormat*),PetscErrorCode (*monitorsetup)(TS,PetscViewerAndFormat*))
1179: {
1180:   PetscViewer       viewer;
1181:   PetscViewerFormat format;
1182:   PetscBool         flg;

1184:   PetscOptionsGetViewer(PetscObjectComm((PetscObject)ts),((PetscObject) ts)->options,((PetscObject)ts)->prefix,name,&viewer,&format,&flg);
1185:   if (flg) {
1186:     PetscViewerAndFormat *vf;
1187:     PetscViewerAndFormatCreate(viewer,format,&vf);
1188:     PetscObjectDereference((PetscObject)viewer);
1189:     if (monitorsetup) {
1190:       (*monitorsetup)(ts,vf);
1191:     }
1192:     TSAdjointMonitorSet(ts,(PetscErrorCode (*)(TS,PetscInt,PetscReal,Vec,PetscInt,Vec*,Vec*,void*))monitor,vf,(PetscErrorCode (*)(void**))PetscViewerAndFormatDestroy);
1193:   }
1194:   return 0;
1195: }

1197: /*@C
1198:    TSAdjointMonitorSet - Sets an ADDITIONAL function that is to be used at every
1199:    timestep to display the iteration's progress.

1201:    Logically Collective on TS

1203:    Input Parameters:
1204: +  ts - the TS context obtained from TSCreate()
1205: .  adjointmonitor - monitoring routine
1206: .  adjointmctx - [optional] user-defined context for private data for the
1207:              monitor routine (use NULL if no context is desired)
1208: -  adjointmonitordestroy - [optional] routine that frees monitor context
1209:           (may be NULL)

1211:    Calling sequence of monitor:
1212: $    int adjointmonitor(TS ts,PetscInt steps,PetscReal time,Vec u,PetscInt numcost,Vec *lambda, Vec *mu,void *adjointmctx)

1214: +    ts - the TS context
1215: .    steps - iteration number (after the final time step the monitor routine is called with a step of -1; this is at the final time, which may have
1216:                                been interpolated to)
1217: .    time - current time
1218: .    u - current iterate
1219: .    numcost - number of cost functions
1220: .    lambda - sensitivities to initial conditions
1221: .    mu - sensitivities to parameters
1222: -    adjointmctx - [optional] adjoint monitoring context

1224:    Notes:
1225:    This routine adds an additional monitor to the list of monitors that
1226:    already has been loaded.

1228:    Fortran Notes:
1229:     Only a single monitor function can be set for each TS object

1231:    Level: intermediate

1233: .seealso: TSAdjointMonitorCancel()
1234: @*/
1235: PetscErrorCode TSAdjointMonitorSet(TS ts,PetscErrorCode (*adjointmonitor)(TS,PetscInt,PetscReal,Vec,PetscInt,Vec*,Vec*,void*),void *adjointmctx,PetscErrorCode (*adjointmdestroy)(void**))
1236: {
1237:   PetscInt       i;
1238:   PetscBool      identical;

1241:   for (i=0; i<ts->numbermonitors;i++) {
1242:     PetscMonitorCompare((PetscErrorCode (*)(void))adjointmonitor,adjointmctx,adjointmdestroy,(PetscErrorCode (*)(void))ts->adjointmonitor[i],ts->adjointmonitorcontext[i],ts->adjointmonitordestroy[i],&identical);
1243:     if (identical) return 0;
1244:   }
1246:   ts->adjointmonitor[ts->numberadjointmonitors]          = adjointmonitor;
1247:   ts->adjointmonitordestroy[ts->numberadjointmonitors]   = adjointmdestroy;
1248:   ts->adjointmonitorcontext[ts->numberadjointmonitors++] = (void*)adjointmctx;
1249:   return 0;
1250: }
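
/*
   A sketch of a user-defined adjoint monitor (not part of tssen.c; MyAdjointMonitor is an illustrative name):

     static PetscErrorCode MyAdjointMonitor(TS ts, PetscInt step, PetscReal time, Vec u, PetscInt numcost, Vec *lambda, Vec *mu, void *mctx)
     {
       PetscReal norm;
       VecNorm(lambda[0], NORM_2, &norm);
       PetscPrintf(PetscObjectComm((PetscObject)ts), "adjoint step %D: ||lambda[0]|| = %g\n", step, (double)norm);
       return 0;
     }

     TSAdjointMonitorSet(ts, MyAdjointMonitor, NULL, NULL);
*/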

1252: /*@C
1253:    TSAdjointMonitorCancel - Clears all the adjoint monitors that have been set on a time-step object.

1255:    Logically Collective on TS

1257:    Input Parameter:
1258: .  ts - the TS context obtained from TSCreate()

1260:    Notes:
1261:    There is no way to remove a single, specific monitor.

1263:    Level: intermediate

1265: .seealso: TSAdjointMonitorSet()
1266: @*/
1267: PetscErrorCode TSAdjointMonitorCancel(TS ts)
1268: {
1269:   PetscInt       i;

1272:   for (i=0; i<ts->numberadjointmonitors; i++) {
1273:     if (ts->adjointmonitordestroy[i]) {
1274:       (*ts->adjointmonitordestroy[i])(&ts->adjointmonitorcontext[i]);
1275:     }
1276:   }
1277:   ts->numberadjointmonitors = 0;
1278:   return 0;
1279: }

1281: /*@C
1282:    TSAdjointMonitorDefault - The default monitor for adjoint computations

1284:    Level: intermediate

1286: .seealso: TSAdjointMonitorSet()
1287: @*/
1288: PetscErrorCode TSAdjointMonitorDefault(TS ts,PetscInt step,PetscReal ptime,Vec v,PetscInt numcost,Vec *lambda,Vec *mu,PetscViewerAndFormat *vf)
1289: {
1290:   PetscViewer    viewer = vf->viewer;

1293:   PetscViewerPushFormat(viewer,vf->format);
1294:   PetscViewerASCIIAddTab(viewer,((PetscObject)ts)->tablevel);
1295:   PetscViewerASCIIPrintf(viewer,"%D TS dt %g time %g%s",step,(double)ts->time_step,(double)ptime,ts->steprollback ? " (r)\n" : "\n");
1296:   PetscViewerASCIISubtractTab(viewer,((PetscObject)ts)->tablevel);
1297:   PetscViewerPopFormat(viewer);
1298:   return 0;
1299: }

1301: /*@C
1302:    TSAdjointMonitorDrawSensi - Monitors progress of the adjoint TS solvers by calling
1303:    VecView() for the sensitivities to initial states at each timestep

1305:    Collective on TS

1307:    Input Parameters:
1308: +  ts - the TS context
1309: .  step - current time-step
1310: .  ptime - current time
1311: .  u - current state
1312: .  numcost - number of cost functions
1313: .  lambda - sensitivities to initial conditions
1314: .  mu - sensitivities to parameters
1315: -  dummy - the TSMonitorDrawCtx created with TSMonitorDrawCtxCreate()

1317:    Level: intermediate

1319: .seealso: TSAdjointMonitorSet(), TSAdjointMonitorDefault(), VecView()
1320: @*/
1321: PetscErrorCode TSAdjointMonitorDrawSensi(TS ts,PetscInt step,PetscReal ptime,Vec u,PetscInt numcost,Vec *lambda,Vec *mu,void *dummy)
1322: {
1323:   TSMonitorDrawCtx ictx = (TSMonitorDrawCtx)dummy;
1324:   PetscDraw        draw;
1325:   PetscReal        xl,yl,xr,yr,h;
1326:   char             time[32];

1328:   if (!(((ictx->howoften > 0) && (!(step % ictx->howoften))) || ((ictx->howoften == -1) && ts->reason))) return 0;

1330:   VecView(lambda[0],ictx->viewer);
1331:   PetscViewerDrawGetDraw(ictx->viewer,0,&draw);
1332:   PetscSNPrintf(time,32,"Timestep %d Time %g",(int)step,(double)ptime);
1333:   PetscDrawGetCoordinates(draw,&xl,&yl,&xr,&yr);
1334:   h    = yl + .95*(yr - yl);
1335:   PetscDrawStringCentered(draw,.5*(xl+xr),h,PETSC_DRAW_BLACK,time);
1336:   PetscDrawFlush(draw);
1337:   return 0;
1338: }

1340: /*
1341:    TSAdjointSetFromOptions - Sets various TSAdjoint parameters from user options.

1343:    Collective on TSAdjoint

1345:    Input Parameter:
1346: .  ts - the TS context

1348:    Options Database Keys:
1349: +  -ts_adjoint_solve <yes,no> - After solving the ODE/DAE, solve the adjoint problem (requires -ts_save_trajectory)
1350: .  -ts_adjoint_monitor - print information at each adjoint time step
1351: -  -ts_adjoint_monitor_draw_sensi - monitor the sensitivity of the first cost function wrt initial conditions (lambda[0]) graphically

1353:    Level: developer

1355:    Notes:
1356:     This is not normally called directly by users

1358: .seealso: TSSetSaveTrajectory(), TSTrajectorySetUp()
1359: */
1360: PetscErrorCode TSAdjointSetFromOptions(PetscOptionItems *PetscOptionsObject,TS ts)
1361: {
1362:   PetscBool      tflg,opt;

1365:   PetscOptionsHead(PetscOptionsObject,"TS Adjoint options");
1366:   tflg = ts->adjoint_solve ? PETSC_TRUE : PETSC_FALSE;
1367:   PetscOptionsBool("-ts_adjoint_solve","Solve the adjoint problem immediately after solving the forward problem","",tflg,&tflg,&opt);
1368:   if (opt) {
1369:     TSSetSaveTrajectory(ts);
1370:     ts->adjoint_solve = tflg;
1371:   }
1372:   TSAdjointMonitorSetFromOptions(ts,"-ts_adjoint_monitor","Monitor adjoint timestep size","TSAdjointMonitorDefault",TSAdjointMonitorDefault,NULL);
1373:   TSAdjointMonitorSetFromOptions(ts,"-ts_adjoint_monitor_sensi","Monitor sensitivity in the adjoint computation","TSAdjointMonitorSensi",TSAdjointMonitorSensi,NULL);
1374:   opt  = PETSC_FALSE;
1375:   PetscOptionsName("-ts_adjoint_monitor_draw_sensi","Monitor adjoint sensitivities (lambda only) graphically","TSAdjointMonitorDrawSensi",&opt);
1376:   if (opt) {
1377:     TSMonitorDrawCtx ctx;
1378:     PetscInt         howoften = 1;

1380:     PetscOptionsInt("-ts_adjoint_monitor_draw_sensi","Monitor adjoint sensitivities (lambda only) graphically","TSAdjointMonitorDrawSensi",howoften,&howoften,NULL);
1381:     TSMonitorDrawCtxCreate(PetscObjectComm((PetscObject)ts),NULL,NULL,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&ctx);
1382:     TSAdjointMonitorSet(ts,TSAdjointMonitorDrawSensi,ctx,(PetscErrorCode (*)(void**))TSMonitorDrawCtxDestroy);
1383:   }
1384:   return 0;
1385: }

1387: /*@
1388:    TSAdjointStep - Steps one time step backward in the adjoint run

1390:    Collective on TS

1392:    Input Parameter:
1393: .  ts - the TS context obtained from TSCreate()

1395:    Level: intermediate

1397: .seealso: TSAdjointSetUp(), TSAdjointSolve()
1398: @*/
1399: PetscErrorCode TSAdjointStep(TS ts)
1400: {
1401:   DM               dm;

1404:   TSGetDM(ts,&dm);
1405:   TSAdjointSetUp(ts);
1406:   ts->steps--; /* must decrease the step index before the adjoint step is taken. */

1408:   ts->reason = TS_CONVERGED_ITERATING;
1409:   ts->ptime_prev = ts->ptime;
1411:   PetscLogEventBegin(TS_AdjointStep,ts,0,0,0);
1412:   (*ts->ops->adjointstep)(ts);
1413:   PetscLogEventEnd(TS_AdjointStep,ts,0,0,0);
1414:   ts->adjoint_steps++;

1416:   if (ts->reason < 0) {
1418:   } else if (!ts->reason) {
1419:     if (ts->adjoint_steps >= ts->adjoint_max_steps) ts->reason = TS_CONVERGED_ITS;
1420:   }
1421:   return 0;
1422: }

1424: /*@
1425:    TSAdjointSolve - Solves the discrete adjoint problem for an ODE/DAE

1427:    Collective on TS

1429:    Input Parameter:
1430: .  ts - the TS context obtained from TSCreate()

1432:    Options Database:
1433: . -ts_adjoint_view_solution <viewerinfo> - views the first gradient with respect to the initial values

1435:    Level: intermediate

1437:    Notes:
1438:    This must be called after a call to TSSolve() that solves the forward problem

1440:    By default this will integrate back to the initial time; one can use TSAdjointSetSteps() to stop the backward integration at a later time

1442: .seealso: TSCreate(), TSSetCostGradients(), TSSetSolution(), TSAdjointStep()
1443: @*/
1444: PetscErrorCode TSAdjointSolve(TS ts)
1445: {
1446:   static PetscBool cite = PETSC_FALSE;
1447: #if defined(TSADJOINT_STAGE)
1448:   PetscLogStage  adjoint_stage;
1449: #endif

1452:   PetscCall(PetscCitationsRegister("@article{tsadjointpaper,\n"
1453:                                  "  title         = {{PETSc TSAdjoint: a discrete adjoint ODE solver for first-order and second-order sensitivity analysis}},\n"
1454:                                  "  author        = {Zhang, Hong and Constantinescu, Emil M.  and Smith, Barry F.},\n"
1455:                                  "  journal       = {arXiv e-preprints},\n"
1456:                                  "  eprint        = {1912.07696},\n"
1457:                                  "  archivePrefix = {arXiv},\n"
1458:                                  "  year          = {2019}\n}\n",&cite));
1459: #if defined(TSADJOINT_STAGE)
1460:   PetscLogStageRegister("TSAdjoint",&adjoint_stage);
1461:   PetscLogStagePush(adjoint_stage);
1462: #endif
1463:   TSAdjointSetUp(ts);

1465:   /* reset time step and iteration counters */
1466:   ts->adjoint_steps     = 0;
1467:   ts->ksp_its           = 0;
1468:   ts->snes_its          = 0;
1469:   ts->num_snes_failures = 0;
1470:   ts->reject            = 0;
1471:   ts->reason            = TS_CONVERGED_ITERATING;

1473:   if (!ts->adjoint_max_steps) ts->adjoint_max_steps = ts->steps;
1474:   if (ts->adjoint_steps >= ts->adjoint_max_steps) ts->reason = TS_CONVERGED_ITS;

1476:   while (!ts->reason) {
1477:     TSTrajectoryGet(ts->trajectory,ts,ts->steps,&ts->ptime);
1478:     TSAdjointMonitor(ts,ts->steps,ts->ptime,ts->vec_sol,ts->numcost,ts->vecs_sensi,ts->vecs_sensip);
1479:     TSAdjointEventHandler(ts);
1480:     TSAdjointStep(ts);
1481:     if ((ts->vec_costintegral || ts->quadraturets) && !ts->costintegralfwd) {
1482:       TSAdjointCostIntegral(ts);
1483:     }
1484:   }
1485:   if (!ts->steps) {
1486:     TSTrajectoryGet(ts->trajectory,ts,ts->steps,&ts->ptime);
1487:     TSAdjointMonitor(ts,ts->steps,ts->ptime,ts->vec_sol,ts->numcost,ts->vecs_sensi,ts->vecs_sensip);
1488:   }
1489:   ts->solvetime = ts->ptime;
1490:   TSTrajectoryViewFromOptions(ts->trajectory,NULL,"-ts_trajectory_view");
1491:   VecViewFromOptions(ts->vecs_sensi[0],(PetscObject) ts, "-ts_adjoint_view_solution");
1492:   ts->adjoint_max_steps = 0;
1493: #if defined(TSADJOINT_STAGE)
1494:   PetscLogStagePop();
1495: #endif
1496:   return 0;
1497: }
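
/*
   A typical forward-then-adjoint call sequence looks like (a sketch, not part of tssen.c):

     TSSetSaveTrajectory(ts);                 // the adjoint replays the stored forward trajectory
     TSSolve(ts, U);                          // forward solve
     // set the terminal values lambda = df/dy|finaltime, mu = df/dp|finaltime
     TSSetCostGradients(ts, 1, lambda, mu);
     TSAdjointSolve(ts);                      // backward solve; lambda and mu now hold the sensitivities
*/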

1499: /*@C
1500:    TSAdjointMonitor - Runs all user-provided adjoint monitor routines set using TSAdjointMonitorSet()

1502:    Collective on TS

1504:    Input Parameters:
1505: +  ts - time stepping context obtained from TSCreate()
1506: .  step - step number that has just completed
1507: .  ptime - model time of the state
1508: .  u - state at the current model time
1509: .  numcost - number of cost functions (dimension of lambda  or mu)
1510: .  lambda - vectors containing the gradients of the cost functions with respect to the ODE/DAE solution variables
1511: -  mu - vectors containing the gradients of the cost functions with respect to the problem parameters

1513:    Notes:
1514:    TSAdjointMonitor() is typically used automatically within the time stepping implementations.
1515:    Users would almost never call this routine directly.

1517:    Level: developer

1519: @*/
1520: PetscErrorCode TSAdjointMonitor(TS ts,PetscInt step,PetscReal ptime,Vec u,PetscInt numcost,Vec *lambda, Vec *mu)
1521: {
1522:   PetscInt       i,n = ts->numberadjointmonitors;

1526:   VecLockReadPush(u);
1527:   for (i=0; i<n; i++) {
1528:     (*ts->adjointmonitor[i])(ts,step,ptime,u,numcost,lambda,mu,ts->adjointmonitorcontext[i]);
1529:   }
1530:   VecLockReadPop(u);
1531:   return 0;
1532: }

1534: /*@
1535:    TSAdjointCostIntegral - Evaluate the cost integral in the adjoint run.

1537:    Collective on TS

1539:    Input Parameter:
1540: .  ts - time stepping context

1542:    Level: advanced

1544:    Notes:
1545:    This function cannot be called until TSAdjointStep() has been completed.

1547: .seealso: TSAdjointSolve(), TSAdjointStep()
1548: @*/
1549: PetscErrorCode TSAdjointCostIntegral(TS ts)
1550: {
1553:   (*ts->ops->adjointintegral)(ts);
1554:   return 0;
1555: }

1557: /* ------------------ Forward (tangent linear) sensitivity  ------------------*/

1559: /*@
1560:   TSForwardSetUp - Sets up the internal data structures for the later use
1561:   of forward sensitivity analysis

1563:   Collective on TS

1565:   Input Parameter:
1566: . ts - the TS context obtained from TSCreate()

1568:   Level: advanced

1570: .seealso: TSCreate(), TSDestroy(), TSSetUp()
1571: @*/
1572: PetscErrorCode TSForwardSetUp(TS ts)
1573: {
1575:   if (ts->forwardsetupcalled) return 0;
1576:   if (ts->ops->forwardsetup) {
1577:     (*ts->ops->forwardsetup)(ts);
1578:   }
1579:   VecDuplicate(ts->vec_sol,&ts->vec_sensip_col);
1580:   ts->forwardsetupcalled = PETSC_TRUE;
1581:   return 0;
1582: }

1584: /*@
1585:   TSForwardReset - Resets the internal data structures used by forward sensitivity analysis

1587:   Collective on TS

1589:   Input Parameter:
1590: . ts - the TS context obtained from TSCreate()

1592:   Level: advanced

1594: .seealso: TSCreate(), TSDestroy(), TSForwardSetUp()
1595: @*/
1596: PetscErrorCode TSForwardReset(TS ts)
1597: {
1598:   TS             quadts = ts->quadraturets;

1601:   if (ts->ops->forwardreset) {
1602:     (*ts->ops->forwardreset)(ts);
1603:   }
1604:   MatDestroy(&ts->mat_sensip);
1605:   if (quadts) {
1606:     MatDestroy(&quadts->mat_sensip);
1607:   }
1608:   VecDestroy(&ts->vec_sensip_col);
1609:   ts->forward_solve      = PETSC_FALSE;
1610:   ts->forwardsetupcalled = PETSC_FALSE;
1611:   return 0;
1612: }

1614: /*@
1615:   TSForwardSetIntegralGradients - Sets the vectors holding the forward sensitivities of the integral term.

1617:   Input Parameters:
1618: + ts - the TS context obtained from TSCreate()
1619: . numfwdint - number of integrals
1620: - vp - the vectors containing the gradients for each integral w.r.t. parameters

1622:   Level: deprecated

1624: .seealso: TSForwardGetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1625: @*/
1626: PetscErrorCode TSForwardSetIntegralGradients(TS ts,PetscInt numfwdint,Vec *vp)
1627: {
1630:   if (!ts->numcost) ts->numcost = numfwdint;

1632:   ts->vecs_integral_sensip = vp;
1633:   return 0;
1634: }

1636: /*@
1637:   TSForwardGetIntegralGradients - Returns the forward sensitivities of the integral term.

1639:   Input Parameter:
1640: . ts - the TS context obtained from TSCreate()

1642:   Output Parameters:
1643: + numfwdint - number of integrals
1644: - vp - the vectors containing the gradients for each integral w.r.t. parameters

1645:   Level: deprecated

1647: .seealso: TSForwardSetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1648: @*/
1649: PetscErrorCode TSForwardGetIntegralGradients(TS ts,PetscInt *numfwdint,Vec **vp)
1650: {
1653:   if (numfwdint) *numfwdint = ts->numcost;
1654:   if (vp) *vp = ts->vecs_integral_sensip;
1655:   return 0;
1656: }

1658: /*@
1659:   TSForwardStep - Compute the forward sensitivity for one time step.

1661:   Collective on TS

1663:   Input Parameter:
1664: . ts - time stepping context

1666:   Level: advanced

1668:   Notes:
1669:   This function cannot be called until TSStep() has been completed.

1671: .seealso: TSForwardSetSensitivities(), TSForwardGetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardSetUp()
1672: @*/
1673: PetscErrorCode TSForwardStep(TS ts)
1674: {
1677:   PetscLogEventBegin(TS_ForwardStep,ts,0,0,0);
1678:   (*ts->ops->forwardstep)(ts);
1679:   PetscLogEventEnd(TS_ForwardStep,ts,0,0,0);
1681:   return 0;
1682: }

1684: /*@
1685:   TSForwardSetSensitivities - Sets the initial value of the trajectory sensitivities of the solution w.r.t. the problem parameters and initial values.

1687:   Logically Collective on TS

1689:   Input Parameters:
1690: + ts - the TS context obtained from TSCreate()
1691: . nump - number of parameters
1692: - Smat - sensitivities with respect to the parameters; the number of columns of this matrix is the same as the number of parameters

1694:   Level: beginner

1696:   Notes:
1697:   Forward sensitivity is also called 'trajectory sensitivity' in some fields such as power systems.
1698:   This function turns on a flag to trigger TSSolve() to compute forward sensitivities automatically.
1699:   You must call this function before TSSolve().
1700:   The entries in the sensitivity matrix must be correctly initialized with the values S = dy/dp at the starting time; a usage sketch follows the function body below.

1702: .seealso: TSForwardGetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1703: @*/
1704: PetscErrorCode TSForwardSetSensitivities(TS ts,PetscInt nump,Mat Smat)
1705: {
1708:   ts->forward_solve  = PETSC_TRUE;
1709:   if (nump == PETSC_DEFAULT) {
1710:     MatGetSize(Smat,NULL,&ts->num_parameters);
1711:   } else ts->num_parameters = nump;
1712:   PetscObjectReference((PetscObject)Smat);
1713:   MatDestroy(&ts->mat_sensip);
1714:   ts->mat_sensip = Smat;
1715:   return 0;
1716: }
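
/*
   Illustrative usage sketch (not part of the original source): setting up forward (tangent linear)
   sensitivities before TSSolve(), assuming n state variables, p parameters, and dy/dp = 0 at the
   starting time. The names Sp, n, p and U are assumptions made for this example only.

     Mat Sp;
     MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,p,NULL,&Sp);
     MatZeroEntries(Sp);                    (S = dy/dp at the starting time, here assumed zero)
     TSForwardSetSensitivities(ts,p,Sp);    (must be called before TSSolve())
     TSSolve(ts,U);
*/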

1718: /*@
1719:   TSForwardGetSensitivities - Returns the trajectory sensitivities

1721:   Not Collective, but the Mat returned is parallel if TS is parallel

1723:   Input Parameter:
1724: . ts - the TS context obtained from TSCreate()

1725:   Output Parameters:
1726: + nump - number of parameters
1727: - Smat - sensitivities with respect to the parameters; the number of columns of this matrix is the same as the number of parameters

1728:   Level: intermediate

1730: .seealso: TSForwardSetSensitivities(), TSForwardSetIntegralGradients(), TSForwardGetIntegralGradients(), TSForwardStep()
1731: @*/
1732: PetscErrorCode TSForwardGetSensitivities(TS ts,PetscInt *nump,Mat *Smat)
1733: {
1735:   if (nump) *nump = ts->num_parameters;
1736:   if (Smat) *Smat = ts->mat_sensip;
1737:   return 0;
1738: }
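
/*
   Illustrative usage sketch (not part of the original source): after TSSolve() completes with
   forward sensitivities enabled, the trajectory sensitivities can be retrieved and inspected.

     Mat      S;
     PetscInt np;
     TSForwardGetSensitivities(ts,&np,&S);
     MatView(S,PETSC_VIEWER_STDOUT_WORLD);
*/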

1740: /*@
1741:    TSForwardCostIntegral - Evaluate the cost integral in the forward run.

1743:    Collective on TS

1745:    Input Parameter:
1746: .  ts - time stepping context

1748:    Level: advanced

1750:    Notes:
1751:    This function cannot be called until TSStep() has been completed.

1753: .seealso: TSSolve(), TSAdjointCostIntegral()
1754: @*/
1755: PetscErrorCode TSForwardCostIntegral(TS ts)
1756: {
1759:   (*ts->ops->forwardintegral)(ts);
1760:   return 0;
1761: }

1763: /*@
1764:   TSForwardSetInitialSensitivities - Set initial values for tangent linear sensitivities

1766:   Collective on TS

1768:   Input Parameters:
1769: + ts - the TS context obtained from TSCreate()
1770: - didp - parametric sensitivities of the initial condition

1772:   Level: intermediate

1774:   Notes: TSSolve() allows users to pass the initial solution directly to TS, but the tangent linear variables cannot be initialized this way; use this function to set their initial values.

1776: .seealso: TSForwardSetSensitivities()
1777: @*/
1778: PetscErrorCode TSForwardSetInitialSensitivities(TS ts,Mat didp)
1779: {
1782:   if (!ts->mat_sensip) {
1783:     TSForwardSetSensitivities(ts,PETSC_DEFAULT,didp);
1784:   }
1785:   return 0;
1786: }
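
/*
   Illustrative usage sketch (not part of the original source): dIdP below is an assumed matrix
   holding the parametric sensitivities of the initial condition, d(y(t0))/dp.

     TSForwardSetInitialSensitivities(ts,dIdP);
     TSSolve(ts,U);
*/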

1788: /*@
1789:    TSForwardGetStages - Get the number of stages and the tangent linear sensitivities at the intermediate stages

1791:    Input Parameter:
1792: .  ts - the TS context obtained from TSCreate()

1794:    Output Parameters:
1795: +  ns - number of stages
1796: -  S - tangent linear sensitivities at the intermediate stages

1798:    Level: advanced

1800: @*/
1801: PetscErrorCode TSForwardGetStages(TS ts,PetscInt *ns,Mat **S)
1802: {

1805:   if (!ts->ops->forwardgetstages) *S = NULL; /* this TS implementation provides no tangent linear stage data */
1806:   else {
1807:     (*ts->ops->forwardgetstages)(ts,ns,S);
1808:   }
1809:   return 0;
1810: }

1812: /*@
1813:    TSCreateQuadratureTS - Create a sub-TS that evaluates integrals over time

1815:    Input Parameters:
1816: +  ts - the TS context obtained from TSCreate()
1817: -  fwd - flag indicating whether to evaluate cost integral in the forward run or the adjoint run

1819:    Output Parameter:
1820: .  quadts - the child TS context

1822:    Level: intermediate

1824: .seealso: TSGetQuadratureTS()
1825: @*/
1826: PetscErrorCode TSCreateQuadratureTS(TS ts,PetscBool fwd,TS *quadts)
1827: {
1828:   char prefix[128];

1832:   TSDestroy(&ts->quadraturets);
1833:   TSCreate(PetscObjectComm((PetscObject)ts),&ts->quadraturets);
1834:   PetscObjectIncrementTabLevel((PetscObject)ts->quadraturets,(PetscObject)ts,1);
1835:   PetscLogObjectParent((PetscObject)ts,(PetscObject)ts->quadraturets);
1836:   PetscSNPrintf(prefix,sizeof(prefix),"%squad_",((PetscObject)ts)->prefix ? ((PetscObject)ts)->prefix : "");
1837:   TSSetOptionsPrefix(ts->quadraturets,prefix);
1838:   *quadts = ts->quadraturets;

1840:   if (ts->numcost) {
1841:     VecCreateSeq(PETSC_COMM_SELF,ts->numcost,&(*quadts)->vec_sol);
1842:   } else {
1843:     VecCreateSeq(PETSC_COMM_SELF,1,&(*quadts)->vec_sol);
1844:   }
1845:   ts->costintegralfwd = fwd;
1846:   return 0;
1847: }
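
/*
   Illustrative usage sketch (not part of the original source): the quadrature sub-TS is normally
   given its own right-hand-side function that evaluates the cost integrand. CostIntegrand and ctx
   are assumed user-provided names, shown only for illustration.

     TS quadts;
     TSCreateQuadratureTS(ts,PETSC_TRUE,&quadts);
     TSSetRHSFunction(quadts,NULL,CostIntegrand,ctx);
*/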

1849: /*@
1850:    TSGetQuadratureTS - Return the sub-TS that evaluates integrals over time

1852:    Input Parameter:
1853: .  ts - the TS context obtained from TSCreate()

1855:    Output Parameters:
1856: +  fwd - flag indicating whether to evaluate cost integral in the forward run or the adjoint run
1857: -  quadts - the child TS context

1859:    Level: intermediate

1861: .seealso: TSCreateQuadratureTS()
1862: @*/
1863: PetscErrorCode TSGetQuadratureTS(TS ts,PetscBool *fwd,TS *quadts)
1864: {
1866:   if (fwd) *fwd = ts->costintegralfwd;
1867:   if (quadts) *quadts = ts->quadraturets;
1868:   return 0;
1869: }
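
/*
   Illustrative usage sketch (not part of the original source): querying the quadrature sub-TS,
   for example to view the accumulated cost values stored in its solution vector after the run.

     TS        quadts;
     PetscBool fwd;
     Vec       q;
     TSGetQuadratureTS(ts,&fwd,&quadts);
     TSGetSolution(quadts,&q);
     VecView(q,PETSC_VIEWER_STDOUT_WORLD);
*/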

1871: /*@
1872:    TSComputeSNESJacobian - Compute the SNES Jacobian

1874:    Input Parameters:
1875: +  ts - the TS context obtained from TSCreate()
1876: -  x - state vector

1878:    Output Parameters:
1879: +  J - Jacobian matrix
1880: -  Jpre - preconditioning matrix for J (may be same as J)

1882:    Level: developer

1884:    Notes:
1885:    Using SNES to compute the Jacobian enables finite differencing when the TS Jacobian is not available.
1886: @*/
1887: PetscErrorCode TSComputeSNESJacobian(TS ts,Vec x,Mat J,Mat Jpre)
1888: {
1889:   SNES           snes = ts->snes;
1890:   PetscErrorCode (*jac)(SNES,Vec,Mat,Mat,void*) = NULL;

1892:   /*
1893:     Unlike implicit methods, explicit methods do not have SNESMatFDColoring in the snes object
1894:     because SNESSolve() has not been called yet; so querying SNESMatFDColoring does not work for
1895:     explicit methods. Instead, we check the Jacobian compute function directly to determine if FD
1896:     coloring is used.
1897:   */
1898:   SNESGetJacobian(snes,NULL,NULL,&jac,NULL);
1899:   if (jac == SNESComputeJacobianDefaultColor) {
1900:     Vec f;
1901:     SNESSetSolution(snes,x);
1902:     SNESGetFunction(snes,&f,NULL,NULL);
1903:     /* Force MatFDColoringApply to evaluate the SNES residual function for the base vector */
1904:     SNESComputeFunction(snes,x,f);
1905:   }
1906:   SNESComputeJacobian(snes,x,J,Jpre);
1907:   return 0;
1908: }
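
/*
   Illustrative usage sketch (not part of the original source): sensitivity code can use this
   helper to obtain dF/dU at a given state, transparently falling back to finite-difference
   coloring when no analytic Jacobian was provided. U and J are assumed to be a state vector and
   a preallocated Jacobian matrix.

     TSComputeSNESJacobian(ts,U,J,J);
*/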