Actual source code: taosolver.c
1: #define TAO_DLL
3: #include <petsc/private/taoimpl.h>
5: PetscBool TaoRegisterAllCalled = PETSC_FALSE;
6: PetscFunctionList TaoList = NULL;
8: PetscClassId TAO_CLASSID;
10: PetscLogEvent TAO_Solve;
11: PetscLogEvent TAO_ObjectiveEval;
12: PetscLogEvent TAO_GradientEval;
13: PetscLogEvent TAO_ObjGradEval;
14: PetscLogEvent TAO_HessianEval;
15: PetscLogEvent TAO_JacobianEval;
16: PetscLogEvent TAO_ConstraintsEval;
18: const char *TaoSubSetTypes[] = {"subvec","mask","matrixfree","TaoSubSetType","TAO_SUBSET_",NULL};
20: struct _n_TaoMonitorDrawCtx {
21: PetscViewer viewer;
22: PetscInt howoften; /* when > 0 uses iteration % howoften, when negative only final solution plotted */
23: };
25: /*@
26: TaoCreate - Creates a TAO solver
28: Collective
30: Input Parameter:
31: . comm - MPI communicator
33: Output Parameter:
34: . newtao - the new Tao context
36: Available methods include:
37: + nls - Newton's method with line search for unconstrained minimization
38: . ntr - Newton's method with trust region for unconstrained minimization
39: . ntl - Newton's method with trust region, line search for unconstrained minimization
40: . lmvm - Limited memory variable metric method for unconstrained minimization
41: . cg - Nonlinear conjugate gradient method for unconstrained minimization
42: . nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
43: . tron - Newton Trust Region method for bound constrained minimization
44: . gpcg - Gradient projected conjugate gradient method for quadratic bound constrained minimization
45: . blmvm - Limited memory variable metric method for bound constrained minimization
46: . lcl - Linearly constrained Lagrangian method for pde-constrained minimization
47: - pounders - Model-based algorithm for nonlinear least squares
49: Options Database Keys:
50: . -tao_type - select which method TAO should use
52: Level: beginner
54: .seealso: TaoSolve(), TaoDestroy()
55: @*/
56: PetscErrorCode TaoCreate(MPI_Comm comm, Tao *newtao)
57: {
59: Tao tao;
63: *newtao = NULL;
65: TaoInitializePackage();
66: TaoLineSearchInitializePackage();
68: PetscHeaderCreate(tao,TAO_CLASSID,"Tao","Optimization solver","Tao",comm,TaoDestroy,TaoView);
69: tao->ops->computeobjective = NULL;
70: tao->ops->computeobjectiveandgradient = NULL;
71: tao->ops->computegradient = NULL;
72: tao->ops->computehessian = NULL;
73: tao->ops->computeresidual = NULL;
74: tao->ops->computeresidualjacobian = NULL;
75: tao->ops->computeconstraints = NULL;
76: tao->ops->computejacobian = NULL;
77: tao->ops->computejacobianequality = NULL;
78: tao->ops->computejacobianinequality = NULL;
79: tao->ops->computeequalityconstraints = NULL;
80: tao->ops->computeinequalityconstraints = NULL;
81: tao->ops->convergencetest = TaoDefaultConvergenceTest;
82: tao->ops->convergencedestroy = NULL;
83: tao->ops->computedual = NULL;
84: tao->ops->setup = NULL;
85: tao->ops->solve = NULL;
86: tao->ops->view = NULL;
87: tao->ops->setfromoptions = NULL;
88: tao->ops->destroy = NULL;
90: tao->solution=NULL;
91: tao->gradient=NULL;
92: tao->ls_res = NULL;
93: tao->ls_jac = NULL;
94: tao->constraints=NULL;
95: tao->constraints_equality=NULL;
96: tao->constraints_inequality=NULL;
97: tao->res_weights_v=NULL;
98: tao->res_weights_w=NULL;
99: tao->stepdirection=NULL;
100: tao->niter=0;
101: tao->ntotalits=0;
102: tao->XL = NULL;
103: tao->XU = NULL;
104: tao->IL = NULL;
105: tao->IU = NULL;
106: tao->DI = NULL;
107: tao->DE = NULL;
108: tao->gradient_norm = NULL;
109: tao->gradient_norm_tmp = NULL;
110: tao->hessian = NULL;
111: tao->hessian_pre = NULL;
112: tao->jacobian = NULL;
113: tao->jacobian_pre = NULL;
114: tao->jacobian_state = NULL;
115: tao->jacobian_state_pre = NULL;
116: tao->jacobian_state_inv = NULL;
117: tao->jacobian_design = NULL;
118: tao->jacobian_design_pre = NULL;
119: tao->jacobian_equality = NULL;
120: tao->jacobian_equality_pre = NULL;
121: tao->jacobian_inequality = NULL;
122: tao->jacobian_inequality_pre = NULL;
123: tao->state_is = NULL;
124: tao->design_is = NULL;
126: tao->max_it = 10000;
127: tao->max_funcs = 10000;
128: #if defined(PETSC_USE_REAL_SINGLE)
129: tao->gatol = 1e-5;
130: tao->grtol = 1e-5;
131: tao->crtol = 1e-5;
132: tao->catol = 1e-5;
133: #else
134: tao->gatol = 1e-8;
135: tao->grtol = 1e-8;
136: tao->crtol = 1e-8;
137: tao->catol = 1e-8;
138: #endif
139: tao->gttol = 0.0;
140: tao->steptol = 0.0;
141: tao->trust0 = PETSC_INFINITY;
142: tao->fmin = PETSC_NINFINITY;
143: tao->hist_malloc = PETSC_FALSE;
144: tao->hist_reset = PETSC_TRUE;
145: tao->hist_max = 0;
146: tao->hist_len = 0;
147: tao->hist_obj = NULL;
148: tao->hist_resid = NULL;
149: tao->hist_cnorm = NULL;
150: tao->hist_lits = NULL;
152: tao->numbermonitors=0;
153: tao->viewsolution=PETSC_FALSE;
154: tao->viewhessian=PETSC_FALSE;
155: tao->viewgradient=PETSC_FALSE;
156: tao->viewjacobian=PETSC_FALSE;
157: tao->viewconstraints = PETSC_FALSE;
159: tao->bounded = PETSC_FALSE;
160: tao->constrained = PETSC_FALSE;
161: tao->eq_constrained = PETSC_FALSE;
162: tao->ineq_constrained = PETSC_FALSE;
163: tao->ineq_doublesided = PETSC_FALSE;
165: tao->recycle = PETSC_FALSE;
167: tao->header_printed = PETSC_FALSE;
169: /* These flags prevent algorithms from overriding user options */
170: tao->max_it_changed =PETSC_FALSE;
171: tao->max_funcs_changed=PETSC_FALSE;
172: tao->gatol_changed =PETSC_FALSE;
173: tao->grtol_changed =PETSC_FALSE;
174: tao->gttol_changed =PETSC_FALSE;
175: tao->steptol_changed =PETSC_FALSE;
176: tao->trust0_changed =PETSC_FALSE;
177: tao->fmin_changed =PETSC_FALSE;
178: tao->catol_changed =PETSC_FALSE;
179: tao->crtol_changed =PETSC_FALSE;
180: TaoResetStatistics(tao);
181: *newtao = tao;
182: return(0);
183: }
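/*
   Illustrative usage sketch (not part of taosolver.c): the basic lifecycle of a Tao
   object created with TaoCreate().  The TAOLMVM type and the application-owned
   vector x are example choices, not requirements of the API.

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD,&tao);
     TaoSetType(tao,TAOLMVM);           (or select the method at run time with -tao_type)
     TaoSetInitialVector(tao,x);
     TaoSetFromOptions(tao);
     TaoSolve(tao);
     TaoDestroy(&tao);
*/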
185: /*@
186: TaoSolve - Solves an optimization problem min F(x) s.t. l <= x <= u
188: Collective on Tao
190: Input Parameters:
191: . tao - the Tao context
193: Notes:
194: The user must set up the Tao with calls to TaoSetInitialVector(),
195: TaoSetObjectiveRoutine(),
196: TaoSetGradientRoutine(), and (if using a second-order method) TaoSetHessianRoutine().
198: You should call TaoGetConvergedReason() or run with -tao_converged_reason to determine if the optimization algorithm actually succeeded or
199: why it failed.
201: Level: beginner
203: .seealso: TaoCreate(), TaoSetObjectiveRoutine(), TaoSetGradientRoutine(), TaoSetHessianRoutine(), TaoGetConvergedReason()
204: @*/
205: PetscErrorCode TaoSolve(Tao tao)
206: {
207: PetscErrorCode ierr;
208: static PetscBool set = PETSC_FALSE;
212: PetscCitationsRegister("@TechReport{tao-user-ref,\n"
213: "title = {Toolkit for Advanced Optimization (TAO) Users Manual},\n"
214: "author = {Todd Munson and Jason Sarich and Stefan Wild and Steve Benson and Lois Curfman McInnes},\n"
215: "Institution = {Argonne National Laboratory},\n"
216: "Year = 2014,\n"
217: "Number = {ANL/MCS-TM-322 - Revision 3.5},\n"
218: "url = {https://www.mcs.anl.gov/research/projects/tao/}\n}\n",&set);
219: tao->header_printed = PETSC_FALSE;
220: TaoSetUp(tao);
221: TaoResetStatistics(tao);
222: if (tao->linesearch) {
223: TaoLineSearchReset(tao->linesearch);
224: }
226: PetscLogEventBegin(TAO_Solve,tao,0,0,0);
227: if (tao->ops->solve) { (*tao->ops->solve)(tao); }
228: PetscLogEventEnd(TAO_Solve,tao,0,0,0);
230: VecViewFromOptions(tao->solution,(PetscObject)tao,"-tao_view_solution");
232: tao->ntotalits += tao->niter;
233: TaoViewFromOptions(tao,NULL,"-tao_view");
235: if (tao->printreason) {
236: if (tao->reason > 0) {
237: PetscPrintf(((PetscObject)tao)->comm,"TAO solve converged due to %s iterations %D\n",TaoConvergedReasons[tao->reason],tao->niter);
238: } else {
239: PetscPrintf(((PetscObject)tao)->comm,"TAO solve did not converge due to %s iteration %D\n",TaoConvergedReasons[tao->reason],tao->niter);
240: }
241: }
242: return(0);
243: }
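/*
   Illustrative usage sketch (not part of taosolver.c): wiring up a combined
   objective/gradient callback before TaoSolve() and checking the converged reason
   afterwards.  FormFunctionGradient is a hypothetical user routine; the quadratic
   objective f(x) = 0.5*||x||^2 is only an example.

     static PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f,Vec G,void *ptr)
     {
       PetscReal nrm;
       VecNorm(X,NORM_2,&nrm);        (example objective f(x) = 0.5*||x||^2)
       *f = 0.5*nrm*nrm;
       VecCopy(X,G);                  (its gradient is simply x)
       return 0;
     }

     TaoConvergedReason reason;
     TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);
     TaoSolve(tao);
     TaoGetConvergedReason(tao,&reason);
     if (reason <= 0) PetscPrintf(PETSC_COMM_WORLD,"TAO did not converge\n");
*/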
245: /*@
246: TaoSetUp - Sets up the internal data structures for the later use
247: of a Tao solver
249: Collective on tao
251: Input Parameters:
252: . tao - the TAO context
254: Notes:
255: The user will not need to explicitly call TaoSetUp(), as it will
256: automatically be called in TaoSolve(). However, if the user
257: desires to call it explicitly, it should come after TaoCreate()
258: and any TaoSetSomething() routines, but before TaoSolve().
260: Level: advanced
262: .seealso: TaoCreate(), TaoSolve()
263: @*/
264: PetscErrorCode TaoSetUp(Tao tao)
265: {
270: if (tao->setupcalled) return(0);
272: if (!tao->solution) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_ARG_WRONGSTATE,"Must call TaoSetInitialVector");
273: if (tao->ops->setup) {
274: (*tao->ops->setup)(tao);
275: }
276: tao->setupcalled = PETSC_TRUE;
277: return(0);
278: }
280: /*@C
281: TaoDestroy - Destroys the TAO context that was created with
282: TaoCreate()
284: Collective on Tao
286: Input Parameter:
287: . tao - the Tao context
289: Level: beginner
291: .seealso: TaoCreate(), TaoSolve()
292: @*/
293: PetscErrorCode TaoDestroy(Tao *tao)
294: {
298: if (!*tao) return(0);
300: if (--((PetscObject)*tao)->refct > 0) {*tao = NULL;return(0);}
302: if ((*tao)->ops->destroy) {
303: (*((*tao))->ops->destroy)(*tao);
304: }
305: KSPDestroy(&(*tao)->ksp);
306: TaoLineSearchDestroy(&(*tao)->linesearch);
308: if ((*tao)->ops->convergencedestroy) {
309: (*(*tao)->ops->convergencedestroy)((*tao)->cnvP);
310: if ((*tao)->jacobian_state_inv) {
311: MatDestroy(&(*tao)->jacobian_state_inv);
312: }
313: }
314: VecDestroy(&(*tao)->solution);
315: VecDestroy(&(*tao)->gradient);
316: VecDestroy(&(*tao)->ls_res);
318: if ((*tao)->gradient_norm) {
319: PetscObjectDereference((PetscObject)(*tao)->gradient_norm);
320: VecDestroy(&(*tao)->gradient_norm_tmp);
321: }
323: VecDestroy(&(*tao)->XL);
324: VecDestroy(&(*tao)->XU);
325: VecDestroy(&(*tao)->IL);
326: VecDestroy(&(*tao)->IU);
327: VecDestroy(&(*tao)->DE);
328: VecDestroy(&(*tao)->DI);
329: VecDestroy(&(*tao)->constraints_equality);
330: VecDestroy(&(*tao)->constraints_inequality);
331: VecDestroy(&(*tao)->stepdirection);
332: MatDestroy(&(*tao)->hessian_pre);
333: MatDestroy(&(*tao)->hessian);
334: MatDestroy(&(*tao)->ls_jac);
335: MatDestroy(&(*tao)->ls_jac_pre);
336: MatDestroy(&(*tao)->jacobian_pre);
337: MatDestroy(&(*tao)->jacobian);
338: MatDestroy(&(*tao)->jacobian_state_pre);
339: MatDestroy(&(*tao)->jacobian_state);
340: MatDestroy(&(*tao)->jacobian_state_inv);
341: MatDestroy(&(*tao)->jacobian_design);
342: MatDestroy(&(*tao)->jacobian_equality);
343: MatDestroy(&(*tao)->jacobian_equality_pre);
344: MatDestroy(&(*tao)->jacobian_inequality);
345: MatDestroy(&(*tao)->jacobian_inequality_pre);
346: ISDestroy(&(*tao)->state_is);
347: ISDestroy(&(*tao)->design_is);
348: VecDestroy(&(*tao)->res_weights_v);
349: TaoCancelMonitors(*tao);
350: if ((*tao)->hist_malloc) {
351: PetscFree4((*tao)->hist_obj,(*tao)->hist_resid,(*tao)->hist_cnorm,(*tao)->hist_lits);
352: }
353: if ((*tao)->res_weights_n) {
354: PetscFree((*tao)->res_weights_rows);
355: PetscFree((*tao)->res_weights_cols);
356: PetscFree((*tao)->res_weights_w);
357: }
358: PetscHeaderDestroy(tao);
359: return(0);
360: }
362: /*@
363: TaoSetFromOptions - Sets various Tao parameters from user
364: options.
366: Collective on Tao
368: Input Parameter:
369: . tao - the Tao solver context
371: Options Database Keys:
372: + -tao_type <type> - The algorithm that TAO uses (lmvm, nls, etc.)
373: . -tao_gatol <gatol> - absolute error tolerance for ||gradient||
374: . -tao_grtol <grtol> - relative error tolerance for ||gradient||
375: . -tao_gttol <gttol> - reduction of ||gradient|| relative to initial gradient
376: . -tao_max_it <max> - sets maximum number of iterations
377: . -tao_max_funcs <max> - sets maximum number of function evaluations
378: . -tao_fmin <fmin> - stop if function value reaches fmin
379: . -tao_steptol <tol> - stop if trust region radius less than <tol>
380: . -tao_trust0 <t> - initial trust region radius
381: . -tao_monitor - prints function value and residual at each iteration
382: . -tao_smonitor - same as -tao_monitor, but truncates very small values
383: . -tao_cmonitor - prints function value, residual, and constraint norm at each iteration
384: . -tao_view_solution - prints solution vector at each iteration
385: . -tao_view_ls_residual - prints least-squares residual vector at each iteration
386: . -tao_view_step - prints step direction vector at each iteration
387: . -tao_view_gradient - prints gradient vector at each iteration
388: . -tao_draw_solution - graphically view solution vector at each iteration
389: . -tao_draw_step - graphically view step vector at each iteration
390: . -tao_draw_gradient - graphically view gradient at each iteration
391: . -tao_fd_gradient - use gradient computed with finite differences
392: . -tao_fd_hessian - use hessian computed with finite differences
393: . -tao_mf_hessian - use matrix-free hessian computed with finite differences
394: . -tao_cancelmonitors - cancels all monitors (except those set with command line)
395: . -tao_view - prints information about the Tao after solving
396: - -tao_converged_reason - prints the reason TAO stopped iterating
398: Notes:
399: To see all options, run your program with the -help option or consult the
400: user's manual. TaoSetFromOptions() should be called after TaoCreate() but before TaoSolve().
402: Level: beginner
403: @*/
404: PetscErrorCode TaoSetFromOptions(Tao tao)
405: {
407: TaoType default_type = TAOLMVM;
408: char type[256], monfilename[PETSC_MAX_PATH_LEN];
409: PetscViewer monviewer;
410: PetscBool flg;
411: MPI_Comm comm;
415: PetscObjectGetComm((PetscObject)tao,&comm);
417: /* So no warnings are given about unused options */
418: PetscOptionsHasName(((PetscObject)tao)->options,((PetscObject)tao)->prefix,"-tao_ls_type",&flg);
420: PetscObjectOptionsBegin((PetscObject)tao);
421: {
422: TaoRegisterAll();
423: if (((PetscObject)tao)->type_name) {
424: default_type = ((PetscObject)tao)->type_name;
425: }
426: /* Check for type from options */
427: PetscOptionsFList("-tao_type","Tao Solver type","TaoSetType",TaoList,default_type,type,256,&flg);
428: if (flg) {
429: TaoSetType(tao,type);
430: } else if (!((PetscObject)tao)->type_name) {
431: TaoSetType(tao,default_type);
432: }
434: PetscOptionsReal("-tao_catol","Stop if constraint violations within","TaoSetConstraintTolerances",tao->catol,&tao->catol,&flg);
435: if (flg) tao->catol_changed=PETSC_TRUE;
436: PetscOptionsReal("-tao_crtol","Stop if relative constraint violations within","TaoSetConstraintTolerances",tao->crtol,&tao->crtol,&flg);
437: if (flg) tao->crtol_changed=PETSC_TRUE;
438: PetscOptionsReal("-tao_gatol","Stop if norm of gradient less than","TaoSetTolerances",tao->gatol,&tao->gatol,&flg);
439: if (flg) tao->gatol_changed=PETSC_TRUE;
440: PetscOptionsReal("-tao_grtol","Stop if norm of gradient divided by the function value is less than","TaoSetTolerances",tao->grtol,&tao->grtol,&flg);
441: if (flg) tao->grtol_changed=PETSC_TRUE;
442: PetscOptionsReal("-tao_gttol","Stop if the norm of the gradient is less than the norm of the initial gradient times tol","TaoSetTolerances",tao->gttol,&tao->gttol,&flg);
443: if (flg) tao->gttol_changed=PETSC_TRUE;
444: PetscOptionsInt("-tao_max_it","Stop if iteration number exceeds","TaoSetMaximumIterations",tao->max_it,&tao->max_it,&flg);
445: if (flg) tao->max_it_changed=PETSC_TRUE;
446: PetscOptionsInt("-tao_max_funcs","Stop if number of function evaluations exceeds","TaoSetMaximumFunctionEvaluations",tao->max_funcs,&tao->max_funcs,&flg);
447: if (flg) tao->max_funcs_changed=PETSC_TRUE;
448: PetscOptionsReal("-tao_fmin","Stop if function less than","TaoSetFunctionLowerBound",tao->fmin,&tao->fmin,&flg);
449: if (flg) tao->fmin_changed=PETSC_TRUE;
450: PetscOptionsReal("-tao_steptol","Stop if step size or trust region radius less than","",tao->steptol,&tao->steptol,&flg);
451: if (flg) tao->steptol_changed=PETSC_TRUE;
452: PetscOptionsReal("-tao_trust0","Initial trust region radius","TaoSetTrustRegionRadius",tao->trust0,&tao->trust0,&flg);
453: if (flg) tao->trust0_changed=PETSC_TRUE;
454: PetscOptionsString("-tao_view_solution","view solution vector after each evaluation","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
455: if (flg) {
456: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
457: TaoSetMonitor(tao,TaoSolutionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
458: }
460: PetscOptionsBool("-tao_converged_reason","Print reason for TAO converged","TaoSolve",tao->printreason,&tao->printreason,NULL);
461: PetscOptionsString("-tao_view_gradient","view gradient vector after each evaluation","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
462: if (flg) {
463: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
464: TaoSetMonitor(tao,TaoGradientMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
465: }
467: PetscOptionsString("-tao_view_stepdirection","view step direction vector after each iteration","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
468: if (flg) {
469: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
470: TaoSetMonitor(tao,TaoStepDirectionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
471: }
473: PetscOptionsString("-tao_view_residual","view least-squares residual vector after each evaluation","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
474: if (flg) {
475: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
476: TaoSetMonitor(tao,TaoResidualMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
477: }
479: PetscOptionsString("-tao_monitor","Use the default convergence monitor","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
480: if (flg) {
481: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
482: TaoSetMonitor(tao,TaoMonitorDefault,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
483: }
485: PetscOptionsString("-tao_gmonitor","Use the convergence monitor with extra globalization info","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
486: if (flg) {
487: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
488: TaoSetMonitor(tao,TaoDefaultGMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
489: }
491: PetscOptionsString("-tao_smonitor","Use the short convergence monitor","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
492: if (flg) {
493: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
494: TaoSetMonitor(tao,TaoDefaultSMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
495: }
497: PetscOptionsString("-tao_cmonitor","Use the default convergence monitor with constraint norm","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
498: if (flg) {
499: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
500: TaoSetMonitor(tao,TaoDefaultCMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
501: }
503: flg = PETSC_FALSE;
504: PetscOptionsBool("-tao_cancelmonitors","cancel all monitors and call any registered destroy routines","TaoCancelMonitors",flg,&flg,NULL);
505: if (flg) {TaoCancelMonitors(tao);}
507: flg = PETSC_FALSE;
508: PetscOptionsBool("-tao_draw_solution","Plot solution vector at each iteration","TaoSetMonitor",flg,&flg,NULL);
509: if (flg) {
510: TaoMonitorDrawCtx drawctx;
511: PetscInt howoften = 1;
512: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao),NULL,NULL,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&drawctx);
513: TaoSetMonitor(tao,TaoDrawSolutionMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
514: }
516: flg = PETSC_FALSE;
517: PetscOptionsBool("-tao_draw_step","plots step direction at each iteration","TaoSetMonitor",flg,&flg,NULL);
518: if (flg) {
519: TaoSetMonitor(tao,TaoDrawStepMonitor,NULL,NULL);
520: }
522: flg = PETSC_FALSE;
523: PetscOptionsBool("-tao_draw_gradient","plots gradient at each iteration","TaoSetMonitor",flg,&flg,NULL);
524: if (flg) {
525: TaoMonitorDrawCtx drawctx;
526: PetscInt howoften = 1;
527: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao),NULL,NULL,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&drawctx);
528: TaoSetMonitor(tao,TaoDrawGradientMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
529: }
530: flg = PETSC_FALSE;
531: PetscOptionsBool("-tao_fd_gradient","compute gradient using finite differences","TaoDefaultComputeGradient",flg,&flg,NULL);
532: if (flg) {
533: TaoSetGradientRoutine(tao,TaoDefaultComputeGradient,NULL);
534: }
535: flg = PETSC_FALSE;
536: PetscOptionsBool("-tao_fd_hessian","compute hessian using finite differences","TaoDefaultComputeHessian",flg,&flg,NULL);
537: if (flg) {
538: Mat H;
540: MatCreate(PetscObjectComm((PetscObject)tao),&H);
541: MatSetType(H,MATAIJ);
542: TaoSetHessianRoutine(tao,H,H,TaoDefaultComputeHessian,NULL);
543: MatDestroy(&H);
544: }
545: flg = PETSC_FALSE;
546: PetscOptionsBool("-tao_mf_hessian","compute matrix-free hessian using finite differences","TaoDefaultComputeHessianMFFD",flg,&flg,NULL);
547: if (flg) {
548: Mat H;
550: MatCreate(PetscObjectComm((PetscObject)tao),&H);
551: TaoSetHessianRoutine(tao,H,H,TaoDefaultComputeHessianMFFD,NULL);
552: MatDestroy(&H);
553: }
554: flg = PETSC_FALSE;
555: PetscOptionsBool("-tao_recycle_history","enable recycling/re-using information from the previous TaoSolve() call for some algorithms","TaoSetRecycleHistory",flg,&flg,NULL);
556: if (flg) {
557: TaoSetRecycleHistory(tao, PETSC_TRUE);
558: }
559: PetscOptionsEnum("-tao_subset_type","subset type","",TaoSubSetTypes,(PetscEnum)tao->subset_type,(PetscEnum*)&tao->subset_type,NULL);
561: if (tao->ops->setfromoptions) {
562: (*tao->ops->setfromoptions)(PetscOptionsObject,tao);
563: }
564: }
565: PetscOptionsEnd();
566: return(0);
567: }
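/*
   Illustrative usage sketch (not part of taosolver.c): TaoSetFromOptions() is placed
   after the problem callbacks are registered and before TaoSolve() so that run-time
   options take effect.  The program name and option values below are hypothetical.

     TaoSetFromOptions(tao);
     TaoSolve(tao);

   run with, e.g.:

     ./myprog -tao_type nls -tao_gatol 1e-6 -tao_max_it 200 -tao_monitor -tao_view
*/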
569: /*@C
570: TaoViewFromOptions - Views a Tao object based on values in the options database
572: Collective on Tao
574: Input Parameters:
575: + A - the Tao context
576: . obj - Optional object
577: - name - command line option
579: Level: intermediate
580: .seealso: Tao, TaoView, PetscObjectViewFromOptions(), TaoCreate()
581: @*/
582: PetscErrorCode TaoViewFromOptions(Tao A,PetscObject obj,const char name[])
583: {
588: PetscObjectViewFromOptions((PetscObject)A,obj,name);
589: return(0);
590: }
592: /*@C
593: TaoView - Prints information about the Tao
595: Collective on Tao
597: Input Parameters:
598: + tao - the Tao context
599: - viewer - visualization context
601: Options Database Key:
602: . -tao_view - Calls TaoView() at the end of TaoSolve()
604: Notes:
605: The available visualization contexts include
606: + PETSC_VIEWER_STDOUT_SELF - standard output (default)
607: - PETSC_VIEWER_STDOUT_WORLD - synchronized standard
608: output where only the first processor opens
609: the file. All other processors send their
610: data to the first processor to print.
612: Level: beginner
614: .seealso: PetscViewerASCIIOpen()
615: @*/
616: PetscErrorCode TaoView(Tao tao, PetscViewer viewer)
617: {
618: PetscErrorCode ierr;
619: PetscBool isascii,isstring;
620: TaoType type;
624: if (!viewer) {
625: PetscViewerASCIIGetStdout(((PetscObject)tao)->comm,&viewer);
626: }
630: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
631: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
632: if (isascii) {
633: PetscObjectPrintClassNamePrefixType((PetscObject)tao,viewer);
635: if (tao->ops->view) {
636: PetscViewerASCIIPushTab(viewer);
637: (*tao->ops->view)(tao,viewer);
638: PetscViewerASCIIPopTab(viewer);
639: }
640: if (tao->linesearch) {
641: PetscViewerASCIIPushTab(viewer);
642: TaoLineSearchView(tao->linesearch,viewer);
643: PetscViewerASCIIPopTab(viewer);
644: }
645: if (tao->ksp) {
646: PetscViewerASCIIPushTab(viewer);
647: KSPView(tao->ksp,viewer);
648: PetscViewerASCIIPrintf(viewer,"total KSP iterations: %D\n",tao->ksp_tot_its);
649: PetscViewerASCIIPopTab(viewer);
650: }
652: PetscViewerASCIIPushTab(viewer);
654: if (tao->XL || tao->XU) {
655: PetscViewerASCIIPrintf(viewer,"Active Set subset type: %s\n",TaoSubSetTypes[tao->subset_type]);
656: }
658: PetscViewerASCIIPrintf(viewer,"convergence tolerances: gatol=%g,",(double)tao->gatol);
659: PetscViewerASCIIPrintf(viewer," steptol=%g,",(double)tao->steptol);
660: PetscViewerASCIIPrintf(viewer," gttol=%g\n",(double)tao->gttol);
661: PetscViewerASCIIPrintf(viewer,"Residual in Function/Gradient:=%g\n",(double)tao->residual);
663: if (tao->constrained) {
664: PetscViewerASCIIPrintf(viewer,"convergence tolerances:");
665: PetscViewerASCIIPrintf(viewer," catol=%g,",(double)tao->catol);
666: PetscViewerASCIIPrintf(viewer," crtol=%g\n",(double)tao->crtol);
667: PetscViewerASCIIPrintf(viewer,"Residual in Constraints:=%g\n",(double)tao->cnorm);
668: }
670: if (tao->trust < tao->steptol) {
671: PetscViewerASCIIPrintf(viewer,"convergence tolerances: steptol=%g\n",(double)tao->steptol);
672: PetscViewerASCIIPrintf(viewer,"Final trust region radius:=%g\n",(double)tao->trust);
673: }
675: if (tao->fmin>-1.e25) {
676: PetscViewerASCIIPrintf(viewer,"convergence tolerances: function minimum=%g\n",(double)tao->fmin);
677: }
678: PetscViewerASCIIPrintf(viewer,"Objective value=%g\n",(double)tao->fc);
680: PetscViewerASCIIPrintf(viewer,"total number of iterations=%D, ",tao->niter);
681: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_it);
683: if (tao->nfuncs>0) {
684: PetscViewerASCIIPrintf(viewer,"total number of function evaluations=%D,",tao->nfuncs);
685: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
686: }
687: if (tao->ngrads>0) {
688: PetscViewerASCIIPrintf(viewer,"total number of gradient evaluations=%D,",tao->ngrads);
689: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
690: }
691: if (tao->nfuncgrads>0) {
692: PetscViewerASCIIPrintf(viewer,"total number of function/gradient evaluations=%D,",tao->nfuncgrads);
693: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_funcs);
694: }
695: if (tao->nhess>0) {
696: PetscViewerASCIIPrintf(viewer,"total number of Hessian evaluations=%D\n",tao->nhess);
697: }
698: /* if (tao->linear_its>0) {
699: PetscViewerASCIIPrintf(viewer," total Krylov method iterations=%D\n",tao->linear_its);
700: }*/
701: if (tao->nconstraints>0) {
702: PetscViewerASCIIPrintf(viewer,"total number of constraint function evaluations=%D\n",tao->nconstraints);
703: }
704: if (tao->njac>0) {
705: PetscViewerASCIIPrintf(viewer,"total number of Jacobian evaluations=%D\n",tao->njac);
706: }
708: if (tao->reason>0) {
709: PetscViewerASCIIPrintf(viewer, "Solution converged: ");
710: switch (tao->reason) {
711: case TAO_CONVERGED_GATOL:
712: PetscViewerASCIIPrintf(viewer," ||g(X)|| <= gatol\n");
713: break;
714: case TAO_CONVERGED_GRTOL:
715: PetscViewerASCIIPrintf(viewer," ||g(X)||/|f(X)| <= grtol\n");
716: break;
717: case TAO_CONVERGED_GTTOL:
718: PetscViewerASCIIPrintf(viewer," ||g(X)||/||g(X0)|| <= gttol\n");
719: break;
720: case TAO_CONVERGED_STEPTOL:
721: PetscViewerASCIIPrintf(viewer," Steptol -- step size small\n");
722: break;
723: case TAO_CONVERGED_MINF:
724: PetscViewerASCIIPrintf(viewer," Minf -- f < fmin\n");
725: break;
726: case TAO_CONVERGED_USER:
727: PetscViewerASCIIPrintf(viewer," User Terminated\n");
728: break;
729: default:
730: PetscViewerASCIIPrintf(viewer,"\n");
731: break;
732: }
734: } else {
735: PetscViewerASCIIPrintf(viewer,"Solver terminated: %d",tao->reason);
736: switch (tao->reason) {
737: case TAO_DIVERGED_MAXITS:
738: PetscViewerASCIIPrintf(viewer," Maximum Iterations\n");
739: break;
740: case TAO_DIVERGED_NAN:
741: PetscViewerASCIIPrintf(viewer," NAN or Inf encountered\n");
742: break;
743: case TAO_DIVERGED_MAXFCN:
744: PetscViewerASCIIPrintf(viewer," Maximum Function Evaluations\n");
745: break;
746: case TAO_DIVERGED_LS_FAILURE:
747: PetscViewerASCIIPrintf(viewer," Line Search Failure\n");
748: break;
749: case TAO_DIVERGED_TR_REDUCTION:
750: PetscViewerASCIIPrintf(viewer," Trust Region too small\n");
751: break;
752: case TAO_DIVERGED_USER:
753: PetscViewerASCIIPrintf(viewer," User Terminated\n");
754: break;
755: default:
756: PetscViewerASCIIPrintf(viewer,"\n");
757: break;
758: }
759: }
760: PetscViewerASCIIPopTab(viewer);
761: } else if (isstring) {
762: TaoGetType(tao,&type);
763: PetscViewerStringSPrintf(viewer," %-3.3s",type);
764: }
765: return(0);
766: }
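/*
   Illustrative usage sketch (not part of taosolver.c): viewing the solver summary on
   standard output after a solve, equivalent to running with -tao_view.

     TaoSolve(tao);
     TaoView(tao,PETSC_VIEWER_STDOUT_WORLD);
*/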
768: /*@
769: TaoSetRecycleHistory - Sets the boolean flag to enable/disable re-using
770: iterate information from the previous TaoSolve(). This feature is disabled by
771: default.
773: For conjugate gradient methods (BNCG), this re-uses the latest search direction
774: from the previous TaoSolve() call when computing the first search direction in a
775: new solution. By default, CG methods set the first search direction to the
776: negative gradient.
778: For the quasi-Newton family of methods (BQNLS, BQNKLS, BQNKTR, BQNKTL), this re-uses
779: the accumulated quasi-Newton Hessian approximation from the previous TaoSolve()
780: call. By default, the quasi-Newton methods reset the initial Hessian approximation to
781: the identity matrix.
783: For any other algorithm, this setting has no effect.
785: Logically collective on Tao
787: Input Parameters:
788: + tao - the Tao context
789: - recycle - boolean flag
791: Options Database Keys:
792: . -tao_recycle_history
794: Level: intermediate
796: .seealso: TaoGetRecycleHistory(), TAOBNCG, TAOBQNLS, TAOBQNKLS, TAOBQNKTR, TAOBQNKTL
798: @*/
799: PetscErrorCode TaoSetRecycleHistory(Tao tao, PetscBool recycle)
800: {
803: tao->recycle = recycle;
804: return(0);
805: }
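/*
   Illustrative usage sketch (not part of taosolver.c): recycling quasi-Newton history
   across a sequence of related solves.  UpdateProblemData(), user, and nproblems are
   hypothetical application-side names.

     TaoSetType(tao,TAOBQNLS);
     TaoSetRecycleHistory(tao,PETSC_TRUE);
     for (i=0; i<nproblems; i++) {
       UpdateProblemData(&user,i);    (hypothetical routine that perturbs the problem)
       TaoSolve(tao);                 (re-uses the Hessian approximation accumulated in the previous solve)
     }
*/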
807: /*@
808: TaoGetRecycleHistory - Retrieve the boolean flag for re-using iterate information
809: from the previous TaoSolve(). This feature is disabled by default.
811: Logically collective on Tao
813: Input Parameter:
814: . tao - the Tao context
816: Output Parameter:
817: . recycle - boolean flag
819: Options Database Keys:
820: . -tao_recycle_history
822: Level: intermediate
824: .seealso: TaoSetRecycleHistory(), TAOBNCG, TAOBQNLS, TAOBQNKLS, TAOBQNKTR, TAOBQNKTL
826: @*/
827: PetscErrorCode TaoGetRecycleHistory(Tao tao, PetscBool *recycle)
828: {
831: *recycle = tao->recycle;
832: return(0);
833: }
835: /*@
836: TaoSetTolerances - Sets parameters used in TAO convergence tests
838: Logically collective on Tao
840: Input Parameters:
841: + tao - the Tao context
842: . gatol - stop if norm of gradient is less than this
843: . grtol - stop if relative norm of gradient is less than this
844: - gttol - stop if norm of gradient is reduced by this factor
846: Options Database Keys:
847: + -tao_gatol <gatol> - Sets gatol
848: . -tao_grtol <grtol> - Sets grtol
849: - -tao_gttol <gttol> - Sets gttol
851: Stopping Criteria:
852: $ ||g(X)|| <= gatol
853: $ ||g(X)|| / |f(X)| <= grtol
854: $ ||g(X)|| / ||g(X0)|| <= gttol
856: Notes:
857: Use PETSC_DEFAULT to leave one or more tolerances unchanged.
859: Level: beginner
861: .seealso: TaoGetTolerances()
863: @*/
864: PetscErrorCode TaoSetTolerances(Tao tao, PetscReal gatol, PetscReal grtol, PetscReal gttol)
865: {
871: if (gatol != PETSC_DEFAULT) {
872: if (gatol<0) {
873: PetscInfo(tao,"Tried to set negative gatol -- ignored.\n");
874: } else {
875: tao->gatol = PetscMax(0,gatol);
876: tao->gatol_changed=PETSC_TRUE;
877: }
878: }
880: if (grtol != PETSC_DEFAULT) {
881: if (grtol<0) {
882: PetscInfo(tao,"Tried to set negative grtol -- ignored.\n");
883: } else {
884: tao->grtol = PetscMax(0,grtol);
885: tao->grtol_changed=PETSC_TRUE;
886: }
887: }
889: if (gttol != PETSC_DEFAULT) {
890: if (gttol<0) {
891: PetscInfo(tao,"Tried to set negative gttol -- ignored.\n");
892: } else {
893: tao->gttol = PetscMax(0,gttol);
894: tao->gttol_changed=PETSC_TRUE;
895: }
896: }
897: return(0);
898: }
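/*
   Illustrative usage sketch (not part of taosolver.c): tightening only the absolute
   gradient tolerance while leaving the other tolerances unchanged via PETSC_DEFAULT,
   as described in the Notes above.  The 1e-10 value is an arbitrary example.

     TaoSetTolerances(tao,1e-10,PETSC_DEFAULT,PETSC_DEFAULT);
*/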
900: /*@
901: TaoSetConstraintTolerances - Sets constraint tolerance parameters used in TAO convergence tests
903: Logically collective on Tao
905: Input Parameters:
906: + tao - the Tao context
907: . catol - absolute constraint tolerance, the constraint norm must be less than catol for the gatol convergence criterion to be used
908: - crtol - relative constraint tolerance, the constraint norm must be less than crtol for the gatol and gttol convergence criteria to be used
910: Options Database Keys:
911: + -tao_catol <catol> - Sets catol
912: - -tao_crtol <crtol> - Sets crtol
914: Notes:
915: Use PETSC_DEFAULT to leave any tolerance unchanged.
917: Level: intermediate
919: .seealso: TaoGetTolerances(), TaoGetConstraintTolerances(), TaoSetTolerances()
921: @*/
922: PetscErrorCode TaoSetConstraintTolerances(Tao tao, PetscReal catol, PetscReal crtol)
923: {
929: if (catol != PETSC_DEFAULT) {
930: if (catol<0) {
931: PetscInfo(tao,"Tried to set negative catol -- ignored.\n");
932: } else {
933: tao->catol = PetscMax(0,catol);
934: tao->catol_changed=PETSC_TRUE;
935: }
936: }
938: if (crtol != PETSC_DEFAULT) {
939: if (crtol<0) {
940: PetscInfo(tao,"Tried to set negative crtol -- ignored.\n");
941: } else {
942: tao->crtol = PetscMax(0,crtol);
943: tao->crtol_changed=PETSC_TRUE;
944: }
945: }
946: return(0);
947: }
949: /*@
950: TaoGetConstraintTolerances - Gets constraint tolerance parameters used in TAO convergence tests
952: Not collective
954: Input Parameter:
955: . tao - the Tao context
957: Output Parameters:
958: + catol - absolute constraint tolerance, the constraint norm must be less than catol for the gatol convergence criterion to be used
959: - crtol - relative constraint tolerance, the constraint norm must be less than crtol for the gatol and gttol convergence criteria to be used
961: Level: intermediate
963: .seealso: TaoGetTolerances(), TaoSetTolerances(), TaoSetConstraintTolerances()
965: @*/
966: PetscErrorCode TaoGetConstraintTolerances(Tao tao, PetscReal *catol, PetscReal *crtol)
967: {
970: if (catol) *catol = tao->catol;
971: if (crtol) *crtol = tao->crtol;
972: return(0);
973: }
975: /*@
976: TaoSetFunctionLowerBound - Sets a bound on the solution objective value.
977: When an approximate solution with an objective value below this number
978: has been found, the solver will terminate.
980: Logically Collective on Tao
982: Input Parameters:
983: + tao - the Tao solver context
984: - fmin - the tolerance
986: Options Database Keys:
987: . -tao_fmin <fmin> - sets the minimum function value
989: Level: intermediate
991: .seealso: TaoSetTolerances()
992: @*/
993: PetscErrorCode TaoSetFunctionLowerBound(Tao tao,PetscReal fmin)
994: {
997: tao->fmin = fmin;
998: tao->fmin_changed=PETSC_TRUE;
999: return(0);
1000: }
1002: /*@
1003: TaoGetFunctionLowerBound - Gets the bound on the solution objective value.
1004: When an approximate solution with an objective value below this number
1005: has been found, the solver will terminate.
1007: Not collective on Tao
1009: Input Parameters:
1010: . tao - the Tao solver context
1012: Output Parameter:
1013: . fmin - the minimum function value
1015: Level: intermediate
1017: .seealso: TaoSetFunctionLowerBound()
1018: @*/
1019: PetscErrorCode TaoGetFunctionLowerBound(Tao tao,PetscReal *fmin)
1020: {
1023: *fmin = tao->fmin;
1024: return(0);
1025: }
1027: /*@
1028: TaoSetMaximumFunctionEvaluations - Sets a maximum number of
1029: function evaluations.
1031: Logically Collective on Tao
1033: Input Parameters:
1034: + tao - the Tao solver context
1035: - nfcn - the maximum number of function evaluations (>=0)
1037: Options Database Keys:
1038: . -tao_max_funcs <nfcn> - sets the maximum number of function evaluations
1040: Level: intermediate
1042: .seealso: TaoSetTolerances(), TaoSetMaximumIterations()
1043: @*/
1045: PetscErrorCode TaoSetMaximumFunctionEvaluations(Tao tao,PetscInt nfcn)
1046: {
1049: tao->max_funcs = PetscMax(0,nfcn);
1050: tao->max_funcs_changed=PETSC_TRUE;
1051: return(0);
1052: }
1054: /*@
1055: TaoGetMaximumFunctionEvaluations - Gets the maximum number of
1056: function evaluations.
1058: Not Collective
1060: Input Parameters:
1061: . tao - the Tao solver context
1063: Output Parameters:
1064: . nfcn - the maximum number of function evaluations
1066: Level: intermediate
1068: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
1069: @*/
1071: PetscErrorCode TaoGetMaximumFunctionEvaluations(Tao tao,PetscInt *nfcn)
1072: {
1075: *nfcn = tao->max_funcs;
1076: return(0);
1077: }
1079: /*@
1080: TaoGetCurrentFunctionEvaluations - Gets the current number of
1081: function evaluations.
1083: Not Collective
1085: Input Parameters:
1086: . tao - the Tao solver context
1088: Output Parameters:
1089: . nfuncs - the current number of function evaluations
1091: Level: intermediate
1093: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
1094: @*/
1096: PetscErrorCode TaoGetCurrentFunctionEvaluations(Tao tao,PetscInt *nfuncs)
1097: {
1100: *nfuncs=PetscMax(tao->nfuncs,tao->nfuncgrads);
1101: return(0);
1102: }
1104: /*@
1105: TaoSetMaximumIterations - Sets a maximum number of iterates.
1107: Logically Collective on Tao
1109: Input Parameters:
1110: + tao - the Tao solver context
1111: - maxits - the maximum number of iterates (>=0)
1113: Options Database Keys:
1114: . -tao_max_it <its> - sets the maximum number of iterations
1116: Level: intermediate
1118: .seealso: TaoSetTolerances(), TaoSetMaximumFunctionEvaluations()
1119: @*/
1120: PetscErrorCode TaoSetMaximumIterations(Tao tao,PetscInt maxits)
1121: {
1124: tao->max_it = PetscMax(0,maxits);
1125: tao->max_it_changed=PETSC_TRUE;
1126: return(0);
1127: }
1129: /*@
1130: TaoGetMaximumIterations - Gets the maximum number of iterates.
1132: Not Collective
1134: Input Parameters:
1135: . tao - the Tao solver context
1137: Output Parameters:
1138: . maxits - the maximum number of iterates
1140: Level: intermediate
1142: .seealso: TaoSetMaximumIterations(), TaoGetMaximumFunctionEvaluations()
1143: @*/
1144: PetscErrorCode TaoGetMaximumIterations(Tao tao,PetscInt *maxits)
1145: {
1148: *maxits = tao->max_it;
1149: return(0);
1150: }
1152: /*@
1153: TaoSetInitialTrustRegionRadius - Sets the initial trust region radius.
1155: Logically collective on Tao
1157: Input Parameters:
1158: + tao - a TAO optimization solver
1159: - radius - the trust region radius
1161: Level: intermediate
1163: Options Database Key:
1164: . -tao_trust0 <t0> - sets initial trust region radius
1166: .seealso: TaoGetTrustRegionRadius(), TaoSetTrustRegionTolerance()
1167: @*/
1168: PetscErrorCode TaoSetInitialTrustRegionRadius(Tao tao, PetscReal radius)
1169: {
1172: tao->trust0 = PetscMax(0.0,radius);
1173: tao->trust0_changed=PETSC_TRUE;
1174: return(0);
1175: }
1177: /*@
1178: TaoGetInitialTrustRegionRadius - Gets the initial trust region radius.
1180: Not Collective
1182: Input Parameter:
1183: . tao - a TAO optimization solver
1185: Output Parameter:
1186: . radius - the trust region radius
1188: Level: intermediate
1190: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetCurrentTrustRegionRadius()
1191: @*/
1192: PetscErrorCode TaoGetInitialTrustRegionRadius(Tao tao, PetscReal *radius)
1193: {
1196: *radius = tao->trust0;
1197: return(0);
1198: }
1200: /*@
1201: TaoGetCurrentTrustRegionRadius - Gets the current trust region radius.
1203: Not Collective
1205: Input Parameter:
1206: . tao - a TAO optimization solver
1208: Output Parameter:
1209: . radius - the trust region radius
1211: Level: intermediate
1213: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetInitialTrustRegionRadius()
1214: @*/
1215: PetscErrorCode TaoGetCurrentTrustRegionRadius(Tao tao, PetscReal *radius)
1216: {
1219: *radius = tao->trust;
1220: return(0);
1221: }
1223: /*@
1224: TaoGetTolerances - Gets the current values of the tolerances
1226: Not Collective
1228: Input Parameter:
1229: . tao - the Tao context
1231: Output Parameters:
1232: + gatol - stop if norm of gradient is less than this
1233: . grtol - stop if relative norm of gradient is less than this
1234: - gttol - stop if norm of gradient is reduced by this factor
1236: Note: NULL can be used as an argument if not all tolerance values are needed
1238: .seealso: TaoSetTolerances()
1240: Level: intermediate
1241: @*/
1242: PetscErrorCode TaoGetTolerances(Tao tao, PetscReal *gatol, PetscReal *grtol, PetscReal *gttol)
1243: {
1246: if (gatol) *gatol=tao->gatol;
1247: if (grtol) *grtol=tao->grtol;
1248: if (gttol) *gttol=tao->gttol;
1249: return(0);
1250: }
1252: /*@
1253: TaoGetKSP - Gets the linear solver used by the optimization solver.
1254: Application writers should use TaoGetKSP if they need direct access
1255: to the PETSc KSP object.
1257: Not Collective
1259: Input Parameters:
1260: . tao - the TAO solver
1262: Output Parameters:
1263: . ksp - the KSP linear solver used in the optimization solver
1265: Level: intermediate
1267: @*/
1268: PetscErrorCode TaoGetKSP(Tao tao, KSP *ksp)
1269: {
1271: *ksp = tao->ksp;
1272: return(0);
1273: }
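/*
   Illustrative usage sketch (not part of taosolver.c): retrieving the inner KSP of a
   Newton-type solver to customize the linear solve.  Not every Tao type creates a KSP,
   so the returned pointer is checked; the KSPCG/PCJACOBI choices are arbitrary examples
   (the same effect is usually available from the command line via the tao_ options prefix).

     KSP ksp;
     PC  pc;
     TaoGetKSP(tao,&ksp);
     if (ksp) {
       KSPSetType(ksp,KSPCG);
       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCJACOBI);
     }
*/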
1275: /*@
1276: TaoGetLinearSolveIterations - Gets the total number of linear iterations
1277: used by the TAO solver
1279: Not Collective
1281: Input Parameter:
1282: . tao - TAO context
1284: Output Parameter:
1285: . lits - number of linear iterations
1287: Notes:
1288: This counter is reset to zero for each successive call to TaoSolve()
1290: Level: intermediate
1292: .seealso: TaoGetKSP()
1293: @*/
1294: PetscErrorCode TaoGetLinearSolveIterations(Tao tao,PetscInt *lits)
1295: {
1299: *lits = tao->ksp_tot_its;
1300: return(0);
1301: }
1303: /*@
1304: TaoGetLineSearch - Gets the line search used by the optimization solver.
1305: Application writers should use TaoGetLineSearch if they need direct access
1306: to the TaoLineSearch object.
1308: Not Collective
1310: Input Parameters:
1311: . tao - the TAO solver
1313: Output Parameters:
1314: . ls - the line search used in the optimization solver
1316: Level: intermediate
1318: @*/
1319: PetscErrorCode TaoGetLineSearch(Tao tao, TaoLineSearch *ls)
1320: {
1322: *ls = tao->linesearch;
1323: return(0);
1324: }
1326: /*@
1327: TaoAddLineSearchCounts - Adds the number of function evaluations spent
1328: in the line search to the running total.
1330: Input Parameter:
1331: . tao - the TAO solver
1334: Level: developer
1336: .seealso: TaoLineSearchApply()
1337: @*/
1338: PetscErrorCode TaoAddLineSearchCounts(Tao tao)
1339: {
1341: PetscBool flg;
1342: PetscInt nfeval,ngeval,nfgeval;
1346: if (tao->linesearch) {
1347: TaoLineSearchIsUsingTaoRoutines(tao->linesearch,&flg);
1348: if (!flg) {
1349: TaoLineSearchGetNumberFunctionEvaluations(tao->linesearch,&nfeval,&ngeval,&nfgeval);
1350: tao->nfuncs+=nfeval;
1351: tao->ngrads+=ngeval;
1352: tao->nfuncgrads+=nfgeval;
1353: }
1354: }
1355: return(0);
1356: }
1358: /*@
1359: TaoGetSolutionVector - Returns the vector with the current TAO solution
1361: Not Collective
1363: Input Parameter:
1364: . tao - the Tao context
1366: Output Parameter:
1367: . X - the current solution
1369: Level: intermediate
1371: Note: The returned vector will be the same object that was passed into TaoSetInitialVector()
1372: @*/
1373: PetscErrorCode TaoGetSolutionVector(Tao tao, Vec *X)
1374: {
1377: *X = tao->solution;
1378: return(0);
1379: }
1381: /*@
1382: TaoGetGradientVector - Returns the vector with the current TAO gradient
1384: Not Collective
1386: Input Parameter:
1387: . tao - the Tao context
1389: Output Parameter:
1390: . G - the current gradient
1392: Level: intermediate
1393: @*/
1394: PetscErrorCode TaoGetGradientVector(Tao tao, Vec *G)
1395: {
1398: *G = tao->gradient;
1399: return(0);
1400: }
1402: /*@
1403: TaoResetStatistics - Initializes the statistics used by TAO for all of the solvers.
1404: These statistics include the iteration number, residual norms, and convergence status.
1405: This routine gets called before solving each optimization problem.
1407: Collective on Tao
1409: Input Parameters:
1410: . tao - the Tao context
1412: Level: developer
1414: .seealso: TaoCreate(), TaoSolve()
1415: @*/
1416: PetscErrorCode TaoResetStatistics(Tao tao)
1417: {
1420: tao->niter = 0;
1421: tao->nfuncs = 0;
1422: tao->nfuncgrads = 0;
1423: tao->ngrads = 0;
1424: tao->nhess = 0;
1425: tao->njac = 0;
1426: tao->nconstraints = 0;
1427: tao->ksp_its = 0;
1428: tao->ksp_tot_its = 0;
1429: tao->reason = TAO_CONTINUE_ITERATING;
1430: tao->residual = 0.0;
1431: tao->cnorm = 0.0;
1432: tao->step = 0.0;
1433: tao->lsflag = PETSC_FALSE;
1434: if (tao->hist_reset) tao->hist_len=0;
1435: return(0);
1436: }
1438: /*@C
1439: TaoSetUpdate - Sets the general-purpose update function called
1440: at the beginning of every iteration of the nonlinear solve. Specifically
1441: it is called at the top of every iteration, after the new solution and the gradient
1442: are determined, but before the Hessian is computed (if applicable).
1444: Logically Collective on Tao
1446: Input Parameters:
1447: + tao - The Tao solver context
1448: . func - The update function
1449: - ctx - The (optional) user-defined context for private data
1450: Calling sequence of func:
1451: $ func(Tao tao, PetscInt step, void *ctx);
1453: . step - The current step of the iteration
1455: Level: advanced
1457: .seealso: TaoSolve()
1458: @*/
1459: PetscErrorCode TaoSetUpdate(Tao tao, PetscErrorCode (*func)(Tao, PetscInt,void*), void *ctx)
1460: {
1463: tao->ops->update = func;
1464: tao->user_update = ctx;
1465: return(0);
1466: }
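/*
   Illustrative usage sketch (not part of taosolver.c): an update callback invoked at
   the top of every iteration.  MyUpdate is a hypothetical user routine; here it only
   logs the iteration number.

     static PetscErrorCode MyUpdate(Tao tao,PetscInt step,void *ctx)
     {
       PetscPrintf(PetscObjectComm((PetscObject)tao),"starting iteration %D\n",step);
       return 0;
     }

     TaoSetUpdate(tao,MyUpdate,NULL);
*/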
1468: /*@C
1469: TaoSetConvergenceTest - Sets the function that is to be used to test
1470: for convergence of the iterative minimization solution. The new convergence
1471: testing routine will replace TAO's default convergence test.
1473: Logically Collective on Tao
1475: Input Parameters:
1476: + tao - the Tao object
1477: . conv - the routine to test for convergence
1478: - ctx - [optional] context for private data for the convergence routine
1479: (may be NULL)
1481: Calling sequence of conv:
1482: $ PetscErrorCode conv(Tao tao, void *ctx)
1484: + tao - the Tao object
1485: - ctx - [optional] convergence context
1487: Note: The new convergence testing routine should call TaoSetConvergedReason().
1489: Level: advanced
1491: .seealso: TaoSetConvergedReason(), TaoGetSolutionStatus(), TaoGetTolerances(), TaoSetMonitor()
1493: @*/
1494: PetscErrorCode TaoSetConvergenceTest(Tao tao, PetscErrorCode (*conv)(Tao,void*), void *ctx)
1495: {
1498: (tao)->ops->convergencetest = conv;
1499: (tao)->cnvP = ctx;
1500: return(0);
1501: }
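/*
   Illustrative usage sketch (not part of taosolver.c): a replacement convergence test
   that stops on a caller-chosen gradient threshold and otherwise enforces an iteration
   cap.  MyConvergenceTest and the 1e-4/500 values are hypothetical; as noted above, the
   routine should call TaoSetConvergedReason().

     static PetscErrorCode MyConvergenceTest(Tao tao,void *ctx)
     {
       PetscInt           its;
       PetscReal          f,gnorm,cnorm,xdiff;
       TaoConvergedReason reason;

       TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);
       if (gnorm < 1e-4)    TaoSetConvergedReason(tao,TAO_CONVERGED_USER);
       else if (its >= 500) TaoSetConvergedReason(tao,TAO_DIVERGED_MAXITS);
       return 0;
     }

     TaoSetConvergenceTest(tao,MyConvergenceTest,NULL);
*/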
1503: /*@C
1504: TaoSetMonitor - Sets an ADDITIONAL function that is to be used at every
1505: iteration of the solver to display the iteration's
1506: progress.
1508: Logically Collective on Tao
1510: Input Parameters:
1511: + tao - the Tao solver context
1512: . mymonitor - monitoring routine
1513: - mctx - [optional] user-defined context for private data for the
1514: monitor routine (may be NULL)
1516: Calling sequence of mymonitor:
1517: $ PetscErrorCode mymonitor(Tao tao,void *mctx)
1519: + tao - the Tao solver context
1520: - mctx - [optional] monitoring context
1522: Options Database Keys:
1523: + -tao_monitor - sets TaoMonitorDefault()
1524: . -tao_smonitor - sets short monitor
1525: . -tao_cmonitor - same as smonitor plus constraint norm
1526: . -tao_view_solution - view solution at each iteration
1527: . -tao_view_gradient - view gradient at each iteration
1528: . -tao_view_ls_residual - view least-squares residual vector at each iteration
1529: - -tao_cancelmonitors - cancels all monitors that have been hardwired into a code by calls to TaoSetMonitor(), but does not cancel those set via the options database.
1531: Notes:
1532: Several different monitoring routines may be set by calling
1533: TaoSetMonitor() multiple times; all will be called in the
1534: order in which they were set.
1536: Fortran Notes:
1537: Only one monitor function may be set
1539: Level: intermediate
1541: .seealso: TaoMonitorDefault(), TaoCancelMonitors(), TaoSetDestroyRoutine()
1542: @*/
1543: PetscErrorCode TaoSetMonitor(Tao tao, PetscErrorCode (*func)(Tao, void*), void *ctx,PetscErrorCode (*dest)(void**))
1544: {
1546: PetscInt i;
1547: PetscBool identical;
1551: if (tao->numbermonitors >= MAXTAOMONITORS) SETERRQ1(PetscObjectComm((PetscObject)tao),PETSC_ERR_SUP,"Cannot attach another monitor -- max=%d",MAXTAOMONITORS);
1553: for (i=0; i<tao->numbermonitors;i++) {
1554: PetscMonitorCompare((PetscErrorCode (*)(void))func,ctx,dest,(PetscErrorCode (*)(void))tao->monitor[i],tao->monitorcontext[i],tao->monitordestroy[i],&identical);
1555: if (identical) return(0);
1556: }
1557: tao->monitor[tao->numbermonitors] = func;
1558: tao->monitorcontext[tao->numbermonitors] = (void*)ctx;
1559: tao->monitordestroy[tao->numbermonitors] = dest;
1560: ++tao->numbermonitors;
1561: return(0);
1562: }
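/*
   Illustrative usage sketch (not part of taosolver.c): attaching an additional
   user-defined monitor.  MyMonitor is a hypothetical routine; it passes NULL for both
   the context and the destroy routine since it owns no resources.

     static PetscErrorCode MyMonitor(Tao tao,void *mctx)
     {
       PetscInt           its;
       PetscReal          f,gnorm,cnorm,xdiff;
       TaoConvergedReason reason;

       TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);
       PetscPrintf(PetscObjectComm((PetscObject)tao),"%D: f=%g, ||g||=%g\n",its,(double)f,(double)gnorm);
       return 0;
     }

     TaoSetMonitor(tao,MyMonitor,NULL,NULL);
*/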
1564: /*@
1565: TaoCancelMonitors - Clears all the monitor functions for a Tao object.
1567: Logically Collective on Tao
1569: Input Parameters:
1570: . tao - the Tao solver context
1572: Options Database:
1573: . -tao_cancelmonitors - cancels all monitors that have been hardwired
1574: into a code by calls to TaoSetMonitor(), but does not cancel those
1575: set via the options database
1577: Notes:
1578: There is no way to clear one specific monitor from a Tao object.
1580: Level: advanced
1582: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1583: @*/
1584: PetscErrorCode TaoCancelMonitors(Tao tao)
1585: {
1586: PetscInt i;
1591: for (i=0;i<tao->numbermonitors;i++) {
1592: if (tao->monitordestroy[i]) {
1593: (*tao->monitordestroy[i])(&tao->monitorcontext[i]);
1594: }
1595: }
1596: tao->numbermonitors=0;
1597: return(0);
1598: }
1600: /*@
1601: TaoMonitorDefault - Default routine for monitoring progress of the
1602: Tao solvers. This monitor prints the function value and gradient
1603: norm at each iteration. It can be turned on from the command line using the
1604: -tao_monitor option
1606: Collective on Tao
1608: Input Parameters:
1609: + tao - the Tao context
1610: - ctx - PetscViewer context or NULL
1612: Options Database Keys:
1613: . -tao_monitor
1615: Level: advanced
1617: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1618: @*/
1619: PetscErrorCode TaoMonitorDefault(Tao tao, void *ctx)
1620: {
1622: PetscInt its, tabs;
1623: PetscReal fct,gnorm;
1624: PetscViewer viewer = (PetscViewer)ctx;
1628: its=tao->niter;
1629: fct=tao->fc;
1630: gnorm=tao->residual;
1631: PetscViewerASCIIGetTab(viewer, &tabs);
1632: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1633: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1634: PetscViewerASCIIPrintf(viewer," Iteration information for %s solve.\n",((PetscObject)tao)->prefix);
1635: tao->header_printed = PETSC_TRUE;
1636: }
1637: PetscViewerASCIIPrintf(viewer,"%3D TAO,",its);
1638: PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1639: if (gnorm >= PETSC_INFINITY) {
1640: PetscViewerASCIIPrintf(viewer," Residual: Inf \n");
1641: } else {
1642: PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1643: }
1644: PetscViewerASCIISetTab(viewer, tabs);
1645: return(0);
1646: }
1648: /*@
1649: TaoDefaultGMonitor - Default routine for monitoring progress of the
1650: Tao solvers with extra detail on the globalization method.
1651: This monitor prints the function value and gradient norm at each
1652: iteration, as well as the step size and trust radius. Note that the
1653: step size and trust radius may be the same for some algorithms.
1654: It can be turned on from the command line using the
1655: -tao_gmonitor option
1657: Collective on Tao
1659: Input Parameters:
1660: + tao - the Tao context
1661: - ctx - PetscViewer context or NULL
1663: Options Database Keys:
1664: . -tao_gmonitor
1666: Level: advanced
1668: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1669: @*/
1670: PetscErrorCode TaoDefaultGMonitor(Tao tao, void *ctx)
1671: {
1673: PetscInt its, tabs;
1674: PetscReal fct,gnorm,stp,tr;
1675: PetscViewer viewer = (PetscViewer)ctx;
1679: its=tao->niter;
1680: fct=tao->fc;
1681: gnorm=tao->residual;
1682: stp=tao->step;
1683: tr=tao->trust;
1684: PetscViewerASCIIGetTab(viewer, &tabs);
1685: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1686: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1687: PetscViewerASCIIPrintf(viewer," Iteration information for %s solve.\n",((PetscObject)tao)->prefix);
1688: tao->header_printed = PETSC_TRUE;
1689: }
1690: PetscViewerASCIIPrintf(viewer,"%3D TAO,",its);
1691: PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1692: if (gnorm >= PETSC_INFINITY) {
1693: PetscViewerASCIIPrintf(viewer," Residual: Inf,");
1694: } else {
1695: PetscViewerASCIIPrintf(viewer," Residual: %g,",(double)gnorm);
1696: }
1697: PetscViewerASCIIPrintf(viewer," Step: %g, Trust: %g\n",(double)stp,(double)tr);
1698: PetscViewerASCIISetTab(viewer, tabs);
1699: return(0);
1700: }
1702: /*@
1703: TaoDefaultSMonitor - Default routine for monitoring progress of the
1704: solver. Same as TaoMonitorDefault() except
1705: it prints fewer digits of the residual as the residual gets smaller.
1706: This is because the later digits are meaningless and are often
1707: different on different machines; by using this routine different
1708: machines will usually generate the same output. It can be turned on
1709: by using the -tao_smonitor option
1711: Collective on Tao
1713: Input Parameters:
1714: + tao - the Tao context
1715: - ctx - PetscViewer context of type ASCII
1717: Options Database Keys:
1718: . -tao_smonitor
1720: Level: advanced
1722: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1723: @*/
1724: PetscErrorCode TaoDefaultSMonitor(Tao tao, void *ctx)
1725: {
1727: PetscInt its, tabs;
1728: PetscReal fct,gnorm;
1729: PetscViewer viewer = (PetscViewer)ctx;
1733: its=tao->niter;
1734: fct=tao->fc;
1735: gnorm=tao->residual;
1736: PetscViewerASCIIGetTab(viewer, &tabs);
1737: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1738: PetscViewerASCIIPrintf(viewer,"iter = %3D,",its);
1739: PetscViewerASCIIPrintf(viewer," Function value %g,",(double)fct);
1740: if (gnorm >= PETSC_INFINITY) {
1741: PetscViewerASCIIPrintf(viewer," Residual: Inf \n");
1742: } else if (gnorm > 1.e-6) {
1743: PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1744: } else if (gnorm > 1.e-11) {
1745: PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-6 \n");
1746: } else {
1747: PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-11 \n");
1748: }
1749: PetscViewerASCIISetTab(viewer, tabs);
1750: return(0);
1751: }
1753: /*@
1754: TaoDefaultCMonitor - same as TaoMonitorDefault() except
1755: it prints the norm of the constraints function. It can be turned on
1756: from the command line using the -tao_cmonitor option
1758: Collective on Tao
1760: Input Parameters:
1761: + tao - the Tao context
1762: - ctx - PetscViewer context or NULL
1764: Options Database Keys:
1765: . -tao_cmonitor
1767: Level: advanced
1769: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1770: @*/
1771: PetscErrorCode TaoDefaultCMonitor(Tao tao, void *ctx)
1772: {
1774: PetscInt its, tabs;
1775: PetscReal fct,gnorm;
1776: PetscViewer viewer = (PetscViewer)ctx;
1780: its=tao->niter;
1781: fct=tao->fc;
1782: gnorm=tao->residual;
1783: PetscViewerASCIIGetTab(viewer, &tabs);
1784: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1785: PetscViewerASCIIPrintf(viewer,"iter = %D,",its);
1786: PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1787: PetscViewerASCIIPrintf(viewer," Residual: %g ",(double)gnorm);
1788: PetscViewerASCIIPrintf(viewer," Constraint: %g \n",(double)tao->cnorm);
1789: PetscViewerASCIISetTab(viewer, tabs);
1790: return(0);
1791: }
1793: /*@C
1794: TaoSolutionMonitor - Views the solution at each iteration
1795: It can be turned on from the command line using the
1796: -tao_view_solution option
1798: Collective on Tao
1800: Input Parameters:
1801: + tao - the Tao context
1802: - ctx - PetscViewer context or NULL
1804: Options Database Keys:
1805: . -tao_view_solution
1807: Level: advanced
1809: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1810: @*/
1811: PetscErrorCode TaoSolutionMonitor(Tao tao, void *ctx)
1812: {
1814: PetscViewer viewer = (PetscViewer)ctx;
1818: VecView(tao->solution, viewer);
1819: return(0);
1820: }
1822: /*@C
1823: TaoGradientMonitor - Views the gradient at each iteration
1824: It can be turned on from the command line using the
1825: -tao_view_gradient option
1827: Collective on Tao
1829: Input Parameters:
1830: + tao - the Tao context
1831: - ctx - PetscViewer context or NULL
1833: Options Database Keys:
1834: . -tao_view_gradient
1836: Level: advanced
1838: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1839: @*/
1840: PetscErrorCode TaoGradientMonitor(Tao tao, void *ctx)
1841: {
1843: PetscViewer viewer = (PetscViewer)ctx;
1847: VecView(tao->gradient, viewer);
1848: return(0);
1849: }
1851: /*@C
1852: TaoStepDirectionMonitor - Views the step direction at each iteration
1853: It can be turned on from the command line using the
1854: -tao_view_stepdirection option
1856: Collective on Tao
1858: Input Parameters:
1859: + tao - the Tao context
1860: - ctx - PetscViewer context or NULL
1862: Options Database Keys:
1863: . -tao_view_stepdirection
1865: Level: advanced
1867: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1868: @*/
1869: PetscErrorCode TaoStepDirectionMonitor(Tao tao, void *ctx)
1870: {
1872: PetscViewer viewer = (PetscViewer)ctx;
1876: VecView(tao->stepdirection, viewer);
1877: return(0);
1878: }
1880: /*@C
1881: TaoDrawSolutionMonitor - Plots the solution at each iteration.
1882: It can be turned on from the command line using the
1883: -tao_draw_solution option
1885: Collective on Tao
1887: Input Parameters:
1888: + tao - the Tao context
1889: - ctx - TaoMonitorDraw context
1891: Options Database Keys:
1892: . -tao_draw_solution
1894: Level: advanced
1896: .seealso: TaoSolutionMonitor(), TaoSetMonitor(), TaoDrawGradientMonitor
1897: @*/
1898: PetscErrorCode TaoDrawSolutionMonitor(Tao tao, void *ctx)
1899: {
1900: PetscErrorCode ierr;
1901: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1904: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return(0);
1905: VecView(tao->solution,ictx->viewer);
1906: return(0);
1907: }
1909: /*@C
1910: TaoDrawGradientMonitor - Plots the gradient at each iteration.
1911: It can be turned on from the command line using the
1912: -tao_draw_gradient option
1914: Collective on Tao
1916: Input Parameters:
1917: + tao - the Tao context
1918: - ctx - PetscViewer context
1920: Options Database Keys:
1921: . -tao_draw_gradient
1923: Level: advanced
1925: .seealso: TaoGradientMonitor(), TaoSetMonitor(), TaoDrawSolutionMonitor
1926: @*/
1927: PetscErrorCode TaoDrawGradientMonitor(Tao tao, void *ctx)
1928: {
1929: PetscErrorCode ierr;
1930: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1933: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return(0);
1934: VecView(tao->gradient,ictx->viewer);
1935: return(0);
1936: }
1938: /*@C
1939: TaoDrawStepMonitor - Plots the step direction at each iteration.
1940: It can be turned on from the command line using the
1941: -tao_draw_step option
1943: Collective on Tao
1945: Input Parameters:
1946: + tao - the Tao context
1947: - ctx - PetscViewer context
1949: Options Database Keys:
1950: . -tao_draw_step
1952: Level: advanced
1954: .seealso: TaoSetMonitor(), TaoDrawSolutionMonitor
1955: @*/
1956: PetscErrorCode TaoDrawStepMonitor(Tao tao, void *ctx)
1957: {
1959: PetscViewer viewer = (PetscViewer)(ctx);
1962: VecView(tao->stepdirection, viewer);
1963: return(0);
1964: }
1966: /*@C
1967: TaoResidualMonitor - Views the least-squares residual at each iteration.
1968: It can be turned on from the command line using the
1969: -tao_view_ls_residual option
1971: Collective on Tao
1973: Input Parameters:
1974: + tao - the Tao context
1975: - ctx - PetscViewer context or NULL
1977: Options Database Keys:
1978: . -tao_view_ls_residual
1980: Level: advanced
1982: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1983: @*/
1984: PetscErrorCode TaoResidualMonitor(Tao tao, void *ctx)
1985: {
1987: PetscViewer viewer = (PetscViewer)ctx;
1991: VecView(tao->ls_res,viewer);
1992: return(0);
1993: }
1995: /*@
1996: TaoDefaultConvergenceTest - Determines whether the solver should continue iterating
1997: or terminate.
1999: Collective on Tao
2001: Input Parameters:
2002: + tao - the Tao context
2003: - dummy - unused dummy context
2005: Output Parameter:
2006: . reason - the reason for terminating, stored in the Tao object
2008: Notes:
2009: This routine checks the residual in the optimality conditions, the
2010: relative residual in the optimality conditions, the number of function
2011: evaluations, and the function value to test convergence. Some
2012: solvers may use different convergence routines.
2014: Level: developer
2016: .seealso: TaoSetTolerances(),TaoGetConvergedReason(),TaoSetConvergedReason()
2017: @*/
2019: PetscErrorCode TaoDefaultConvergenceTest(Tao tao,void *dummy)
2020: {
2021: PetscInt niter=tao->niter, nfuncs=PetscMax(tao->nfuncs,tao->nfuncgrads);
2022: PetscInt max_funcs=tao->max_funcs;
2023: PetscReal gnorm=tao->residual, gnorm0=tao->gnorm0;
2024: PetscReal f=tao->fc, steptol=tao->steptol,trradius=tao->step;
2025: PetscReal gatol=tao->gatol,grtol=tao->grtol,gttol=tao->gttol;
2026: PetscReal catol=tao->catol,crtol=tao->crtol;
2027: PetscReal fmin=tao->fmin, cnorm=tao->cnorm;
2028: TaoConvergedReason reason=tao->reason;
2029: PetscErrorCode ierr;
2033: if (reason != TAO_CONTINUE_ITERATING) {
2034: return(0);
2035: }
2037: if (PetscIsInfOrNanReal(f)) {
2038: PetscInfo(tao,"Failed to converged, function value is Inf or NaN\n");
2039: reason = TAO_DIVERGED_NAN;
2040: } else if (f <= fmin && cnorm <=catol) {
2041: PetscInfo2(tao,"Converged due to function value %g < minimum function value %g\n", (double)f,(double)fmin);
2042: reason = TAO_CONVERGED_MINF;
2043: } else if (gnorm<= gatol && cnorm <=catol) {
2044: PetscInfo2(tao,"Converged due to residual norm ||g(X)||=%g < %g\n",(double)gnorm,(double)gatol);
2045: reason = TAO_CONVERGED_GATOL;
2046: } else if (f!=0 && PetscAbsReal(gnorm/f) <= grtol && cnorm <= crtol) {
2047: PetscInfo2(tao,"Converged due to residual ||g(X)||/|f(X)| =%g < %g\n",(double)(gnorm/f),(double)grtol);
2048: reason = TAO_CONVERGED_GRTOL;
2049: } else if (gnorm0 != 0 && ((gttol == 0 && gnorm == 0) || gnorm/gnorm0 < gttol) && cnorm <= crtol) {
2050: PetscInfo2(tao,"Converged due to relative residual norm ||g(X)||/||g(X0)|| = %g < %g\n",(double)(gnorm/gnorm0),(double)gttol);
2051: reason = TAO_CONVERGED_GTTOL;
2052: } else if (nfuncs > max_funcs) {
2053: PetscInfo2(tao,"Exceeded maximum number of function evaluations: %D > %D\n", nfuncs,max_funcs);
2054: reason = TAO_DIVERGED_MAXFCN;
2055: } else if (tao->lsflag != 0) {
2056: PetscInfo(tao,"Tao Line Search failure.\n");
2057: reason = TAO_DIVERGED_LS_FAILURE;
2058: } else if (trradius < steptol && niter > 0) {
2059: PetscInfo2(tao,"Trust region/step size too small: %g < %g\n", (double)trradius,(double)steptol);
2060: reason = TAO_CONVERGED_STEPTOL;
2061: } else if (niter >= tao->max_it) {
2062: PetscInfo2(tao,"Exceeded maximum number of iterations: %D > %D\n",niter,tao->max_it);
2063: reason = TAO_DIVERGED_MAXITS;
2064: } else {
2065: reason = TAO_CONTINUE_ITERATING;
2066: }
2067: tao->reason = reason;
2068: return(0);
2069: }
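/*
   Usage sketch: a user-defined convergence test layered on top of
   TaoDefaultConvergenceTest(), installed with TaoSetConvergenceTest().  The routine
   name MyConvergenceTest and the target value are illustrative assumptions.
.vb
   static PetscErrorCode MyConvergenceTest(Tao tao,void *ctx)
   {
     PetscReal f,target = *(PetscReal*)ctx;

     TaoGetObjective(tao,&f);
     if (f <= target) {
       TaoSetConvergedReason(tao,TAO_CONVERGED_USER);   // stop once the objective is low enough
     } else {
       TaoDefaultConvergenceTest(tao,NULL);             // otherwise fall back to the default tests
     }
     return 0;
   }

   PetscReal target = 1.0e-8;                           // must remain valid during TaoSolve()
   TaoSetConvergenceTest(tao,MyConvergenceTest,&target);
.ve
*/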
2071: /*@C
2072: TaoSetOptionsPrefix - Sets the prefix used for searching for all
2073: TAO options in the database.
2075: Logically Collective on Tao
2077: Input Parameters:
2078: + tao - the Tao context
2079: - prefix - the prefix string to prepend to all TAO option requests
2081: Notes:
2082: A hyphen (-) must NOT be given at the beginning of the prefix name.
2083: The first character of all runtime options is AUTOMATICALLY the hyphen.
2085: For example, to distinguish between the runtime options for two
2086: different TAO solvers, one could call
2087: .vb
2088: TaoSetOptionsPrefix(tao1,"sys1_")
2089: TaoSetOptionsPrefix(tao2,"sys2_")
2090: .ve
2092: This would enable use of different options for each system, such as
2093: .vb
2094: -sys1_tao_type blmvm -sys1_tao_gtol 1.e-3
2095: -sys2_tao_type lmvm -sys2_tao_gtol 1.e-4
2096: .ve
2098: Level: advanced
2100: .seealso: TaoAppendOptionsPrefix(), TaoGetOptionsPrefix()
2101: @*/
2103: PetscErrorCode TaoSetOptionsPrefix(Tao tao, const char p[])
2104: {
2108: PetscObjectSetOptionsPrefix((PetscObject)tao,p);
2109: if (tao->linesearch) {
2110: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
2111: }
2112: if (tao->ksp) {
2113: KSPSetOptionsPrefix(tao->ksp,p);
2114: }
2115: return(0);
2116: }
2118: /*@C
2119: TaoAppendOptionsPrefix - Appends to the prefix used for searching for all
2120: TAO options in the database.
2122: Logically Collective on Tao
2124: Input Parameters:
2125: + tao - the Tao solver context
2126: - prefix - the prefix string to prepend to all TAO option requests
2128: Notes:
2129: A hyphen (-) must NOT be given at the beginning of the prefix name.
2130: The first character of all runtime options is AUTOMATICALLY the hyphen.
2132: Level: advanced
2134: .seealso: TaoSetOptionsPrefix(), TaoGetOptionsPrefix()
2135: @*/
2136: PetscErrorCode TaoAppendOptionsPrefix(Tao tao, const char p[])
2137: {
2141: PetscObjectAppendOptionsPrefix((PetscObject)tao,p);
2142: if (tao->linesearch) {
2143: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
2144: }
2145: if (tao->ksp) {
2146: KSPSetOptionsPrefix(tao->ksp,p);
2147: }
2148: return(0);
2149: }
2151: /*@C
2152: TaoGetOptionsPrefix - Gets the prefix used for searching for all
2153: TAO options in the database
2155: Not Collective
2157: Input Parameters:
2158: . tao - the Tao context
2160: Output Parameters:
2161: . prefix - pointer to the prefix string in use
2163: Notes:
2164: On the Fortran side, the user should pass in a string 'prefix' of
2165: sufficient length to hold the prefix.
2167: Level: advanced
2169: .seealso: TaoSetOptionsPrefix(), TaoAppendOptionsPrefix()
2170: @*/
2171: PetscErrorCode TaoGetOptionsPrefix(Tao tao, const char *p[])
2172: {
2173: return PetscObjectGetOptionsPrefix((PetscObject)tao,p);
2174: }
2176: /*@C
2177: TaoSetType - Sets the algorithm used by the Tao optimization solver.
2179: Collective on Tao
2181: Input Parameters:
2182: + tao - the Tao solver context
2183: - type - a known method
2185: Options Database Key:
2186: . -tao_type <type> - Sets the method; use -help for a list
2187: of available methods (for instance, "-tao_type lmvm" or "-tao_type tron")
2189: Available methods include:
2190: + nls - Newton's method with line search for unconstrained minimization
2191: . ntr - Newton's method with trust region for unconstrained minimization
2192: . ntl - Newton's method with trust region, line search for unconstrained minimization
2193: . lmvm - Limited memory variable metric method for unconstrained minimization
2194: . cg - Nonlinear conjugate gradient method for unconstrained minimization
2195: . nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
2196: . tron - Newton Trust Region method for bound constrained minimization
2197: . gpcg - Newton Trust Region method for quadratic bound constrained minimization
2198: . blmvm - Limited memory variable metric method for bound constrained minimization
2199: - pounders - Model-based, derivative-free algorithm for nonlinear least squares
2201: Level: intermediate
2203: .seealso: TaoCreate(), TaoGetType(), TaoType
2205: @*/
2206: PetscErrorCode TaoSetType(Tao tao, TaoType type)
2207: {
2209: PetscErrorCode (*create_xxx)(Tao);
2210: PetscBool issame;
2215: PetscObjectTypeCompare((PetscObject)tao,type,&issame);
2216: if (issame) return(0);
2218: PetscFunctionListFind(TaoList, type, (void(**)(void))&create_xxx);
2219: if (!create_xxx) SETERRQ1(PetscObjectComm((PetscObject)tao),PETSC_ERR_ARG_UNKNOWN_TYPE,"Unable to find requested Tao type %s",type);
2221: /* Destroy the existing solver information */
2222: if (tao->ops->destroy) {
2223: (*tao->ops->destroy)(tao);
2224: }
2225: KSPDestroy(&tao->ksp);
2226: TaoLineSearchDestroy(&tao->linesearch);
2227: VecDestroy(&tao->gradient);
2228: VecDestroy(&tao->stepdirection);
2230: tao->ops->setup = NULL;
2231: tao->ops->solve = NULL;
2232: tao->ops->view = NULL;
2233: tao->ops->setfromoptions = NULL;
2234: tao->ops->destroy = NULL;
2236: tao->setupcalled = PETSC_FALSE;
2238: (*create_xxx)(tao);
2239: PetscObjectChangeTypeName((PetscObject)tao,type);
2240: return(0);
2241: }
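/*
   Usage sketch: the typical create/set-type/set-from-options sequence.  Problem
   setup (solution vector, objective and gradient callbacks) is omitted here.
.vb
   Tao tao;

   TaoCreate(PETSC_COMM_WORLD,&tao);
   TaoSetType(tao,TAOBLMVM);      // programmatic default
   TaoSetFromOptions(tao);        // -tao_type <type> can still override it at runtime
   TaoSolve(tao);
   TaoDestroy(&tao);
.ve
*/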
2243: /*MC
2244: TaoRegister - Adds a method to the TAO package for unconstrained minimization.
2246: Synopsis:
2247: TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2249: Not collective
2251: Input Parameters:
2252: + sname - name of a new user-defined solver
2253: - func - routine to Create method context
2255: Notes:
2256: TaoRegister() may be called multiple times to add several user-defined solvers.
2258: Sample usage:
2259: .vb
2260: TaoRegister("my_solver",MySolverCreate);
2261: .ve
2263: Then, your solver can be chosen with the procedural interface via
2264: $ TaoSetType(tao,"my_solver")
2265: or at runtime via the option
2266: $ -tao_type my_solver
2268: Level: advanced
2270: .seealso: TaoRegisterAll(), TaoRegisterDestroy()
2271: M*/
2272: PetscErrorCode TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2273: {
2277: TaoInitializePackage();
2278: PetscFunctionListAdd(&TaoList,sname, (void (*)(void))func);
2279: return(0);
2280: }
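/*
   Usage sketch: registering a minimal user-defined solver.  MySolverCreate and
   MySolverSolve are illustrative names; filling in tao->ops requires including
   the private header <petsc/private/taoimpl.h>, as this file does.
.vb
   static PetscErrorCode MySolverSolve(Tao tao)
   {
     // iterate here, e.g. calling TaoComputeObjectiveAndGradient() and TaoMonitor()
     tao->reason = TAO_CONVERGED_USER;
     return 0;
   }

   static PetscErrorCode MySolverCreate(Tao tao)
   {
     tao->ops->solve = MySolverSolve;
     return 0;
   }

   TaoRegister("my_solver",MySolverCreate);
   TaoSetType(tao,"my_solver");
.ve
*/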
2282: /*@C
2283: TaoRegisterDestroy - Frees the list of minimization solvers that were
2284: registered by TaoRegister().
2286: Not Collective
2288: Level: advanced
2290: .seealso: TaoRegisterAll(), TaoRegister()
2291: @*/
2292: PetscErrorCode TaoRegisterDestroy(void)
2293: {
2296: PetscFunctionListDestroy(&TaoList);
2297: TaoRegisterAllCalled = PETSC_FALSE;
2298: return(0);
2299: }
2301: /*@
2302: TaoGetIterationNumber - Gets the number of Tao iterations completed
2303: at this time.
2305: Not Collective
2307: Input Parameter:
2308: . tao - Tao context
2310: Output Parameter:
2311: . iter - iteration number
2313: Notes:
2314: For example, during the computation of iteration 2 this would return 1.
2316: Level: intermediate
2318: .seealso: TaoGetLinearSolveIterations(), TaoGetResidualNorm(), TaoGetObjective()
2319: @*/
2320: PetscErrorCode TaoGetIterationNumber(Tao tao,PetscInt *iter)
2321: {
2325: *iter = tao->niter;
2326: return(0);
2327: }
2329: /*@
2330: TaoGetObjective - Gets the current value of the objective function
2331: at this time.
2333: Not Collective
2335: Input Parameter:
2336: . tao - Tao context
2338: Output Parameter:
2339: . value - the current value
2341: Level: intermediate
2343: .seealso: TaoGetLinearSolveIterations(), TaoGetIterationNumber(), TaoGetResidualNorm()
2344: @*/
2345: PetscErrorCode TaoGetObjective(Tao tao,PetscReal *value)
2346: {
2350: *value = tao->fc;
2351: return(0);
2352: }
2354: /*@
2355: TaoGetResidualNorm - Gets the current value of the norm of the residual
2356: at this time.
2358: Not Collective
2360: Input Parameter:
2361: . tao - Tao context
2363: Output Parameter:
2364: . value - the current value
2366: Level: intermediate
2368: Developer Note: This is the 2-norm of the residual, we cannot use TaoGetGradientNorm() because that has
2369: a different meaning. For some reason Tao sometimes calls the gradient the residual.
2371: .seealso: TaoGetLinearSolveIterations(), TaoGetIterationNumber(), TaoGetObjective()
2372: @*/
2373: PetscErrorCode TaoGetResidualNorm(Tao tao,PetscReal *value)
2374: {
2378: *value = tao->residual;
2379: return(0);
2380: }
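/*
   Usage sketch: querying basic solve statistics after TaoSolve(); the print format
   follows the %D/%g conventions used elsewhere in this file.
.vb
   PetscInt  its;
   PetscReal f,gnorm;

   TaoSolve(tao);
   TaoGetIterationNumber(tao,&its);
   TaoGetObjective(tao,&f);
   TaoGetResidualNorm(tao,&gnorm);
   PetscPrintf(PETSC_COMM_WORLD,"iterations %D, f = %g, ||g|| = %g\n",its,(double)f,(double)gnorm);
.ve
*/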
2382: /*@
2383: TaoSetIterationNumber - Sets the current iteration number.
2385: Not Collective
2387: Input Parameters:
2388: + tao - Tao context
2389: - iter - iteration number
2391: Level: developer
2393: .seealso: TaoGetLinearSolveIterations()
2394: @*/
2395: PetscErrorCode TaoSetIterationNumber(Tao tao,PetscInt iter)
2396: {
2401: PetscObjectSAWsTakeAccess((PetscObject)tao);
2402: tao->niter = iter;
2403: PetscObjectSAWsGrantAccess((PetscObject)tao);
2404: return(0);
2405: }
2407: /*@
2408: TaoGetTotalIterationNumber - Gets the total number of Tao iterations
2409: completed. This number keeps accumulating if multiple solves
2410: are called with the Tao object.
2412: Not Collective
2414: Input Parameter:
2415: . tao - Tao context
2417: Output Parameter:
2418: . iter - iteration number
2420: Notes:
2421: The total iteration count is updated after each solve; if there is a current
2422: TaoSolve() in progress then those iterations are not yet counted.
2424: Level: intermediate
2426: .seealso: TaoGetLinearSolveIterations()
2427: @*/
2428: PetscErrorCode TaoGetTotalIterationNumber(Tao tao,PetscInt *iter)
2429: {
2433: *iter = tao->ntotalits;
2434: return(0);
2435: }
2437: /*@
2438: TaoSetTotalIterationNumber - Sets the current total iteration number.
2440: Not Collective
2442: Input Parameters:
2443: + tao - Tao context
2444: - iter - iteration number
2446: Level: developer
2448: .seealso: TaoGetLinearSolveIterations()
2449: @*/
2450: PetscErrorCode TaoSetTotalIterationNumber(Tao tao,PetscInt iter)
2451: {
2456: PetscObjectSAWsTakeAccess((PetscObject)tao);
2457: tao->ntotalits = iter;
2458: PetscObjectSAWsGrantAccess((PetscObject)tao);
2459: return(0);
2460: }
2462: /*@
2463: TaoSetConvergedReason - Sets the termination flag on a Tao object
2465: Logically Collective on Tao
2467: Input Parameters:
2468: + tao - the Tao context
2469: - reason - one of
2470: $ TAO_CONVERGED_GATOL (3),
2471: $ TAO_CONVERGED_GRTOL (4),
2472: $ TAO_CONVERGED_GTTOL (5),
2473: $ TAO_CONVERGED_STEPTOL (6),
2474: $ TAO_CONVERGED_MINF (7), TAO_CONVERGED_USER (8),
2475: $ TAO_DIVERGED_MAXITS (-2),
2476: $ TAO_DIVERGED_NAN (-4),
2477: $ TAO_DIVERGED_MAXFCN (-5),
2478: $ TAO_DIVERGED_LS_FAILURE (-6),
2479: $ TAO_DIVERGED_TR_REDUCTION (-7),
2480: $ TAO_DIVERGED_USER (-8),
2481: $ TAO_CONTINUE_ITERATING (0)
2483: Level: intermediate
2485: @*/
2486: PetscErrorCode TaoSetConvergedReason(Tao tao, TaoConvergedReason reason)
2487: {
2490: tao->reason = reason;
2491: return(0);
2492: }
2494: /*@
2495: TaoGetConvergedReason - Gets the reason the Tao iteration was stopped.
2497: Not Collective
2499: Input Parameter:
2500: . tao - the Tao solver context
2502: Output Parameter:
2503: . reason - one of
2504: $ TAO_CONVERGED_GATOL (3) ||g(X)|| < gatol
2505: $ TAO_CONVERGED_GRTOL (4) ||g(X)|| / f(X) < grtol
2506: $ TAO_CONVERGED_GTTOL (5) ||g(X)|| / ||g(X0)|| < gttol
2507: $ TAO_CONVERGED_STEPTOL (6) step size small
2508: $ TAO_CONVERGED_MINF (7) F < F_min
2509: $ TAO_CONVERGED_USER (8) User defined
2510: $ TAO_DIVERGED_MAXITS (-2) its > maxits
2511: $ TAO_DIVERGED_NAN (-4) Numerical problems
2512: $ TAO_DIVERGED_MAXFCN (-5) fevals > max_funcs
2513: $ TAO_DIVERGED_LS_FAILURE (-6) line search failure
2514: $ TAO_DIVERGED_TR_REDUCTION (-7) trust region failure
2515: $ TAO_DIVERGED_USER (-8) User defined
2516: $ TAO_CONTINUE_ITERATING (0)
2518: where
2519: + X - current solution
2520: . X0 - initial guess
2521: . f(X) - current function value
2522: . f(X*) - true solution (estimated)
2523: . g(X) - current gradient
2524: . its - current iterate number
2525: . maxits - maximum number of iterates
2526: . fevals - number of function evaluations
2527: - max_funcs - maximum number of function evaluations
2529: Level: intermediate
2531: .seealso: TaoSetConvergenceTest(), TaoSetTolerances()
2533: @*/
2534: PetscErrorCode TaoGetConvergedReason(Tao tao, TaoConvergedReason *reason)
2535: {
2539: *reason = tao->reason;
2540: return(0);
2541: }
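/*
   Usage sketch: checking the converged reason after a solve; negative values
   indicate divergence and zero means the solver is still iterating.
.vb
   TaoConvergedReason reason;

   TaoSolve(tao);
   TaoGetConvergedReason(tao,&reason);
   if (reason <= 0) {
     PetscPrintf(PETSC_COMM_WORLD,"Tao did not converge (reason %D)\n",(PetscInt)reason);
   }
.ve
*/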
2543: /*@
2544: TaoGetSolutionStatus - Get the current iterate, objective value,
2545: residual, infeasibility, and termination
2547: Not Collective
2549: Input Parameter:
2550: . tao - the Tao context
2552: Output Parameters:
2553: + its - the current iterate number (>=0)
2554: . f - the current function value
2555: . gnorm - the square of the gradient norm, duality gap, or other measure indicating distance from optimality.
2556: . cnorm - the infeasibility of the current solution with regard to the constraints.
2557: . xdiff - the step length or trust region radius of the most recent iterate.
2558: - reason - The termination reason, which can equal TAO_CONTINUE_ITERATING
2560: Level: intermediate
2562: Note:
2563: TAO returns the values set by the solvers in the routine TaoMonitor().
2565: Note:
2566: If any of the output arguments are set to NULL, no corresponding value will be returned.
2568: .seealso: TaoMonitor(), TaoGetConvergedReason()
2569: @*/
2570: PetscErrorCode TaoGetSolutionStatus(Tao tao, PetscInt *its, PetscReal *f, PetscReal *gnorm, PetscReal *cnorm, PetscReal *xdiff, TaoConvergedReason *reason)
2571: {
2573: if (its) *its=tao->niter;
2574: if (f) *f=tao->fc;
2575: if (gnorm) *gnorm=tao->residual;
2576: if (cnorm) *cnorm=tao->cnorm;
2577: if (reason) *reason=tao->reason;
2578: if (xdiff) *xdiff=tao->step;
2579: return(0);
2580: }
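/*
   Usage sketch: a custom monitor that pulls everything it prints from
   TaoGetSolutionStatus().  MyMonitor is an illustrative name; install it with
   TaoSetMonitor().
.vb
   static PetscErrorCode MyMonitor(Tao tao,void *ctx)
   {
     PetscInt  its;
     PetscReal f,gnorm,cnorm,xdiff;

     TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,NULL);
     return PetscPrintf(PetscObjectComm((PetscObject)tao),"%D: f = %g, ||g|| = %g, cnorm = %g\n",its,(double)f,(double)gnorm,(double)cnorm);
   }

   TaoSetMonitor(tao,MyMonitor,NULL,NULL);
.ve
*/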
2582: /*@C
2583: TaoGetType - Gets the current Tao algorithm.
2585: Not Collective
2587: Input Parameter:
2588: . tao - the Tao solver context
2590: Output Parameter:
2591: . type - Tao method
2593: Level: intermediate
2595: @*/
2596: PetscErrorCode TaoGetType(Tao tao,TaoType *type)
2597: {
2601: *type=((PetscObject)tao)->type_name;
2602: return(0);
2603: }
2605: /*@C
2606: TaoMonitor - Monitor the solver and the current solution. This
2607: routine records the objective value, residual norm, constraint norm, and step
2608: length, and calls any monitors specified by the user.
2610: Input Parameters:
2611: + tao - the Tao context
2612: . its - the current iterate number (>=0)
2613: . f - the current objective function value
2614: . res - the gradient norm, square root of the duality gap, or other measure indicating distance from optimality. This measure will be recorded and
2615: used for some termination tests.
2616: . cnorm - the infeasibility of the current solution with regard to the constraints.
2617: - steplength - multiple of the step direction added to the previous iterate.
2622: Options Database Key:
2623: . -tao_monitor - Use the default monitor, which prints statistics to standard output
2625: .seealso: TaoGetConvergedReason(), TaoMonitorDefault(), TaoSetMonitor()
2627: Level: developer
2629: @*/
2630: PetscErrorCode TaoMonitor(Tao tao, PetscInt its, PetscReal f, PetscReal res, PetscReal cnorm, PetscReal steplength)
2631: {
2633: PetscInt i;
2637: tao->fc = f;
2638: tao->residual = res;
2639: tao->cnorm = cnorm;
2640: tao->step = steplength;
2641: if (!its) {
2642: tao->cnorm0 = cnorm; tao->gnorm0 = res;
2643: }
2644: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(res)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
2645: for (i=0;i<tao->numbermonitors;i++) {
2646: (*tao->monitor[i])(tao,tao->monitorcontext[i]);
2647: }
2648: return(0);
2649: }
2651: /*@
2652: TaoSetConvergenceHistory - Sets the array used to hold the convergence history.
2654: Logically Collective on Tao
2656: Input Parameters:
2657: + tao - the Tao solver context
2658: . obj - array to hold objective value history
2659: . resid - array to hold residual history
2660: . cnorm - array to hold constraint violation history
2661: . lits - integer array holds the number of linear iterations for each Tao iteration
2662: . na - size of obj, resid, and cnorm
2663: - reset - PETSC_TRUE indicates each new minimization resets the history counter to zero,
2664: else it continues storing new values for new minimizations after the old ones
2666: Notes:
2667: If set, TAO will fill the given arrays with the indicated
2668: information at each iteration. If 'obj','resid','cnorm','lits' are
2669: *all* NULL then space (using size na, or 1000 if na is PETSC_DECIDE or
2670: PETSC_DEFAULT) is allocated for the history.
2671: If not all are NULL, then only the non-NULL information categories
2672: will be stored, the others will be ignored.
2674: Any convergence information after iteration number 'na' will not be stored.
2676: This routine is useful, e.g., when running a code for purposes
2677: of accurate performance monitoring, when no I/O should be done
2678: during the section of code that is being timed.
2680: Level: intermediate
2682: .seealso: TaoGetConvergenceHistory()
2684: @*/
2685: PetscErrorCode TaoSetConvergenceHistory(Tao tao, PetscReal obj[], PetscReal resid[], PetscReal cnorm[], PetscInt lits[], PetscInt na,PetscBool reset)
2686: {
2696: if (na == PETSC_DECIDE || na == PETSC_DEFAULT) na = 1000;
2697: if (!obj && !resid && !cnorm && !lits) {
2698: PetscCalloc4(na,&obj,na,&resid,na,&cnorm,na,&lits);
2699: tao->hist_malloc = PETSC_TRUE;
2700: }
2702: tao->hist_obj = obj;
2703: tao->hist_resid = resid;
2704: tao->hist_cnorm = cnorm;
2705: tao->hist_lits = lits;
2706: tao->hist_max = na;
2707: tao->hist_reset = reset;
2708: tao->hist_len = 0;
2709: return(0);
2710: }
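/*
   Usage sketch: recording up to 100 iterations of history in user-supplied arrays;
   the arrays must outlive the solve.
.vb
   PetscReal obj[100],resid[100],cnorm[100];
   PetscInt  lits[100],nhist;

   TaoSetConvergenceHistory(tao,obj,resid,cnorm,lits,100,PETSC_TRUE);
   TaoSolve(tao);
   TaoGetConvergenceHistory(tao,NULL,NULL,NULL,NULL,&nhist);   // nhist entries were filled
.ve
*/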
2712: /*@C
2713: TaoGetConvergenceHistory - Gets the arrays used to hold the convergence history.
2715: Collective on Tao
2717: Input Parameter:
2718: . tao - the Tao context
2720: Output Parameters:
2721: + obj - array used to hold objective value history
2722: . resid - array used to hold residual history
2723: . cnorm - array used to hold constraint violation history
2724: . lits - integer array used to hold linear solver iteration count
2725: - nhist - size of obj, resid, cnorm, and lits
2727: Notes:
2728: This routine must be preceded by calls to TaoSetConvergenceHistory()
2729: and TaoSolve(), otherwise it returns useless information.
2731: The calling sequence for this routine in Fortran is
2732: $ call TaoGetConvergenceHistory(Tao tao, PetscInt nhist, PetscErrorCode ierr)
2734: This routine is useful, e.g., when running a code for purposes
2735: of accurate performance monitoring, when no I/O should be done
2736: during the section of code that is being timed.
2738: Level: advanced
2740: .seealso: TaoSetConvergenceHistory()
2742: @*/
2743: PetscErrorCode TaoGetConvergenceHistory(Tao tao, PetscReal **obj, PetscReal **resid, PetscReal **cnorm, PetscInt **lits, PetscInt *nhist)
2744: {
2747: if (obj) *obj = tao->hist_obj;
2748: if (cnorm) *cnorm = tao->hist_cnorm;
2749: if (resid) *resid = tao->hist_resid;
2750: if (nhist) *nhist = tao->hist_len;
2751: return(0);
2752: }
2754: /*@
2755: TaoSetApplicationContext - Sets the optional user-defined context for
2756: a solver.
2758: Logically Collective on Tao
2760: Input Parameters:
2761: + tao - the Tao context
2762: - usrP - optional user context
2764: Level: intermediate
2766: .seealso: TaoGetApplicationContext()
2767: @*/
2768: PetscErrorCode TaoSetApplicationContext(Tao tao,void *usrP)
2769: {
2772: tao->user = usrP;
2773: return(0);
2774: }
2776: /*@
2777: TaoGetApplicationContext - Gets the user-defined context for a
2778: TAO solver.
2780: Not Collective
2782: Input Parameter:
2783: . tao - Tao context
2785: Output Parameter:
2786: . usrP - address of a pointer where the user context will be returned
2788: Level: intermediate
2790: .seealso: TaoSetApplicationContext()
2791: @*/
2792: PetscErrorCode TaoGetApplicationContext(Tao tao,void *usrP)
2793: {
2796: *(void**)usrP = tao->user;
2797: return(0);
2798: }
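/*
   Usage sketch: storing and retrieving an application context.  AppCtx is an
   illustrative user-defined type; note that TaoGetApplicationContext() expects the
   address of a pointer.
.vb
   typedef struct {
     PetscReal alpha;
   } AppCtx;

   AppCtx  user;
   AppCtx *ctx;

   TaoSetApplicationContext(tao,&user);
   // later, for example inside an objective or monitor callback
   TaoGetApplicationContext(tao,&ctx);
.ve
*/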
2800: /*@
2801: TaoSetGradientNorm - Sets the matrix used to define the inner product that measures the size of the gradient.
2803: Collective on tao
2805: Input Parameters:
2806: + tao - the Tao context
2807: - M - matrix defining the inner product used to measure the gradient norm
2809: Level: beginner
2811: .seealso: TaoGetGradientNorm(), TaoGradientNorm()
2812: @*/
2813: PetscErrorCode TaoSetGradientNorm(Tao tao, Mat M)
2814: {
2819: PetscObjectReference((PetscObject)M);
2820: MatDestroy(&tao->gradient_norm);
2821: VecDestroy(&tao->gradient_norm_tmp);
2822: tao->gradient_norm = M;
2823: MatCreateVecs(M, NULL, &tao->gradient_norm_tmp);
2824: return(0);
2825: }
2827: /*@
2828: TaoGetGradientNorm - Returns the matrix used to define the inner product for measuring the size of the gradient.
2830: Not Collective
2832: Input Parameter:
2833: . tao - Tao context
2835: Output Parameter:
2836: . M - the matrix defining the inner product, or NULL if none has been set
2838: Level: beginner
2840: .seealso: TaoSetGradientNorm(), TaoGradientNorm()
2841: @*/
2842: PetscErrorCode TaoGetGradientNorm(Tao tao, Mat *M)
2843: {
2846: *M = tao->gradient_norm;
2847: return(0);
2848: }
2850: /*@C
2851: TaoGradientNorm - Compute the norm with respect to the inner product the user has set.
2853: Collective on tao
2855: Input Parameters:
2856: + tao - the Tao context
2857: . gradient - the gradient vector whose norm is to be computed
2858: - norm - the norm type
2860: Output Parameter:
2861: . gnorm - the gradient norm
2863: Level: developer
2865: .seealso: TaoSetGradientNorm(), TaoGetGradientNorm()
2866: @*/
2867: PetscErrorCode TaoGradientNorm(Tao tao, Vec gradient, NormType type, PetscReal *gnorm)
2868: {
2876: if (tao->gradient_norm) {
2877: PetscScalar gnorms;
2879: if (type != NORM_2) SETERRQ(PetscObjectComm((PetscObject)gradient), PETSC_ERR_ARG_WRONG, "Norm type must be NORM_2 if an inner product for the gradient norm is set.");
2880: MatMult(tao->gradient_norm, gradient, tao->gradient_norm_tmp);
2881: VecDot(gradient, tao->gradient_norm_tmp, &gnorms);
2882: *gnorm = PetscRealPart(PetscSqrtScalar(gnorms));
2883: } else {
2884: VecNorm(gradient, type, gnorm);
2885: }
2886: return(0);
2887: }
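/*
   Usage sketch: measuring gradient norms in a matrix-induced inner product.  M is
   assumed to be a symmetric positive definite matrix (for example a mass matrix)
   and g a gradient-sized vector; both are set up elsewhere.
.vb
   PetscReal gnorm;

   TaoSetGradientNorm(tao,M);                 // norms become sqrt(g^T M g)
   TaoGradientNorm(tao,g,NORM_2,&gnorm);      // only NORM_2 is supported when M is set
.ve
*/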
2889: /*@C
2890: TaoMonitorDrawCtxCreate - Creates the monitor context used by the drawing monitors such as TaoDrawSolutionMonitor()
2892: Collective on Tao
2894: Output Parameter:
2895: . ctx - the monitor context
2897: Options Database:
2898: . -tao_draw_solution_initial - show initial guess as well as current solution
2900: Level: intermediate
2902: .seealso: TaoSetMonitor(), TaoMonitorDefault(), VecView(), TaoMonitorDrawCtxDestroy()
2903: @*/
2904: PetscErrorCode TaoMonitorDrawCtxCreate(MPI_Comm comm,const char host[],const char label[],int x,int y,int m,int n,PetscInt howoften,TaoMonitorDrawCtx *ctx)
2905: {
2906: PetscErrorCode ierr;
2909: PetscNew(ctx);
2910: PetscViewerDrawOpen(comm,host,label,x,y,m,n,&(*ctx)->viewer);
2911: PetscViewerSetFromOptions((*ctx)->viewer);
2912: (*ctx)->howoften = howoften;
2913: return(0);
2914: }
2916: /*@C
2917: TaoMonitorDrawCtxDestroy - Destroys the monitor context for TaoDrawSolutionMonitor()
2919: Collective on Tao
2921: Input Parameters:
2922: . ctx - the monitor context
2924: Level: intermediate
2926: .seealso: TaoSetMonitor(), TaoMonitorDefault(), VecView(), TaoDrawSolutionMonitor()
2927: @*/
2928: PetscErrorCode TaoMonitorDrawCtxDestroy(TaoMonitorDrawCtx *ictx)
2929: {
2933: PetscViewerDestroy(&(*ictx)->viewer);
2934: PetscFree(*ictx);
2935: return(0);
2936: }
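/*
   Usage sketch: wiring a draw context to TaoDrawSolutionMonitor() by hand instead of
   using -tao_draw_solution.  The window size and howoften value are illustrative.
.vb
   TaoMonitorDrawCtx drawctx;

   TaoMonitorDrawCtxCreate(PETSC_COMM_WORLD,NULL,"solution",PETSC_DECIDE,PETSC_DECIDE,300,300,1,&drawctx);
   TaoSetMonitor(tao,TaoDrawSolutionMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
.ve
*/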