Actual source code: taosolver.c
petsc-3.5.4 2015-05-23
1: #define TAO_DLL
3: #include <petsc-private/taoimpl.h> /*I "petsctao.h" I*/
5: PetscBool TaoRegisterAllCalled = PETSC_FALSE;
6: PetscFunctionList TaoList = NULL;
8: PetscClassId TAO_CLASSID;
9: PetscLogEvent Tao_Solve, Tao_ObjectiveEval, Tao_GradientEval, Tao_ObjGradientEval, Tao_HessianEval, Tao_ConstraintsEval, Tao_JacobianEval;
11: const char *TaoSubSetTypes[] = { "subvec","mask","matrixfree","TaoSubSetType","TAO_SUBSET_",0};
15: /*@
16: TaoCreate - Creates a TAO solver
18: Collective on MPI_Comm
20: Input Parameter:
21: . comm - MPI communicator
23: Output Parameter:
24: . newtao - the new Tao context
26: Available methods include:
27: + nls - Newton's method with line search for unconstrained minimization
28: . ntr - Newton's method with trust region for unconstrained minimization
29: . ntl - Newton's method with trust region, line search for unconstrained minimization
30: . lmvm - Limited memory variable metric method for unconstrained minimization
31: . cg - Nonlinear conjugate gradient method for unconstrained minimization
32: . nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
33: . tron - Newton Trust Region method for bound constrained minimization
34: . gpcg - Newton Trust Region method for quadratic bound constrained minimization
35: . blmvm - Limited memory variable metric method for bound constrained minimization
36: . lcl - Linearly constrained Lagrangian method for pde-constrained minimization
37: - pounders - Model-based algorithm for nonlinear least squares
39: Options Database Keys:
40: . -tao_type - select which method TAO should use
42: Level: beginner
44: .seealso: TaoSolve(), TaoDestroy()
45: @*/
46: PetscErrorCode TaoCreate(MPI_Comm comm, Tao *newtao)
47: {
49: Tao tao;
53: *newtao = NULL;
55: TaoInitializePackage();
56: TaoLineSearchInitializePackage();
58: PetscHeaderCreate(tao,_p_Tao, struct _TaoOps, TAO_CLASSID,"Tao",0,0,comm,TaoDestroy,TaoView);
59: tao->ops->computeobjective=0;
60: tao->ops->computeobjectiveandgradient=0;
61: tao->ops->computegradient=0;
62: tao->ops->computehessian=0;
63: tao->ops->computeseparableobjective=0;
64: tao->ops->computeconstraints=0;
65: tao->ops->computejacobian=0;
66: tao->ops->computejacobianequality=0;
67: tao->ops->computejacobianinequality=0;
68: tao->ops->computeequalityconstraints=0;
69: tao->ops->computeinequalityconstraints=0;
70: tao->ops->convergencetest=TaoDefaultConvergenceTest;
71: tao->ops->convergencedestroy=0;
72: tao->ops->computedual=0;
73: tao->ops->setup=0;
74: tao->ops->solve=0;
75: tao->ops->view=0;
76: tao->ops->setfromoptions=0;
77: tao->ops->destroy=0;
79: tao->solution=NULL;
80: tao->gradient=NULL;
81: tao->sep_objective = NULL;
82: tao->constraints=NULL;
83: tao->constraints_equality=NULL;
84: tao->constraints_inequality=NULL;
85: tao->stepdirection=NULL;
86: tao->XL = NULL;
87: tao->XU = NULL;
88: tao->IL = NULL;
89: tao->IU = NULL;
90: tao->DI = NULL;
91: tao->DE = NULL;
92: tao->hessian = NULL;
93: tao->hessian_pre = NULL;
94: tao->jacobian = NULL;
95: tao->jacobian_pre = NULL;
96: tao->jacobian_state = NULL;
97: tao->jacobian_state_pre = NULL;
98: tao->jacobian_state_inv = NULL;
99: tao->jacobian_design = NULL;
100: tao->jacobian_design_pre = NULL;
101: tao->jacobian_equality = NULL;
102: tao->jacobian_equality_pre = NULL;
103: tao->jacobian_inequality = NULL;
104: tao->jacobian_inequality_pre = NULL;
105: tao->state_is = NULL;
106: tao->design_is = NULL;
108: tao->max_it = 10000;
109: tao->max_funcs = 10000;
110: #if defined(PETSC_USE_REAL_SINGLE)
111: tao->fatol = 1e-5;
112: tao->frtol = 1e-5;
113: tao->gatol = 1e-5;
114: tao->grtol = 1e-5;
115: #else
116: tao->fatol = 1e-8;
117: tao->frtol = 1e-8;
118: tao->gatol = 1e-8;
119: tao->grtol = 1e-8;
120: #endif
121: tao->gttol = 0.0;
122: tao->catol = 0.0;
123: tao->crtol = 0.0;
124: tao->xtol = 0.0;
125: tao->steptol = 0.0;
126: tao->trust0 = PETSC_INFINITY;
127: tao->fmin = PETSC_NINFINITY;
128: tao->hist_reset = PETSC_TRUE;
129: tao->hist_max = 0;
130: tao->hist_len = 0;
131: tao->hist_obj = NULL;
132: tao->hist_resid = NULL;
133: tao->hist_cnorm = NULL;
135: tao->numbermonitors=0;
136: tao->viewsolution=PETSC_FALSE;
137: tao->viewhessian=PETSC_FALSE;
138: tao->viewgradient=PETSC_FALSE;
139: tao->viewjacobian=PETSC_FALSE;
140: tao->viewconstraints = PETSC_FALSE;
142: TaoResetStatistics(tao);
143: *newtao = tao;
144: return(0);
145: }
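/* Illustrative usage sketch (editor's addition, not from the original taosolver.c):
   the minimal create/configure/destroy life cycle of a Tao object.  Assumes PETSc
   has already been initialized with PetscInitialize(). */
static PetscErrorCode ExampleTaoLifeCycle(MPI_Comm comm)
{
  Tao            tao;
  PetscErrorCode ierr;

  ierr = TaoCreate(comm,&tao);CHKERRQ(ierr);     /* allocate the solver context */
  ierr = TaoSetType(tao,TAOLMVM);CHKERRQ(ierr);  /* pick a method (here lmvm) */
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);   /* let -tao_type etc. override */
  /* ... set the initial vector and callbacks, call TaoSolve(), ... */
  ierr = TaoDestroy(&tao);CHKERRQ(ierr);         /* free the solver context */
  return 0;
}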
149: /*@
150: TaoSolve - Solves an optimization problem min F(x) s.t. l <= x <= u
152: Collective on Tao
154: Input Parameters:
155: . tao - the Tao context
157: Notes:
158: The user must set up the Tao with calls to TaoSetInitialVector(),
159: TaoSetObjectiveRoutine(),
160: TaoSetGradientRoutine(), and (if using a second-order method) TaoSetHessianRoutine().
162: Level: beginner
164: .seealso: TaoCreate(), TaoSetObjectiveRoutine(), TaoSetGradientRoutine(), TaoSetHessianRoutine()
165: @*/
166: PetscErrorCode TaoSolve(Tao tao)
167: {
168: PetscErrorCode ierr;
169: static PetscBool set = PETSC_FALSE;
173: PetscCitationsRegister("@TechReport{tao-user-ref,\n"
174: "title = {Toolkit for Advanced Optimization (TAO) Users Manual},\n"
175: "author = {Todd Munson and Jason Sarich and Stefan Wild and Steve Benson and Lois Curfman McInnes},\n"
176: "Institution = {Argonne National Laboratory},\n"
177: "Year = 2014,\n"
178: "Number = {ANL/MCS-TM-322 - Revision 3.5},\n"
179: "url = {http://www.mcs.anl.gov/tao}\n}\n",&set);
181: TaoSetUp(tao);
182: TaoResetStatistics(tao);
183: if (tao->linesearch) {
184: TaoLineSearchReset(tao->linesearch);
185: }
187: PetscLogEventBegin(Tao_Solve,tao,0,0,0);
188: if (tao->ops->solve){ (*tao->ops->solve)(tao); }
189: PetscLogEventEnd(Tao_Solve,tao,0,0,0);
191: TaoViewFromOptions(tao,NULL,"-tao_view");
193: if (tao->printreason) {
194: if (tao->reason > 0) {
195: PetscPrintf(((PetscObject)tao)->comm,"TAO solve converged due to %s\n",TaoConvergedReasons[tao->reason]);
196: } else {
197: PetscPrintf(((PetscObject)tao)->comm,"TAO solve did not converge due to %s\n",TaoConvergedReasons[tao->reason]);
198: }
199: }
200: return(0);
201: }
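/* Illustrative driver sketch (editor's addition, not from the original taosolver.c).
   FormFunctionGradient and appctx are hypothetical user-supplied names; the TAO
   calls themselves are the standard petsc-3.5 API. */
static PetscErrorCode ExampleDriver(MPI_Comm comm,Vec x0,PetscErrorCode (*FormFunctionGradient)(Tao,Vec,PetscReal*,Vec,void*),void *appctx)
{
  Tao                tao;
  TaoConvergedReason reason;
  PetscErrorCode     ierr;

  ierr = TaoCreate(comm,&tao);CHKERRQ(ierr);
  ierr = TaoSetType(tao,TAOLMVM);CHKERRQ(ierr);
  ierr = TaoSetInitialVector(tao,x0);CHKERRQ(ierr);
  ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,appctx);CHKERRQ(ierr);
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);
  ierr = TaoSolve(tao);CHKERRQ(ierr);
  ierr = TaoGetConvergedReason(tao,&reason);CHKERRQ(ierr);
  if (reason <= 0) {
    ierr = PetscPrintf(comm,"TAO did not converge (reason %d)\n",(int)reason);CHKERRQ(ierr);
  }
  ierr = TaoDestroy(&tao);CHKERRQ(ierr);
  return 0;
}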
205: /*@
206: TaoSetUp - Sets up the internal data structures for the later use
207: of a Tao solver
209: Collective on tao
211: Input Parameters:
212: . tao - the TAO context
214: Notes:
215: The user will not need to explicitly call TaoSetUp(), as it will
216: automatically be called in TaoSolve(). However, if the user
217: desires to call it explicitly, it should come after TaoCreate()
218: and any TaoSetSomething() routines, but before TaoSolve().
220: Level: advanced
222: .seealso: TaoCreate(), TaoSolve()
223: @*/
224: PetscErrorCode TaoSetUp(Tao tao)
225: {
230: if (tao->setupcalled) return(0);
232: if (!tao->solution) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must call TaoSetInitialVector");
233: if (tao->ops->setup) {
234: (*tao->ops->setup)(tao);
235: }
236: tao->setupcalled = PETSC_TRUE;
237: return(0);
238: }
242: /*@
243: TaoDestroy - Destroys the TAO context that was created with
244: TaoCreate()
246: Collective on Tao
248: Input Parameter:
249: . tao - the Tao context
251: Level: beginner
253: .seealso: TaoCreate(), TaoSolve()
254: @*/
255: PetscErrorCode TaoDestroy(Tao *tao)
256: {
260: if (!*tao) return(0);
262: if (--((PetscObject)*tao)->refct > 0) {*tao=0;return(0);}
264: if ((*tao)->ops->destroy) {
265: (*((*tao))->ops->destroy)(*tao);
266: }
267: KSPDestroy(&(*tao)->ksp);
268: TaoLineSearchDestroy(&(*tao)->linesearch);
270: if ((*tao)->ops->convergencedestroy) {
271: (*(*tao)->ops->convergencedestroy)((*tao)->cnvP);
272: if ((*tao)->jacobian_state_inv) {
273: MatDestroy(&(*tao)->jacobian_state_inv);
274: }
275: }
276: VecDestroy(&(*tao)->solution);
277: VecDestroy(&(*tao)->gradient);
279: VecDestroy(&(*tao)->XL);
280: VecDestroy(&(*tao)->XU);
281: VecDestroy(&(*tao)->IL);
282: VecDestroy(&(*tao)->IU);
283: VecDestroy(&(*tao)->DE);
284: VecDestroy(&(*tao)->DI);
285: VecDestroy(&(*tao)->constraints_equality);
286: VecDestroy(&(*tao)->constraints_inequality);
287: VecDestroy(&(*tao)->stepdirection);
288: MatDestroy(&(*tao)->hessian_pre);
289: MatDestroy(&(*tao)->hessian);
290: MatDestroy(&(*tao)->jacobian_pre);
291: MatDestroy(&(*tao)->jacobian);
292: MatDestroy(&(*tao)->jacobian_state_pre);
293: MatDestroy(&(*tao)->jacobian_state);
294: MatDestroy(&(*tao)->jacobian_state_inv);
295: MatDestroy(&(*tao)->jacobian_design);
296: MatDestroy(&(*tao)->jacobian_equality);
297: MatDestroy(&(*tao)->jacobian_equality_pre);
298: MatDestroy(&(*tao)->jacobian_inequality);
299: MatDestroy(&(*tao)->jacobian_inequality_pre);
300: ISDestroy(&(*tao)->state_is);
301: ISDestroy(&(*tao)->design_is);
302: TaoCancelMonitors(*tao);
303: PetscHeaderDestroy(tao);
304: return(0);
305: }
309: /*@
310: TaoSetFromOptions - Sets various Tao parameters from user
311: options.
313: Collective on Tao
315: Input Parameter:
316: . tao - the Tao solver context
318: Options Database Keys:
319: + -tao_type <type> - The algorithm that TAO uses (lmvm, nls, etc.)
320: . -tao_fatol <fatol> - absolute error tolerance in function value
321: . -tao_frtol <frtol> - relative error tolerance in function value
322: . -tao_gatol <gatol> - absolute error tolerance for ||gradient||
323: . -tao_grtol <grtol> - relative error tolerance for ||gradient||
324: . -tao_gttol <gttol> - reduction of ||gradient|| relative to initial gradient
325: . -tao_max_it <max> - sets maximum number of iterations
326: . -tao_max_funcs <max> - sets maximum number of function evaluations
327: . -tao_fmin <fmin> - stop if function value reaches fmin
328: . -tao_steptol <tol> - stop if trust region radius less than <tol>
329: . -tao_trust0 <t> - initial trust region radius
330: . -tao_monitor - prints function value and residual at each iteration
331: . -tao_smonitor - same as tao_monitor, but truncates very small values
332: . -tao_cmonitor - prints function value, residual, and constraint norm at each iteration
333: . -tao_view_solution - prints solution vector at each iteration
334: . -tao_view_separableobjective - prints separable objective vector at each iteration
335: . -tao_view_step - prints step direction vector at each iteration
336: . -tao_view_gradient - prints gradient vector at each iteration
337: . -tao_draw_solution - graphically view solution vector at each iteration
338: . -tao_draw_step - graphically view step vector at each iteration
339: . -tao_draw_gradient - graphically view gradient at each iteration
340: . -tao_fd_gradient - use gradient computed with finite differences
341: . -tao_cancelmonitors - cancels all monitors (except those set with command line)
342: . -tao_view - prints information about the Tao after solving
343: - -tao_converged_reason - prints the reason TAO stopped iterating
345: Notes:
346: To see all options, run your program with the -help option or consult the
347: user's manual. Should be called after TaoCreate() but before TaoSolve()
349: Level: beginner
350: @*/
351: PetscErrorCode TaoSetFromOptions(Tao tao)
352: {
354: const TaoType default_type = TAOLMVM;
355: const char *prefix;
356: char type[256], monfilename[PETSC_MAX_PATH_LEN];
357: PetscViewer monviewer;
358: PetscBool flg;
359: MPI_Comm comm;
363: PetscObjectGetComm((PetscObject)tao,&comm);
364: TaoGetOptionsPrefix(tao,&prefix);
365: /* So no warnings are given about unused options */
366: PetscOptionsHasName(prefix,"-tao_ls_type",&flg);
368: PetscObjectOptionsBegin((PetscObject)tao);
369: {
370: if (!TaoRegisterAllCalled) {
371: TaoRegisterAll();
372: }
373: if (((PetscObject)tao)->type_name) {
374: default_type = ((PetscObject)tao)->type_name;
375: }
376: /* Check for type from options */
377: PetscOptionsFList("-tao_type","Tao Solver type","TaoSetType",TaoList,default_type,type,256,&flg);
378: if (flg) {
379: TaoSetType(tao,type);
380: } else if (!((PetscObject)tao)->type_name) {
381: TaoSetType(tao,default_type);
382: }
384: PetscOptionsReal("-tao_fatol","Stop if solution within","TaoSetTolerances",tao->fatol,&tao->fatol,&flg);
385: PetscOptionsReal("-tao_frtol","Stop if relative solution within","TaoSetTolerances",tao->frtol,&tao->frtol,&flg);
386: PetscOptionsReal("-tao_catol","Stop if constraints violations within","TaoSetConstraintTolerances",tao->catol,&tao->catol,&flg);
387: PetscOptionsReal("-tao_crtol","Stop if relative contraint violations within","TaoSetConstraintTolerances",tao->crtol,&tao->crtol,&flg);
388: PetscOptionsReal("-tao_gatol","Stop if norm of gradient less than","TaoSetTolerances",tao->gatol,&tao->gatol,&flg);
389: PetscOptionsReal("-tao_grtol","Stop if norm of gradient divided by the function value is less than","TaoSetTolerances",tao->grtol,&tao->grtol,&flg);
390: PetscOptionsReal("-tao_gttol","Stop if the norm of the gradient is less than the norm of the initial gradient times tol","TaoSetTolerances",tao->gttol,&tao->gttol,&flg);
391: PetscOptionsInt("-tao_max_it","Stop if iteration number exceeds","TaoSetMaximumIterations",tao->max_it,&tao->max_it,&flg);
392: PetscOptionsInt("-tao_max_funcs","Stop if number of function evaluations exceeds","TaoSetMaximumFunctionEvaluations",tao->max_funcs,&tao->max_funcs,&flg);
393: PetscOptionsReal("-tao_fmin","Stop if function less than","TaoSetFunctionLowerBound",tao->fmin,&tao->fmin,&flg);
394: PetscOptionsReal("-tao_steptol","Stop if step size or trust region radius less than","",tao->steptol,&tao->steptol,&flg);
395: PetscOptionsReal("-tao_trust0","Initial trust region radius","TaoSetTrustRegionRadius",tao->trust0,&tao->trust0,&flg);
397: PetscOptionsString("-tao_view_solution","view solution vector after each evaluation","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
398: if (flg) {
399: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
400: TaoSetMonitor(tao,TaoSolutionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
401: }
403: PetscOptionsBool("-tao_converged_reason","Print reason for TAO converged","TaoSolve",flg,&flg,NULL);
404: if (flg) {
405: tao->printreason = PETSC_TRUE;
406: }
407: PetscOptionsString("-tao_view_gradient","view gradient vector after each evaluation","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
408: if (flg) {
409: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
410: TaoSetMonitor(tao,TaoGradientMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
411: }
413: PetscOptionsString("-tao_view_stepdirection","view step direction vector after each iteration","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
414: if (flg) {
415: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
416: TaoSetMonitor(tao,TaoStepDirectionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
417: }
419: PetscOptionsString("-tao_view_separableobjective","view separable objective vector after each evaluation","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
420: if (flg) {
421: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
422: TaoSetMonitor(tao,TaoSeparableObjectiveMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
423: }
425: PetscOptionsString("-tao_monitor","Use the default convergence monitor","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
426: if (flg) {
427: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
428: TaoSetMonitor(tao,TaoDefaultMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
429: }
431: PetscOptionsString("-tao_smonitor","Use the short convergence monitor","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
432: if (flg) {
433: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
434: TaoSetMonitor(tao,TaoDefaultSMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
435: }
437: PetscOptionsString("-tao_cmonitor","Use the default convergence monitor with constraint norm","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
438: if (flg) {
439: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
440: TaoSetMonitor(tao,TaoDefaultCMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
441: }
444: PetscOptionsBool("-tao_cancelmonitors","cancel all monitors and call any registered destroy routines","TaoCancelMonitors",PETSC_FALSE,&flg,NULL);
445: if (flg) {TaoCancelMonitors(tao);}
447: PetscOptionsBool("-tao_draw_solution","Plot solution vector at each iteration","TaoSetMonitor",PETSC_FALSE,&flg,NULL);
448: if (flg) {
449: TaoSetMonitor(tao,TaoDrawSolutionMonitor,NULL,NULL);
450: }
452: PetscOptionsBool("-tao_draw_step","plots step direction at each iteration","TaoSetMonitor",PETSC_FALSE,&flg,NULL);
453: if (flg) {
454: TaoSetMonitor(tao,TaoDrawStepMonitor,NULL,NULL);
455: }
457: PetscOptionsBool("-tao_draw_gradient","plots gradient at each iteration","TaoSetMonitor",PETSC_FALSE,&flg,NULL);
458: if (flg) {
459: TaoSetMonitor(tao,TaoDrawGradientMonitor,NULL,NULL);
460: }
461: PetscOptionsBool("-tao_fd_gradient","compute gradient using finite differences","TaoDefaultComputeGradient",PETSC_FALSE,&flg,NULL);
462: if (flg) {
463: TaoSetGradientRoutine(tao,TaoDefaultComputeGradient,NULL);
464: }
465: PetscOptionsEnum("-tao_subset_type","subset type", "", TaoSubSetTypes,(PetscEnum)tao->subset_type, (PetscEnum*)&tao->subset_type, 0);
467: if (tao->ops->setfromoptions) {
468: (*tao->ops->setfromoptions)(tao);
469: }
470: }
471: PetscOptionsEnd();
472: return(0);
473: }
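/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   TaoSetFromOptions() is normally called after the callbacks and any hard-wired
   settings, so that command-line options such as
     -tao_type lmvm -tao_gatol 1e-6 -tao_max_it 200 -tao_monitor
   can override them at run time. */
static PetscErrorCode ExampleConfigureFromOptions(Tao tao)
{
  PetscErrorCode ierr;

  ierr = TaoSetTolerances(tao,PETSC_DEFAULT,PETSC_DEFAULT,1e-6,PETSC_DEFAULT,PETSC_DEFAULT);CHKERRQ(ierr); /* in-code default */
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);                                                            /* run-time overrides */
  return 0;
}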
477: /*@C
478: TaoView - Prints information about the Tao
480: Collective on Tao
482: Input Parameters:
483: + tao - the Tao context
484: - viewer - visualization context
486: Options Database Key:
487: . -tao_view - Calls TaoView() at the end of TaoSolve()
489: Notes:
490: The available visualization contexts include
491: + PETSC_VIEWER_STDOUT_SELF - standard output (default)
492: - PETSC_VIEWER_STDOUT_WORLD - synchronized standard
493: output where only the first processor opens
494: the file. All other processors send their
495: data to the first processor to print.
497: Level: beginner
499: .seealso: PetscViewerASCIIOpen()
500: @*/
501: PetscErrorCode TaoView(Tao tao, PetscViewer viewer)
502: {
503: PetscErrorCode ierr;
504: PetscBool isascii,isstring;
505: const TaoType type;
509: if (!viewer) {
510: PetscViewerASCIIGetStdout(((PetscObject)tao)->comm,&viewer);
511: }
515: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
516: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
517: if (isascii) {
518: PetscObjectPrintClassNamePrefixType((PetscObject)tao,viewer);
519: PetscViewerASCIIPushTab(viewer);
521: if (tao->ops->view) {
522: PetscViewerASCIIPushTab(viewer);
523: (*tao->ops->view)(tao,viewer);
524: PetscViewerASCIIPopTab(viewer);
525: }
526: if (tao->linesearch) {
527: PetscObjectPrintClassNamePrefixType((PetscObject)(tao->linesearch),viewer);
528: }
529: if (tao->ksp) {
530: PetscObjectPrintClassNamePrefixType((PetscObject)(tao->ksp),viewer);
531: PetscViewerASCIIPrintf(viewer,"total KSP iterations: %D\n",tao->ksp_its);
532: }
533: if (tao->XL || tao->XU) {
534: PetscViewerASCIIPrintf(viewer,"Active Set subset type: %s\n",TaoSubSetTypes[tao->subset_type]);
535: }
537: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances: fatol=%g,",(double)tao->fatol);
538: ierr=PetscViewerASCIIPrintf(viewer," frtol=%g\n",(double)tao->frtol);
540: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances: gatol=%g,",(double)tao->gatol);
541: ierr=PetscViewerASCIIPrintf(viewer," steptol=%g,",(double)tao->steptol);
542: ierr=PetscViewerASCIIPrintf(viewer," gttol=%g\n",(double)tao->gttol);
544: PetscViewerASCIIPrintf(viewer,"Residual in Function/Gradient:=%g\n",(double)tao->residual);
546: if (tao->cnorm>0 || tao->catol>0 || tao->crtol>0){
547: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances:");
548: ierr=PetscViewerASCIIPrintf(viewer," catol=%g,",(double)tao->catol);
549: ierr=PetscViewerASCIIPrintf(viewer," crtol=%g\n",(double)tao->crtol);
550: PetscViewerASCIIPrintf(viewer,"Residual in Constraints:=%g\n",(double)tao->cnorm);
551: }
553: if (tao->trust < tao->steptol){
554: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances: steptol=%g\n",(double)tao->steptol);
555: ierr=PetscViewerASCIIPrintf(viewer,"Final trust region radius:=%g\n",(double)tao->trust);
556: }
558: if (tao->fmin>-1.e25){
559: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances: function minimum=%g\n",(double)tao->fmin);
560: }
561: PetscViewerASCIIPrintf(viewer,"Objective value=%g\n",(double)tao->fc);
563: PetscViewerASCIIPrintf(viewer,"total number of iterations=%D, ",tao->niter);
564: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_it);
566: if (tao->nfuncs>0){
567: PetscViewerASCIIPrintf(viewer,"total number of function evaluations=%D,",tao->nfuncs);
568: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
569: }
570: if (tao->ngrads>0){
571: PetscViewerASCIIPrintf(viewer,"total number of gradient evaluations=%D,",tao->ngrads);
572: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
573: }
574: if (tao->nfuncgrads>0){
575: PetscViewerASCIIPrintf(viewer,"total number of function/gradient evaluations=%D,",tao->nfuncgrads);
576: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_funcs);
577: }
578: if (tao->nhess>0){
579: PetscViewerASCIIPrintf(viewer,"total number of Hessian evaluations=%D\n",tao->nhess);
580: }
581: /* if (tao->linear_its>0){
582: PetscViewerASCIIPrintf(viewer," total Krylov method iterations=%D\n",tao->linear_its);
583: }*/
584: if (tao->nconstraints>0){
585: PetscViewerASCIIPrintf(viewer,"total number of constraint function evaluations=%D\n",tao->nconstraints);
586: }
587: if (tao->njac>0){
588: PetscViewerASCIIPrintf(viewer,"total number of Jacobian evaluations=%D\n",tao->njac);
589: }
591: if (tao->reason>0){
592: PetscViewerASCIIPrintf(viewer, "Solution converged: ");
593: switch (tao->reason) {
594: case TAO_CONVERGED_FATOL:
595: PetscViewerASCIIPrintf(viewer,"estimated f(x)-f(X*) <= fatol\n");
596: break;
597: case TAO_CONVERGED_FRTOL:
598: PetscViewerASCIIPrintf(viewer,"estimated |f(x)-f(X*)|/|f(X*)| <= frtol\n");
599: break;
600: case TAO_CONVERGED_GATOL:
601: PetscViewerASCIIPrintf(viewer," ||g(X)|| <= gatol\n");
602: break;
603: case TAO_CONVERGED_GRTOL:
604: PetscViewerASCIIPrintf(viewer," ||g(X)||/|f(X)| <= grtol\n");
605: break;
606: case TAO_CONVERGED_GTTOL:
607: PetscViewerASCIIPrintf(viewer," ||g(X)||/||g(X0)|| <= gttol\n");
608: break;
609: case TAO_CONVERGED_STEPTOL:
610: PetscViewerASCIIPrintf(viewer," Steptol -- step size small\n");
611: break;
612: case TAO_CONVERGED_MINF:
613: PetscViewerASCIIPrintf(viewer," Minf -- f < fmin\n");
614: break;
615: case TAO_CONVERGED_USER:
616: PetscViewerASCIIPrintf(viewer," User Terminated\n");
617: break;
618: default:
619: PetscViewerASCIIPrintf(viewer,"\n");
620: break;
621: }
623: } else {
624: PetscViewerASCIIPrintf(viewer,"Solver terminated: %D",tao->reason);
625: switch (tao->reason) {
626: case TAO_DIVERGED_MAXITS:
627: PetscViewerASCIIPrintf(viewer," Maximum Iterations\n");
628: break;
629: case TAO_DIVERGED_NAN:
630: PetscViewerASCIIPrintf(viewer," NAN or Inf encountered\n");
631: break;
632: case TAO_DIVERGED_MAXFCN:
633: PetscViewerASCIIPrintf(viewer," Maximum Function Evaluations\n");
634: break;
635: case TAO_DIVERGED_LS_FAILURE:
636: PetscViewerASCIIPrintf(viewer," Line Search Failure\n");
637: break;
638: case TAO_DIVERGED_TR_REDUCTION:
639: PetscViewerASCIIPrintf(viewer," Trust Region too small\n");
640: break;
641: case TAO_DIVERGED_USER:
642: PetscViewerASCIIPrintf(viewer," User Terminated\n");
643: break;
644: default:
645: PetscViewerASCIIPrintf(viewer,"\n");
646: break;
647: }
648: }
649: PetscViewerASCIIPopTab(viewer);
650: } else if (isstring) {
651: TaoGetType(tao,&type);
652: PetscViewerStringSPrintf(viewer," %-3.3s",type);
653: }
654: return(0);
655: }
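/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   view a solver on stdout and into an ASCII file ("tao.log" is a hypothetical
   file name); the stdout call is roughly what -tao_view produces after
   TaoSolve(). */
static PetscErrorCode ExampleViewTao(Tao tao)
{
  PetscViewer    viewer;
  PetscErrorCode ierr;

  ierr = TaoView(tao,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = PetscViewerASCIIOpen(PetscObjectComm((PetscObject)tao),"tao.log",&viewer);CHKERRQ(ierr);
  ierr = TaoView(tao,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  return 0;
}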
659: /*@
660: TaoSetTolerances - Sets parameters used in TAO convergence tests
662: Logically collective on Tao
664: Input Parameters:
665: + tao - the Tao context
666: . fatol - absolute convergence tolerance
667: . frtol - relative convergence tolerance
668: . gatol - stop if norm of gradient is less than this
669: . grtol - stop if relative norm of gradient is less than this
670: - gttol - stop if norm of gradient is reduced by this factor
672: Options Database Keys:
673: + -tao_fatol <fatol> - Sets fatol
674: . -tao_frtol <frtol> - Sets frtol
675: . -tao_gatol <gatol> - Sets gatol
676: . -tao_grtol <grtol> - Sets grtol
677: - -tao_gttol <gttol> - Sets gttol
679: Stopping Criteria:
680: $ f(X) - f(X*) (estimated) <= fatol
681: $ |f(X) - f(X*)| (estimated) / |f(X)| <= frtol
682: $ ||g(X)|| <= gatol
683: $ ||g(X)|| / |f(X)| <= grtol
684: $ ||g(X)|| / ||g(X0)|| <= gttol
686: Notes:
687: Use PETSC_DEFAULT to leave one or more tolerances unchanged.
689: Level: beginner
691: .seealso: TaoGetTolerances()
693: @*/
694: PetscErrorCode TaoSetTolerances(Tao tao, PetscReal fatol, PetscReal frtol, PetscReal gatol, PetscReal grtol, PetscReal gttol)
695: {
701: if (fatol != PETSC_DEFAULT) {
702: if (fatol<0) {
703: PetscInfo(tao,"Tried to set negative fatol -- ignored.");
704: } else {
705: tao->fatol = PetscMax(0,fatol);
706: }
707: }
708: if (frtol != PETSC_DEFAULT) {
709: if (frtol<0) {
710: PetscInfo(tao,"Tried to set negative frtol -- ignored.");
711: } else {
712: tao->frtol = PetscMax(0,frtol);
713: }
714: }
716: if (gatol != PETSC_DEFAULT) {
717: if (gatol<0) {
718: PetscInfo(tao,"Tried to set negative gatol -- ignored.");
719: } else {
720: tao->gatol = PetscMax(0,gatol);
721: }
722: }
724: if (grtol != PETSC_DEFAULT) {
725: if (grtol<0) {
726: PetscInfo(tao,"Tried to set negative grtol -- ignored.");
727: } else {
728: tao->grtol = PetscMax(0,grtol);
729: }
730: }
732: if (gttol != PETSC_DEFAULT) {
733: if (gttol<0) {
734: PetscInfo(tao,"Tried to set negative gttol -- ignored.");
735: } else {
736: tao->gttol = PetscMax(0,gttol);
737: }
738: }
739: return(0);
740: }
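/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   tighten only the gradient tolerances and leave the function-value tolerances
   untouched by passing PETSC_DEFAULT. */
static PetscErrorCode ExampleSetTolerances(Tao tao)
{
  PetscErrorCode ierr;

  ierr = TaoSetTolerances(tao,
                          PETSC_DEFAULT,PETSC_DEFAULT,    /* fatol, frtol unchanged */
                          1e-10,1e-10,0.0);CHKERRQ(ierr); /* gatol, grtol, gttol */
  return 0;
}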
744: /*@
745: TaoSetConstraintTolerances - Sets constraint tolerance parameters used in TAO convergence tests
747: Logically collective on Tao
749: Input Parameters:
750: + tao - the Tao context
751: . catol - absolute constraint tolerance; the constraint norm must be less than catol for the fatol and gatol convergence criteria to be used
752: - crtol - relative constraint tolerance; the constraint norm must be less than crtol for the fatol, gatol, and gttol convergence criteria to be used
754: Options Database Keys:
755: + -tao_catol <catol> - Sets catol
756: - -tao_crtol <crtol> - Sets crtol
758: Level: intermediate
760: .seealso: TaoGetTolerances(), TaoGetConstraintTolerances(), TaoSetTolerances()
762: @*/
763: PetscErrorCode TaoSetConstraintTolerances(Tao tao, PetscReal catol, PetscReal crtol)
764: {
770: if (catol != PETSC_DEFAULT) {
771: if (catol<0) {
772: PetscInfo(tao,"Tried to set negative catol -- ignored.");
773: } else {
774: tao->catol = PetscMax(0,catol);
775: }
776: }
778: if (crtol != PETSC_DEFAULT) {
779: if (crtol<0) {
780: PetscInfo(tao,"Tried to set negative crtol -- ignored.");
781: } else {
782: tao->crtol = PetscMax(0,crtol);
783: }
784: }
785: return(0);
786: }
790: /*@
791: TaoGetConstraintTolerances - Gets constraint tolerance parameters used in TAO convergence tests
793: Not collective
795: Input Parameter:
796: . tao - the Tao context
798: Output Parameter:
799: + catol - absolute constraint tolerance; the constraint norm must be less than catol for the fatol and gatol convergence criteria to be used
800: - crtol - relative constraint tolerance; the constraint norm must be less than crtol for the fatol, gatol, and gttol convergence criteria to be used
802: Level: intermediate
804: .seealso: TaoGetTolerances(), TaoSetTolerances(), TaoSetConstraintTolerances()
806: @*/
807: PetscErrorCode TaoGetConstraintTolerances(Tao tao, PetscReal *catol, PetscReal *crtol)
808: {
811: if (catol) *catol = tao->catol;
812: if (crtol) *crtol = tao->crtol;
813: return(0);
814: }
818: /*@
819: TaoSetFunctionLowerBound - Sets a bound on the solution objective value.
820: When an approximate solution with an objective value below this number
821: has been found, the solver will terminate.
823: Logically Collective on Tao
825: Input Parameters:
826: + tao - the Tao solver context
827: - fmin - the tolerance
829: Options Database Keys:
830: . -tao_fmin <fmin> - sets the minimum function value
832: Level: intermediate
834: .seealso: TaoSetTolerances()
835: @*/
836: PetscErrorCode TaoSetFunctionLowerBound(Tao tao,PetscReal fmin)
837: {
840: tao->fmin = fmin;
841: return(0);
842: }
846: /*@
847: TaoGetFunctionLowerBound - Gets the bound on the solution objective value.
848: When an approximate solution with an objective value below this number
849: has been found, the solver will terminate.
851: Not collective on Tao
853: Input Parameters:
854: . tao - the Tao solver context
856: Output Parameters:
857: . fmin - the minimum function value
859: Level: intermediate
861: .seealso: TaoSetFunctionLowerBound()
862: @*/
863: PetscErrorCode TaoGetFunctionLowerBound(Tao tao,PetscReal *fmin)
864: {
867: *fmin = tao->fmin;
868: return(0);
869: }
873: /*@
874: TaoSetMaximumFunctionEvaluations - Sets a maximum number of
875: function evaluations.
877: Logically Collective on Tao
879: Input Parameters:
880: + tao - the Tao solver context
881: - nfcn - the maximum number of function evaluations (>=0)
883: Options Database Keys:
884: . -tao_max_funcs <nfcn> - sets the maximum number of function evaluations
886: Level: intermediate
888: .seealso: TaoSetTolerances(), TaoSetMaximumIterations()
889: @*/
891: PetscErrorCode TaoSetMaximumFunctionEvaluations(Tao tao,PetscInt nfcn)
892: {
895: tao->max_funcs = PetscMax(0,nfcn);
896: return(0);
897: }
901: /*@
902: TaoGetMaximumFunctionEvaluations - Gets the maximum number of
903: function evaluations.
905: Not Collective
907: Input Parameters:
908: . tao - the Tao solver context
910: Output Parameters:
911: . nfcn - the maximum number of function evaluations
913: Level: intermediate
915: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
916: @*/
918: PetscErrorCode TaoGetMaximumFunctionEvaluations(Tao tao,PetscInt *nfcn)
919: {
922: *nfcn = tao->max_funcs;
923: return(0);
924: }
928: /*@
929: TaoSetMaximumIterations - Sets a maximum number of iterates.
931: Logically Collective on Tao
933: Input Parameters:
934: + tao - the Tao solver context
935: - maxits - the maximum number of iterates (>=0)
937: Options Database Keys:
938: . -tao_max_it <its> - sets the maximum number of iterations
940: Level: intermediate
942: .seealso: TaoSetTolerances(), TaoSetMaximumFunctionEvaluations()
943: @*/
944: PetscErrorCode TaoSetMaximumIterations(Tao tao,PetscInt maxits)
945: {
948: tao->max_it = PetscMax(0,maxits);
949: return(0);
950: }
954: /*@
955: TaoGetMaximumIterations - Gets the maximum number of iterates.
957: Not Collective
959: Input Parameters:
960: . tao - the Tao solver context
962: Output Parameters:
963: . maxits - the maximum number of iterates
965: Level: intermediate
967: .seealso: TaoSetMaximumIterations(), TaoGetMaximumFunctionEvaluations()
968: @*/
969: PetscErrorCode TaoGetMaximumIterations(Tao tao,PetscInt *maxits)
970: {
973: *maxits = tao->max_it;
974: return(0);
975: }
979: /*@
980: TaoSetInitialTrustRegionRadius - Sets the initial trust region radius.
982: Logically collective on Tao
984: Input Parameter:
985: + tao - a TAO optimization solver
986: - radius - the trust region radius
988: Level: intermediate
990: Options Database Key:
991: . -tao_trust0 <t0> - sets initial trust region radius
993: .seealso: TaoGetTrustRegionRadius(), TaoSetTrustRegionTolerance()
994: @*/
995: PetscErrorCode TaoSetInitialTrustRegionRadius(Tao tao, PetscReal radius)
996: {
999: tao->trust0 = PetscMax(0.0,radius);
1000: return(0);
1001: }
1005: /*@
1006: TaoGetInitialTrustRegionRadius - Gets the initial trust region radius.
1008: Not Collective
1010: Input Parameter:
1011: . tao - a TAO optimization solver
1013: Output Parameter:
1014: . radius - the trust region radius
1016: Level: intermediate
1018: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetCurrentTrustRegionRadius()
1019: @*/
1020: PetscErrorCode TaoGetInitialTrustRegionRadius(Tao tao, PetscReal *radius)
1021: {
1024: *radius = tao->trust0;
1025: return(0);
1026: }
1030: /*@
1031: TaoGetCurrentTrustRegionRadius - Gets the current trust region radius.
1033: Not Collective
1035: Input Parameter:
1036: . tao - a TAO optimization solver
1038: Output Parameter:
1039: . radius - the trust region radius
1041: Level: intermediate
1043: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetInitialTrustRegionRadius()
1044: @*/
1045: PetscErrorCode TaoGetCurrentTrustRegionRadius(Tao tao, PetscReal *radius)
1046: {
1049: *radius = tao->trust;
1050: return(0);
1051: }
1055: /*@
1056: TaoGetTolerances - gets the current values of tolerances
1058: Not Collective
1060: Input Parameters:
1061: . tao - the Tao context
1063: Output Parameters:
1064: + fatol - absolute convergence tolerance
1065: . frtol - relative convergence tolerance
1066: . gatol - stop if norm of gradient is less than this
1067: . grtol - stop if relative norm of gradient is less than this
1068: - gttol - stop if norm of gradient is reduced by this factor
1070: Note: NULL can be used as an argument if not all tolerance values are needed
1072: .seealso: TaoSetTolerances()
1074: Level: intermediate
1075: @*/
1076: PetscErrorCode TaoGetTolerances(Tao tao, PetscReal *fatol, PetscReal *frtol, PetscReal *gatol, PetscReal *grtol, PetscReal *gttol)
1077: {
1080: if (fatol) *fatol=tao->fatol;
1081: if (frtol) *frtol=tao->frtol;
1082: if (gatol) *gatol=tao->gatol;
1083: if (grtol) *grtol=tao->grtol;
1084: if (gttol) *gttol=tao->gttol;
1085: return(0);
1086: }
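/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   query only the gradient tolerances, passing NULL for the values that are not
   needed. */
static PetscErrorCode ExampleQueryTolerances(Tao tao)
{
  PetscReal      gatol,grtol;
  PetscErrorCode ierr;

  ierr = TaoGetTolerances(tao,NULL,NULL,&gatol,&grtol,NULL);CHKERRQ(ierr);
  ierr = PetscPrintf(PetscObjectComm((PetscObject)tao),"gatol=%g grtol=%g\n",(double)gatol,(double)grtol);CHKERRQ(ierr);
  return 0;
}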
1090: /*@
1091: TaoGetKSP - Gets the linear solver used by the optimization solver.
1092: Application writers should use TaoGetKSP if they need direct access
1093: to the PETSc KSP object.
1095: Not Collective
1097: Input Parameters:
1098: . tao - the TAO solver
1100: Output Parameters:
1101: . ksp - the KSP linear solver used in the optimization solver
1103: Level: intermediate
1105: @*/
1106: PetscErrorCode TaoGetKSP(Tao tao, KSP *ksp)
1107: {
1109: *ksp = tao->ksp;
1110: return(0);
1111: }
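/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   fetch the inner KSP, when the chosen method creates one, and adjust its
   tolerances directly. */
static PetscErrorCode ExampleTuneInnerKSP(Tao tao)
{
  KSP            ksp;
  PetscErrorCode ierr;

  ierr = TaoGetKSP(tao,&ksp);CHKERRQ(ierr);
  if (ksp) { /* first-order methods may not create a KSP */
    ierr = KSPSetTolerances(ksp,1e-8,PETSC_DEFAULT,PETSC_DEFAULT,100);CHKERRQ(ierr);
  }
  return 0;
}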
1115: /*@
1116: TaoGetLineSearch - Gets the line search used by the optimization solver.
1117: Application writers should use TaoGetLineSearch if they need direct access
1118: to the TaoLineSearch object.
1120: Not Collective
1122: Input Parameters:
1123: . tao - the TAO solver
1125: Output Parameters:
1126: . ls - the line search used in the optimization solver
1128: Level: intermediate
1130: @*/
1131: PetscErrorCode TaoGetLineSearch(Tao tao, TaoLineSearch *ls)
1132: {
1134: *ls = tao->linesearch;
1135: return(0);
1136: }
1140: /*@
1141: TaoAddLineSearchCounts - Adds the number of function evaluations spent
1142: in the line search to the running total.
1144: Input Parameters:
1145: + tao - the TAO solver
1146: - ls - the line search used in the optimization solver
1148: Level: developer
1150: .seealso: TaoLineSearchApply()
1151: @*/
1152: PetscErrorCode TaoAddLineSearchCounts(Tao tao)
1153: {
1155: PetscBool flg;
1156: PetscInt nfeval,ngeval,nfgeval;
1160: if (tao->linesearch) {
1161: TaoLineSearchIsUsingTaoRoutines(tao->linesearch,&flg);
1162: if (flg == PETSC_FALSE) {
1163: TaoLineSearchGetNumberFunctionEvaluations(tao->linesearch,&nfeval,&ngeval,&nfgeval);
1164: tao->nfuncs+=nfeval;
1165: tao->ngrads+=ngeval;
1166: tao->nfuncgrads+=nfgeval;
1167: }
1168: }
1169: return(0);
1170: }
1174: /*@
1175: TaoGetSolutionVector - Returns the vector with the current TAO solution
1177: Not Collective
1179: Input Parameter:
1180: . tao - the Tao context
1182: Output Parameter:
1183: . X - the current solution
1185: Level: intermediate
1187: Note: The returned vector will be the same object that was passed into TaoSetInitialVector()
1188: @*/
1189: PetscErrorCode TaoGetSolutionVector(Tao tao, Vec *X)
1190: {
1193: *X = tao->solution;
1194: return(0);
1195: }
1199: /*@
1200: TaoGetGradientVector - Returns the vector with the current TAO gradient
1202: Not Collective
1204: Input Parameter:
1205: . tao - the Tao context
1207: Output Parameter:
1208: . G - the current gradient
1210: Level: intermediate
1211: @*/
1212: PetscErrorCode TaoGetGradientVector(Tao tao, Vec *G)
1213: {
1216: *G = tao->gradient;
1217: return(0);
1218: }
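/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   after TaoSolve(), inspect the solution and gradient vectors held by the
   solver; they are borrowed references and should not be destroyed here. */
static PetscErrorCode ExampleInspectSolution(Tao tao)
{
  Vec            X,G;
  PetscReal      gnorm;
  PetscErrorCode ierr;

  ierr = TaoGetSolutionVector(tao,&X);CHKERRQ(ierr);
  ierr = TaoGetGradientVector(tao,&G);CHKERRQ(ierr);
  ierr = VecView(X,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  if (G) {
    ierr = VecNorm(G,NORM_2,&gnorm);CHKERRQ(ierr);
    ierr = PetscPrintf(PetscObjectComm((PetscObject)tao),"final ||g|| = %g\n",(double)gnorm);CHKERRQ(ierr);
  }
  return 0;
}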
1222: /*@
1223: TaoResetStatistics - Initializes the statistics used by TAO for all of the solvers.
1224: These statistics include the iteration number, residual norms, and convergence status.
1225: This routine gets called before solving each optimization problem.
1227: Collective on Tao
1229: Input Parameters:
1230: . tao - the Tao context
1232: Level: developer
1234: .seealso: TaoCreate(), TaoSolve()
1235: @*/
1236: PetscErrorCode TaoResetStatistics(Tao tao)
1237: {
1240: tao->niter = 0;
1241: tao->nfuncs = 0;
1242: tao->nfuncgrads = 0;
1243: tao->ngrads = 0;
1244: tao->nhess = 0;
1245: tao->njac = 0;
1246: tao->nconstraints = 0;
1247: tao->ksp_its = 0;
1248: tao->reason = TAO_CONTINUE_ITERATING;
1249: tao->residual = 0.0;
1250: tao->cnorm = 0.0;
1251: tao->step = 0.0;
1252: tao->lsflag = PETSC_FALSE;
1253: if (tao->hist_reset) tao->hist_len=0;
1254: return(0);
1255: }
1259: /*@C
1260: TaoSetConvergenceTest - Sets the function that is to be used to test
1261: for convergence of the iterative minimization solution. The new convergence
1262: testing routine will replace TAO's default convergence test.
1264: Logically Collective on Tao
1266: Input Parameters:
1267: + tao - the Tao object
1268: . conv - the routine to test for convergence
1269: - ctx - [optional] context for private data for the convergence routine
1270: (may be NULL)
1272: Calling sequence of conv:
1273: $ PetscErrorCode conv(Tao tao, void *ctx)
1275: + tao - the Tao object
1276: - ctx - [optional] convergence context
1278: Note: The new convergence testing routine should call TaoSetConvergedReason().
1280: Level: advanced
1282: .seealso: TaoSetConvergedReason(), TaoGetSolutionStatus(), TaoGetTolerances(), TaoSetMonitor()
1284: @*/
1285: PetscErrorCode TaoSetConvergenceTest(Tao tao, PetscErrorCode (*conv)(Tao,void*), void *ctx)
1286: {
1289: (tao)->ops->convergencetest = conv;
1290: (tao)->cnvP = ctx;
1291: return(0);
1292: }
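/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   a user convergence test that terminates once the gradient norm reported by
   the solver drops below a fixed value; MyConvergenceTest is a hypothetical
   name.  Registered with TaoSetConvergenceTest(tao,MyConvergenceTest,NULL). */
static PetscErrorCode MyConvergenceTest(Tao tao,void *ctx)
{
  PetscInt           its;
  PetscReal          f,gnorm,cnorm,xdiff;
  TaoConvergedReason reason;
  PetscErrorCode     ierr;

  ierr = TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);CHKERRQ(ierr);
  if (gnorm < 1.e-9) {
    ierr = TaoSetConvergedReason(tao,TAO_CONVERGED_USER);CHKERRQ(ierr);
  } else if (its >= 500) {
    ierr = TaoSetConvergedReason(tao,TAO_DIVERGED_MAXITS);CHKERRQ(ierr);
  }
  return 0;
}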
1296: /*@C
1297: TaoSetMonitor - Sets an ADDITIONAL function that is to be used at every
1298: iteration of the solver to display the iteration's
1299: progress.
1301: Logically Collective on Tao
1303: Input Parameters:
1304: + tao - the Tao solver context
1305: . mymonitor - monitoring routine
1306: - mctx - [optional] user-defined context for private data for the
1307: monitor routine (may be NULL)
1309: Calling sequence of mymonitor:
1310: $ int mymonitor(Tao tao,void *mctx)
1312: + tao - the Tao solver context
1313: - mctx - [optional] monitoring context
1316: Options Database Keys:
1317: + -tao_monitor - sets TaoDefaultMonitor()
1318: . -tao_smonitor - sets short monitor
1319: . -tao_cmonitor - same as smonitor plus constraint norm
1320: . -tao_view_solution - view solution at each iteration
1321: . -tao_view_gradient - view gradient at each iteration
1322: . -tao_view_separableobjective - view separable objective function at each iteration
1323: - -tao_cancelmonitors - cancels all monitors that have been hardwired into a code by calls to TaoSetMonitor(), but does not cancel those set via the options database.
1326: Notes:
1327: Several different monitoring routines may be set by calling
1328: TaoSetMonitor() multiple times; all will be called in the
1329: order in which they were set.
1331: Fortran Notes: Only one monitor function may be set
1333: Level: intermediate
1335: .seealso: TaoDefaultMonitor(), TaoCancelMonitors(), TaoSetDestroyRoutine()
1336: @*/
1337: PetscErrorCode TaoSetMonitor(Tao tao, PetscErrorCode (*func)(Tao, void*), void *ctx,PetscErrorCode (*dest)(void**))
1338: {
1341: if (tao->numbermonitors >= MAXTAOMONITORS) SETERRQ1(PETSC_COMM_SELF,1,"Cannot attach another monitor -- max=%d",MAXTAOMONITORS);
1342: tao->monitor[tao->numbermonitors] = func;
1343: tao->monitorcontext[tao->numbermonitors] = ctx;
1344: tao->monitordestroy[tao->numbermonitors] = dest;
1345: ++tao->numbermonitors;
1346: return(0);
1347: }
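/* Illustrative sketch (editor's addition, not from the original taosolver.c):
   a minimal user monitor that logs the iteration count, objective value, and
   residual; MyMonitor is a hypothetical name.  Attached with
   TaoSetMonitor(tao,MyMonitor,NULL,NULL). */
static PetscErrorCode MyMonitor(Tao tao,void *ctx)
{
  PetscInt           its;
  PetscReal          f,gnorm,cnorm,xdiff;
  TaoConvergedReason reason;
  PetscErrorCode     ierr;

  ierr = TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);CHKERRQ(ierr);
  ierr = PetscPrintf(PetscObjectComm((PetscObject)tao),"it %D: f = %g, ||g|| = %g\n",its,(double)f,(double)gnorm);CHKERRQ(ierr);
  return 0;
}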
1351: /*@
1352: TaoCancelMonitors - Clears all the monitor functions for a Tao object.
1354: Logically Collective on Tao
1356: Input Parameters:
1357: . tao - the Tao solver context
1359: Options Database:
1360: . -tao_cancelmonitors - cancels all monitors that have been hardwired
1361: into a code by calls to TaoSetMonitor(), but does not cancel those
1362: set via the options database
1364: Notes:
1365: There is no way to clear one specific monitor from a Tao object.
1367: Level: advanced
1369: .seealso: TaoDefaultMonitor(), TaoSetMonitor()
1370: @*/
1371: PetscErrorCode TaoCancelMonitors(Tao tao)
1372: {
1373: PetscInt i;
1378: for (i=0;i<tao->numbermonitors;i++) {
1379: if (tao->monitordestroy[i]) {
1380: (*tao->monitordestroy[i])(&tao->monitorcontext[i]);
1381: }
1382: }
1383: tao->numbermonitors=0;
1384: return(0);
1385: }
1389: /*@C
1390: TaoDefaultMonitor - Default routine for monitoring progress of the
1391: Tao solvers. This monitor prints the function value and gradient
1392: norm at each iteration. It can be turned on from the command line using the
1393: -tao_monitor option
1395: Collective on Tao
1397: Input Parameters:
1398: + tao - the Tao context
1399: - ctx - PetscViewer context or NULL
1401: Options Database Keys:
1402: . -tao_monitor
1404: Level: advanced
1406: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1407: @*/
1408: PetscErrorCode TaoDefaultMonitor(Tao tao, void *ctx)
1409: {
1411: PetscInt its;
1412: PetscReal fct,gnorm;
1413: PetscViewer viewer;
1416: if (ctx) {
1417: viewer = (PetscViewer)ctx;
1418: } else {
1419: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1420: }
1421: its=tao->niter;
1422: fct=tao->fc;
1423: gnorm=tao->residual;
1424: ierr=PetscViewerASCIIPrintf(viewer,"iter = %3D,",its);
1425: ierr=PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1426: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1427: return(0);
1428: }
1432: /*@C
1433: TaoDefaultSMonitor - Default routine for monitoring progress of the
1434: solver. Same as TaoDefaultMonitor() except
1435: it prints fewer digits of the residual as the residual gets smaller.
1436: This is because the later digits are meaningless and are often
1437: different on different machines; by using this routine different
1438: machines will usually generate the same output. It can be turned on
1439: by using the -tao_smonitor option
1441: Collective on Tao
1443: Input Parameters:
1444: + tao - the Tao context
1445: - ctx - PetscViewer context or NULL
1447: Options Database Keys:
1448: . -tao_smonitor
1450: Level: advanced
1452: .seealso: TaoDefaultMonitor(), TaoSetMonitor()
1453: @*/
1454: PetscErrorCode TaoDefaultSMonitor(Tao tao, void *ctx)
1455: {
1457: PetscInt its;
1458: PetscReal fct,gnorm;
1459: PetscViewer viewer;
1462: if (ctx) {
1463: viewer = (PetscViewer)ctx;
1464: } else {
1465: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1466: }
1467: its=tao->niter;
1468: fct=tao->fc;
1469: gnorm=tao->residual;
1470: ierr=PetscViewerASCIIPrintf(viewer,"iter = %3D,",its);
1471: ierr=PetscViewerASCIIPrintf(viewer," Function value %g,",(double)fct);
1472: if (gnorm > 1.e-6) {
1473: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1474: } else if (gnorm > 1.e-11) {
1475: ierr=PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-6 \n");
1476: } else {
1477: ierr=PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-11 \n");
1478: }
1479: return(0);
1480: }
1484: /*@C
1485: TaoDefaultCMonitor - same as TaoDefaultMonitor() except
1486: it prints the norm of the constraints function. It can be turned on
1487: from the command line using the -tao_cmonitor option
1489: Collective on Tao
1491: Input Parameters:
1492: + tao - the Tao context
1493: - ctx - PetscViewer context or NULL
1495: Options Database Keys:
1496: . -tao_cmonitor
1498: Level: advanced
1500: .seealso: TaoDefaultMonitor(), TaoSetMonitor()
1501: @*/
1502: PetscErrorCode TaoDefaultCMonitor(Tao tao, void *ctx)
1503: {
1505: PetscInt its;
1506: PetscReal fct,gnorm;
1507: PetscViewer viewer;
1510: if (ctx) {
1511: viewer = (PetscViewer)ctx;
1512: } else {
1513: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1514: }
1515: its=tao->niter;
1516: fct=tao->fc;
1517: gnorm=tao->residual;
1518: ierr=PetscViewerASCIIPrintf(viewer,"iter = %D,",its);
1519: ierr=PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1520: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g ",(double)gnorm);
1521: PetscViewerASCIIPrintf(viewer," Constraint: %g \n",(double)tao->cnorm);
1522: return(0);
1523: }
1527: /*@C
1528: TaoSolutionMonitor - Views the solution at each iteration
1529: It can be turned on from the command line using the
1530: -tao_view_solution option
1532: Collective on Tao
1534: Input Parameters:
1535: + tao - the Tao context
1536: - ctx - PetscViewer context or NULL
1538: Options Database Keys:
1539: . -tao_view_solution
1541: Level: advanced
1543: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1544: @*/
1545: PetscErrorCode TaoSolutionMonitor(Tao tao, void *ctx)
1546: {
1548: PetscViewer viewer;
1551: if (ctx) {
1552: viewer = (PetscViewer)ctx;
1553: } else {
1554: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1555: }
1556: VecView(tao->solution, viewer);
1557: return(0);
1558: }
1562: /*@C
1563: TaoGradientMonitor - Views the gradient at each iteration
1564: It can be turned on from the command line using the
1565: -tao_view_gradient option
1567: Collective on Tao
1569: Input Parameters:
1570: + tao - the Tao context
1571: - ctx - PetscViewer context or NULL
1573: Options Database Keys:
1574: . -tao_view_gradient
1576: Level: advanced
1578: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1579: @*/
1580: PetscErrorCode TaoGradientMonitor(Tao tao, void *ctx)
1581: {
1583: PetscViewer viewer;
1586: if (ctx) {
1587: viewer = (PetscViewer)ctx;
1588: } else {
1589: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1590: }
1591: VecView(tao->gradient, viewer);
1592: return(0);
1593: }
1597: /*@C
1598: TaoStepDirectionMonitor - Views the step direction at each iteration
1599: It can be turned on from the command line using the
1600: -tao_view_stepdirection option
1602: Collective on Tao
1604: Input Parameters:
1605: + tao - the Tao context
1606: - ctx - PetscViewer context or NULL
1608: Options Database Keys:
1609: . -tao_view_stepdirection
1611: Level: advanced
1613: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1614: @*/
1615: PetscErrorCode TaoStepDirectionMonitor(Tao tao, void *ctx)
1616: {
1618: PetscViewer viewer;
1620: if (ctx) {
1621: viewer = (PetscViewer)ctx;
1622: } else {
1623: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1624: }
1625: VecView(tao->stepdirection, viewer);
1626: return(0);
1627: }
1631: /*@C
1632: TaoDrawSolutionMonitor - Plots the solution at each iteration
1633: It can be turned on from the command line using the
1634: -tao_draw_solution option
1636: Collective on Tao
1638: Input Parameters:
1639: + tao - the Tao context
1640: - ctx - PetscViewer context or NULL
1642: Options Database Keys:
1643: . -tao_draw_solution
1645: Level: advanced
1647: .seealso: TaoSolutionMonitor(), TaoSetMonitor(), TaoDrawGradientMonitor
1648: @*/
1649: PetscErrorCode TaoDrawSolutionMonitor(Tao tao, void *ctx)
1650: {
1652: PetscViewer viewer = (PetscViewer) ctx;
1653: MPI_Comm comm;
1656: if (!viewer) {
1657: PetscObjectGetComm((PetscObject)tao,&comm);
1658: viewer = PETSC_VIEWER_DRAW_(comm);
1659: }
1660: VecView(tao->solution, viewer);
1661: return(0);
1662: }
1666: /*@C
1667: TaoDrawGradientMonitor - Plots the gradient at each iteration
1668: It can be turned on from the command line using the
1669: -tao_draw_gradient option
1671: Collective on Tao
1673: Input Parameters:
1674: + tao - the Tao context
1675: - ctx - PetscViewer context or NULL
1677: Options Database Keys:
1678: . -tao_draw_gradient
1680: Level: advanced
1682: .seealso: TaoGradientMonitor(), TaoSetMonitor(), TaoDrawSolutionMonitor
1683: @*/
1684: PetscErrorCode TaoDrawGradientMonitor(Tao tao, void *ctx)
1685: {
1687: PetscViewer viewer = (PetscViewer)ctx;
1688: MPI_Comm comm;
1691: if (!viewer) {
1692: PetscObjectGetComm((PetscObject)tao,&comm);
1693: viewer = PETSC_VIEWER_DRAW_(comm);
1694: }
1695: VecView(tao->gradient, viewer);
1696: return(0);
1697: }
1701: /*@C
1702: TaoDrawStepMonitor - Plots the step direction at each iteration
1703: It can be turned on from the command line using the
1704: -tao_draw_step option
1706: Collective on Tao
1708: Input Parameters:
1709: + tao - the Tao context
1710: - ctx - PetscViewer context or NULL
1712: Options Database Keys:
1713: . -tao_draw_step
1715: Level: advanced
1717: .seealso: TaoSetMonitor(), TaoDrawSolutionMonitor
1718: @*/
1719: PetscErrorCode TaoDrawStepMonitor(Tao tao, void *ctx)
1720: {
1722: PetscViewer viewer = (PetscViewer)(ctx);
1723: MPI_Comm comm;
1726: if (!viewer) {
1727: PetscObjectGetComm((PetscObject)tao,&comm);
1728: viewer = PETSC_VIEWER_DRAW_(comm);
1729: }
1730: VecView(tao->stepdirection, viewer);
1731: return(0);
1732: }
1736: /*@C
1737: TaoSeparableObjectiveMonitor - Views the separable objective function at each iteration
1738: It can be turned on from the command line using the
1739: -tao_view_separableobjective option
1741: Collective on Tao
1743: Input Parameters:
1744: + tao - the Tao context
1745: - ctx - PetscViewer context or NULL
1747: Options Database Keys:
1748: . -tao_view_separableobjective
1750: Level: advanced
1752: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1753: @*/
1754: PetscErrorCode TaoSeparableObjectiveMonitor(Tao tao, void *ctx)
1755: {
1757: PetscViewer viewer;
1760: if (ctx) {
1761: viewer = (PetscViewer)ctx;
1762: } else {
1763: viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
1764: }
1765: VecView(tao->sep_objective,viewer);
1766: return(0);
1767: }
1771: /*@C
1772: TaoDefaultConvergenceTest - Determines whether the solver should continue iterating
1773: or terminate.
1775: Collective on Tao
1777: Input Parameters:
1778: + tao - the Tao context
1779: - dummy - unused dummy context
1781: Output Parameter:
1782: . reason - for terminating
1784: Notes:
1785: This routine checks the residual in the optimality conditions, the
1786: relative residual in the optimality conditions, the number of function
1787: evaluations, and the function value to test convergence. Some
1788: solvers may use different convergence routines.
1790: Level: developer
1792: .seealso: TaoSetTolerances(),TaoGetConvergedReason(),TaoSetConvergedReason()
1793: @*/
1795: PetscErrorCode TaoDefaultConvergenceTest(Tao tao,void *dummy)
1796: {
1797: PetscInt niter=tao->niter, nfuncs=PetscMax(tao->nfuncs,tao->nfuncgrads);
1798: PetscInt max_funcs=tao->max_funcs;
1799: PetscReal gnorm=tao->residual, gnorm0=tao->gnorm0;
1800: PetscReal f=tao->fc, steptol=tao->steptol,trradius=tao->step;
1801: PetscReal gatol=tao->gatol,grtol=tao->grtol,gttol=tao->gttol;
1802: PetscReal fatol=tao->fatol,frtol=tao->frtol,catol=tao->catol,crtol=tao->crtol;
1803: PetscReal fmin=tao->fmin, cnorm=tao->cnorm, cnorm0=tao->cnorm0;
1804: PetscReal gnorm2;
1805: TaoConvergedReason reason=tao->reason;
1806: PetscErrorCode ierr;
1810: if (reason != TAO_CONTINUE_ITERATING) {
1811: return(0);
1812: }
1813: gnorm2=gnorm*gnorm;
1815: if (PetscIsInfOrNanReal(f)) {
1816: PetscInfo(tao,"Failed to converged, function value is Inf or NaN\n");
1817: reason = TAO_DIVERGED_NAN;
1818: } else if (f <= fmin && cnorm <=catol) {
1819: PetscInfo2(tao,"Converged due to function value %g < minimum function value %g\n", (double)f,(double)fmin);
1820: reason = TAO_CONVERGED_MINF;
1821: } else if (gnorm2 <= fatol && cnorm <=catol) {
1822: PetscInfo2(tao,"Converged due to estimated f(X) - f(X*) = %g < %g\n",(double)gnorm2,(double)fatol);
1823: reason = TAO_CONVERGED_FATOL;
1824: } else if (f != 0 && gnorm2 / PetscAbsReal(f)<= frtol && cnorm/PetscMax(cnorm0,1.0) <= crtol) {
1825: PetscInfo2(tao,"Converged due to estimated |f(X)-f(X*)|/f(X) = %g < %g\n",(double)(gnorm2/PetscAbsReal(f)),(double)frtol);
1826: reason = TAO_CONVERGED_FRTOL;
1827: } else if (gnorm<= gatol && cnorm <=catol) {
1828: PetscInfo2(tao,"Converged due to residual norm ||g(X)||=%g < %g\n",(double)gnorm,(double)gatol);
1829: reason = TAO_CONVERGED_GATOL;
1830: } else if ( f!=0 && PetscAbsReal(gnorm/f) <= grtol && cnorm <= crtol) {
1831: PetscInfo2(tao,"Converged due to residual ||g(X)||/|f(X)| =%g < %g\n",(double)(gnorm/f),(double)grtol);
1832: reason = TAO_CONVERGED_GRTOL;
1833: } else if (gnorm0 != 0 && gnorm/gnorm0 <= gttol && cnorm <= crtol) {
1834: PetscInfo2(tao,"Converged due to relative residual norm ||g(X)||/||g(X0)|| = %g < %g\n",(double)(gnorm/gnorm0),(double)gttol);
1835: reason = TAO_CONVERGED_GTTOL;
1836: } else if (nfuncs > max_funcs){
1837: PetscInfo2(tao,"Exceeded maximum number of function evaluations: %D > %D\n", nfuncs,max_funcs);
1838: reason = TAO_DIVERGED_MAXFCN;
1839: } else if ( tao->lsflag != 0 ){
1840: PetscInfo(tao,"Tao Line Search failure.\n");
1841: reason = TAO_DIVERGED_LS_FAILURE;
1842: } else if (trradius < steptol && niter > 0){
1843: PetscInfo2(tao,"Trust region/step size too small: %g < %g\n", (double)trradius,(double)steptol);
1844: reason = TAO_CONVERGED_STEPTOL;
1845: } else if (niter > tao->max_it) {
1846: PetscInfo2(tao,"Exceeded maximum number of iterations: %D > %D\n",niter,tao->max_it);
1847: reason = TAO_DIVERGED_MAXITS;
1848: } else {
1849: reason = TAO_CONTINUE_ITERATING;
1850: }
1851: tao->reason = reason;
1852: return(0);
1853: }
1857: /*@C
1858: TaoSetOptionsPrefix - Sets the prefix used for searching for all
1859: TAO options in the database.
1862: Logically Collective on Tao
1864: Input Parameters:
1865: + tao - the Tao context
1866: - prefix - the prefix string to prepend to all TAO option requests
1868: Notes:
1869: A hyphen (-) must NOT be given at the beginning of the prefix name.
1870: The first character of all runtime options is AUTOMATICALLY the hyphen.
1872: For example, to distinguish between the runtime options for two
1873: different TAO solvers, one could call
1874: .vb
1875: TaoSetOptionsPrefix(tao1,"sys1_")
1876: TaoSetOptionsPrefix(tao2,"sys2_")
1877: .ve
1879: This would enable use of different options for each system, such as
1880: .vb
1881: -sys1_tao_method blmvm -sys1_tao_gtol 1.e-3
1882: -sys2_tao_method lmvm -sys2_tao_gtol 1.e-4
1883: .ve
1886: Level: advanced
1888: .seealso: TaoAppendOptionsPrefix(), TaoGetOptionsPrefix()
1889: @*/
1891: PetscErrorCode TaoSetOptionsPrefix(Tao tao, const char p[])
1892: {
1896: PetscObjectSetOptionsPrefix((PetscObject)tao,p);
1897: if (tao->linesearch) {
1898: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
1899: }
1900: if (tao->ksp) {
1901: KSPSetOptionsPrefix(tao->ksp,p);
1902: }
1903: return(0);
1904: }
1908: /*@C
1909: TaoAppendOptionsPrefix - Appends to the prefix used for searching for all
1910: TAO options in the database.
1913: Logically Collective on Tao
1915: Input Parameters:
1916: + tao - the Tao solver context
1917: - prefix - the prefix string to prepend to all TAO option requests
1919: Notes:
1920: A hyphen (-) must NOT be given at the beginning of the prefix name.
1921: The first character of all runtime options is AUTOMATICALLY the hyphen.
1924: Level: advanced
1926: .seealso: TaoSetOptionsPrefix(), TaoGetOptionsPrefix()
1927: @*/
1928: PetscErrorCode TaoAppendOptionsPrefix(Tao tao, const char p[])
1929: {
1933: PetscObjectAppendOptionsPrefix((PetscObject)tao,p);
1934: if (tao->linesearch) {
1935: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
1936: }
1937: if (tao->ksp) {
1938: KSPSetOptionsPrefix(tao->ksp,p);
1939: }
1940: return(0);
1941: }
1945: /*@C
1946: TaoGetOptionsPrefix - Gets the prefix used for searching for all
1947: TAO options in the database
1949: Not Collective
1951: Input Parameters:
1952: . tao - the Tao context
1954: Output Parameters:
1955: . prefix - pointer to the prefix string used is returned
1957: Notes: On the Fortran side, the user should pass in a string 'prefix' of
1958: sufficient length to hold the prefix.
1960: Level: advanced
1962: .seealso: TaoSetOptionsPrefix(), TaoAppendOptionsPrefix()
1963: @*/
1964: PetscErrorCode TaoGetOptionsPrefix(Tao tao, const char *p[])
1965: {
1966: return PetscObjectGetOptionsPrefix((PetscObject)tao,p);
1967: }
1971: /*@C
1972: TaoSetType - Sets the method for the unconstrained minimization solver.
1974: Collective on Tao
1976: Input Parameters:
1977: + solver - the Tao solver context
1978: - type - a known method
1980: Options Database Key:
1981: . -tao_type <type> - Sets the method; use -help for a list
1982: of available methods (for instance, "-tao_type lmvm" or "-tao_type tron")
1984: Available methods include:
1985: + nls - Newton's method with line search for unconstrained minimization
1986: . ntr - Newton's method with trust region for unconstrained minimization
1987: . ntl - Newton's method with trust region, line search for unconstrained minimization
1988: . lmvm - Limited memory variable metric method for unconstrained minimization
1989: . cg - Nonlinear conjugate gradient method for unconstrained minimization
1990: .    nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
1991: . tron - Newton Trust Region method for bound constrained minimization
1992: . gpcg - Newton Trust Region method for quadratic bound constrained minimization
1993: . blmvm - Limited memory variable metric method for bound constrained minimization
1994: -    pounders - Model-based algorithm for nonlinear least squares
1996: Level: intermediate
1998: .seealso: TaoCreate(), TaoGetType(), TaoType
2000: @*/
2001: PetscErrorCode TaoSetType(Tao tao, const TaoType type)
2002: {
2004: PetscErrorCode (*create_xxx)(Tao);
2005: PetscBool issame;
2010: PetscObjectTypeCompare((PetscObject)tao,type,&issame);
2011: if (issame) return(0);
2013: PetscFunctionListFind(TaoList, type, (void(**)(void))&create_xxx);
2014: if (!create_xxx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unable to find requested Tao type %s",type);
2016: /* Destroy the existing solver information */
2017: if (tao->ops->destroy) {
2018: (*tao->ops->destroy)(tao);
2019: }
2020: KSPDestroy(&tao->ksp);
2021: TaoLineSearchDestroy(&tao->linesearch);
2022: VecDestroy(&tao->gradient);
2023: VecDestroy(&tao->stepdirection);
2025: tao->ops->setup = 0;
2026: tao->ops->solve = 0;
2027: tao->ops->view = 0;
2028: tao->ops->setfromoptions = 0;
2029: tao->ops->destroy = 0;
2031: tao->setupcalled = PETSC_FALSE;
2033: (*create_xxx)(tao);
2034: PetscObjectChangeTypeName((PetscObject)tao,type);
2035: return(0);
2036: }
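/*
   A sketch of the usual TaoSetType() pattern: choose a default algorithm in the code,
   then let -tao_type on the command line override it through TaoSetFromOptions().
   The helper name ExampleChooseType is illustrative; error checking is omitted.
*/
static PetscErrorCode ExampleChooseType(Tao tao)
{
  const char *type;

  TaoSetType(tao,"lmvm");       /* default: limited memory variable metric */
  TaoSetFromOptions(tao);       /* e.g. -tao_type ntr overrides the default */
  TaoGetType(tao,&type);
  PetscPrintf(PetscObjectComm((PetscObject)tao),"using tao type %s\n",type);
  return 0;
}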
2040: /*MC
2041: TaoRegister - Adds a method to the TAO package for unconstrained minimization.
2043: Synopsis:
2044:    PetscErrorCode TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2046: Not collective
2048: Input Parameters:
2049: + sname - name of a new user-defined solver
2050: -  func - routine that creates the method context
2052: Notes:
2053: TaoRegister() may be called multiple times to add several user-defined solvers.
2055: Sample usage:
2056: .vb
2057: TaoRegister("my_solver",MySolverCreate);
2058: .ve
2060: Then, your solver can be chosen with the procedural interface via
2061: $ TaoSetType(tao,"my_solver")
2062: or at runtime via the option
2063: $ -tao_type my_solver
2065: Level: advanced
2067: .seealso: TaoRegisterAll(), TaoRegisterDestroy()
2068: M*/
2069: PetscErrorCode TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2070: {
2074: PetscFunctionListAdd(&TaoList,sname, (void (*)(void))func);
2075: return(0);
2076: }
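/*
   A sketch of registering a user-defined solver with TaoRegister().  MySolverCreate
   and MySolverSolve are hypothetical; a complete implementation would also set
   tao->ops->setup, view, setfromoptions and destroy, as the built-in solvers do.
*/
static PetscErrorCode MySolverSolve(Tao tao)
{
  /* iterate on tao->solution, calling TaoComputeObjectiveAndGradient() and TaoMonitor() */
  return 0;
}

static PetscErrorCode MySolverCreate(Tao tao)
{
  tao->ops->solve = MySolverSolve;
  return 0;
}

/*
   Registration is typically done once, after TaoInitializePackage():
     TaoRegister("my_solver",MySolverCreate);
   after which the solver is selected with TaoSetType(tao,"my_solver") or -tao_type my_solver.
*/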
2080: /*@C
2081: TaoRegisterDestroy - Frees the list of minimization solvers that were
2082:    registered by TaoRegister().
2084: Not Collective
2086: Level: advanced
2088: .seealso: TaoRegisterAll(), TaoRegister()
2089: @*/
2090: PetscErrorCode TaoRegisterDestroy(void)
2091: {
2094: PetscFunctionListDestroy(&TaoList);
2095: TaoRegisterAllCalled = PETSC_FALSE;
2096: return(0);
2097: }
2101: /*@
2102: TaoSetConvergedReason - Sets the termination flag on a Tao object
2104: Logically Collective on Tao
2106: Input Parameters:
2107: + tao - the Tao context
2108: - reason - one of
2109: $  TAO_CONVERGED_FATOL (1),
2110: $  TAO_CONVERGED_FRTOL (2),
2111: $  TAO_CONVERGED_GATOL (3),
2112: $  TAO_CONVERGED_GRTOL (4),
2113: $  TAO_CONVERGED_GTTOL (5),
2114: $  TAO_CONVERGED_STEPTOL (6),
2115: $  TAO_CONVERGED_MINF (7),
2116: $  TAO_CONVERGED_USER (8),
2117: $  TAO_DIVERGED_MAXITS (-2),
2118: $  TAO_DIVERGED_NAN (-4),
2119: $  TAO_DIVERGED_MAXFCN (-5),
2120: $  TAO_DIVERGED_LS_FAILURE (-6), TAO_DIVERGED_TR_REDUCTION (-7), TAO_DIVERGED_USER (-8), or TAO_CONTINUE_ITERATING (0)
2122: Level: intermediate
2124: @*/
2125: PetscErrorCode TaoSetConvergedReason(Tao tao, TaoConvergedReason reason)
2126: {
2129: tao->reason = reason;
2130: return(0);
2131: }
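/*
   A sketch of a user convergence test (installed with TaoSetConvergenceTest()) that
   uses TaoSetConvergedReason() to stop on its own criteria.  MyConvergenceTest and
   the tolerances shown are illustrative only.
*/
static PetscErrorCode MyConvergenceTest(Tao tao, void *ctx)
{
  PetscInt           its;
  PetscReal          f,gnorm,cnorm,xdiff;
  TaoConvergedReason reason;

  TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);
  if (gnorm < 1.e-9) {
    TaoSetConvergedReason(tao,TAO_CONVERGED_USER);      /* gradient small enough, stop */
  } else if (its > 500) {
    TaoSetConvergedReason(tao,TAO_DIVERGED_USER);       /* give up after 500 iterations */
  } else {
    TaoSetConvergedReason(tao,TAO_CONTINUE_ITERATING);
  }
  return 0;
}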
2135: /*@
2136: TaoGetConvergedReason - Gets the reason the Tao iteration was stopped.
2138: Not Collective
2140: Input Parameter:
2141: . tao - the Tao solver context
2143: Output Parameter:
2144: . reason - one of
2145: $ TAO_CONVERGED_FATOL (1) f(X)-f(X*) <= fatol
2146: $ TAO_CONVERGED_FRTOL (2) |f(X) - f(X*)|/|f(X)| < frtol
2147: $ TAO_CONVERGED_GATOL (3) ||g(X)|| < gatol
2148: $  TAO_CONVERGED_GRTOL (4)        ||g(X)|| / |f(X)|  < grtol
2149: $ TAO_CONVERGED_GTTOL (5) ||g(X)|| / ||g(X0)|| < gttol
2150: $ TAO_CONVERGED_STEPTOL (6) step size small
2151: $ TAO_CONVERGED_MINF (7) F < F_min
2152: $ TAO_CONVERGED_USER (8) User defined
2153: $ TAO_DIVERGED_MAXITS (-2) its > maxits
2154: $ TAO_DIVERGED_NAN (-4) Numerical problems
2155: $  TAO_DIVERGED_MAXFCN (-5)       fevals > max_funcs
2156: $  TAO_DIVERGED_LS_FAILURE (-6)   line search failure
2157: $  TAO_DIVERGED_TR_REDUCTION (-7) trust region failure
2158: $  TAO_DIVERGED_USER (-8)         User defined
2159: $ TAO_CONTINUE_ITERATING (0)
2161: where
2162: + X - current solution
2163: . X0 - initial guess
2164: . f(X) - current function value
2165: . f(X*) - true solution (estimated)
2166: . g(X) - current gradient
2167: . its - current iterate number
2168: . maxits - maximum number of iterates
2169: . fevals - number of function evaluations
2170: - max_funcs - maximum number of function evaluations
2172: Level: intermediate
2174: .seealso: TaoSetConvergenceTest(), TaoSetTolerances()
2176: @*/
2177: PetscErrorCode TaoGetConvergedReason(Tao tao, TaoConvergedReason *reason)
2178: {
2182: *reason = tao->reason;
2183: return(0);
2184: }
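/*
   A sketch of the typical call sequence around TaoGetConvergedReason(): solve, then
   inspect the reason (positive values indicate convergence, negative divergence).
   The helper name ExampleCheckReason is illustrative; error checking is omitted.
*/
static PetscErrorCode ExampleCheckReason(Tao tao)
{
  TaoConvergedReason reason;

  TaoSolve(tao);
  TaoGetConvergedReason(tao,&reason);
  if (reason > 0) {
    PetscPrintf(PetscObjectComm((PetscObject)tao),"converged, reason %d\n",(int)reason);
  } else {
    PetscPrintf(PetscObjectComm((PetscObject)tao),"did not converge, reason %d\n",(int)reason);
  }
  return 0;
}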
2188: /*@
2189: TaoGetSolutionStatus - Get the current iterate, objective value,
2190:   residual, infeasibility, and termination reason
2192: Not Collective
2194: Input Parameters:
2195: . tao - the Tao context
2197: Output Parameters:
2198: + its - the current iterate number (>=0)
2199: . f - the current function value
2200: . gnorm - the square of the gradient norm, duality gap, or other measure indicating distance from optimality.
2201: . cnorm - the infeasibility of the current solution with regard to the constraints.
2202: . xdiff - the step length or trust region radius of the most recent iterate.
2203: - reason - The termination reason, which can equal TAO_CONTINUE_ITERATING
2205: Level: intermediate
2207: Note:
2208: TAO returns the values set by the solvers in the routine TaoMonitor().
2210: Note:
2211: If any of the output arguments are set to NULL, no corresponding value will be returned.
2213: .seealso: TaoMonitor(), TaoGetConvergedReason()
2214: @*/
2215: PetscErrorCode TaoGetSolutionStatus(Tao tao, PetscInt *its, PetscReal *f, PetscReal *gnorm, PetscReal *cnorm, PetscReal *xdiff, TaoConvergedReason *reason)
2216: {
2218: if (its) *its=tao->niter;
2219: if (f) *f=tao->fc;
2220: if (gnorm) *gnorm=tao->residual;
2221: if (cnorm) *cnorm=tao->cnorm;
2222: if (reason) *reason=tao->reason;
2223: if (xdiff) *xdiff=tao->step;
2224: return(0);
2225: }
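/*
   A sketch of a custom monitor (installed with TaoSetMonitor()) that uses
   TaoGetSolutionStatus() to print a one-line summary per iteration; any unwanted
   output argument can simply be passed as NULL.  MyStatusMonitor is illustrative.
*/
static PetscErrorCode MyStatusMonitor(Tao tao, void *ctx)
{
  PetscInt  its;
  PetscReal f,gnorm,cnorm,xdiff;

  TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,NULL);
  PetscPrintf(PetscObjectComm((PetscObject)tao),"%D: f=%g gnorm=%g step=%g\n",its,(double)f,(double)gnorm,(double)xdiff);
  return 0;
}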
2229: /*@C
2230: TaoGetType - Gets the current Tao algorithm.
2232: Not Collective
2234: Input Parameter:
2235: . tao - the Tao solver context
2237: Output Parameter:
2238: . type - Tao method
2240: Level: intermediate
2242: @*/
2243: PetscErrorCode TaoGetType(Tao tao, const TaoType *type)
2244: {
2248: *type=((PetscObject)tao)->type_name;
2249: return(0);
2250: }
2254: /*@C
2255: TaoMonitor - Monitor the solver and the current solution. This
2256: routine will record the iteration number and residual statistics,
2257:    call any monitors specified by the user, and call the convergence-check routine.
2259: Input Parameters:
2260: + tao - the Tao context
2261: . its - the current iterate number (>=0)
2262: . f - the current objective function value
2263: . res - the gradient norm, square root of the duality gap, or other measure indicating distance from optimality. This measure will be recorded and
2264: used for some termination tests.
2265: . cnorm - the infeasibility of the current solution with regard to the constraints.
2266: - steplength - multiple of the step direction added to the previous iterate.
2268: Output Parameters:
2269: . reason - The termination reason, which can equal TAO_CONTINUE_ITERATING
2271: Options Database Key:
2272: . -tao_monitor - Use the default monitor, which prints statistics to standard output
2274: .seealso: TaoGetConvergedReason(), TaoDefaultMonitor(), TaoSetMonitor()
2276: Level: developer
2278: @*/
2279: PetscErrorCode TaoMonitor(Tao tao, PetscInt its, PetscReal f, PetscReal res, PetscReal cnorm, PetscReal steplength, TaoConvergedReason *reason)
2280: {
2282: PetscInt i;
2286: tao->fc = f;
2287: tao->residual = res;
2288: tao->cnorm = cnorm;
2289: tao->step = steplength;
2290: tao->niter=its;
2291: if (its == 0) {
2292: tao->cnorm0 = cnorm; tao->gnorm0 = res;
2293: }
2294: TaoLogHistory(tao,f,res,cnorm);
2295: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(res)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
2296: if (tao->ops->convergencetest) {
2297: (*tao->ops->convergencetest)(tao,tao->cnvP);
2298: }
2299: for (i=0;i<tao->numbermonitors;i++) {
2300: (*tao->monitor[i])(tao,tao->monitorcontext[i]);
2301: }
2302: *reason = tao->reason;
2303: return(0);
2304: }
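/*
   A developer-level sketch of how a solver's ops->solve routine typically drives
   TaoMonitor(): once before the loop and once per iteration, continuing while the
   returned reason is TAO_CONTINUE_ITERATING.  MySolverSolveLoop is illustrative and
   the actual step computation is elided.
*/
static PetscErrorCode MySolverSolveLoop(Tao tao)
{
  PetscReal          f,gnorm,steplength=1.0;
  PetscInt           iter=0;
  TaoConvergedReason reason=TAO_CONTINUE_ITERATING;

  TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);
  VecNorm(tao->gradient,NORM_2,&gnorm);
  TaoMonitor(tao,iter,f,gnorm,0.0,steplength,&reason);
  while (reason == TAO_CONTINUE_ITERATING) {
    /* compute a step here, then update tao->solution, f, gnorm and steplength */
    iter++;
    TaoMonitor(tao,iter,f,gnorm,0.0,steplength,&reason);
  }
  return 0;
}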
2308: /*@
2309: TaoSetHistory - Sets the array used to hold the convergence history.
2311: Logically Collective on Tao
2313: Input Parameters:
2314: + tao - the Tao solver context
2315: . obj - array to hold objective value history
2316: . resid - array to hold residual history
2317: . cnorm - array to hold constraint violation history
2318: . na - size of obj, resid, and cnorm
2319: - reset - PETSC_TRUE indicates each new minimization resets the history counter to zero,
2320:   otherwise the history from subsequent minimizations is appended after the existing values
2322: Notes:
2323: If set, TAO will fill the given arrays with the indicated
2324: information at each iteration. If no information is desired
2325: for a given array, then NULL may be used.
2327: This routine is useful, e.g., when running a code for purposes
2328: of accurate performance monitoring, when no I/O should be done
2329: during the section of code that is being timed.
2331: Level: intermediate
2333: .seealso: TaoGetHistory()
2335: @*/
2336: PetscErrorCode TaoSetHistory(Tao tao, PetscReal *obj, PetscReal *resid, PetscReal *cnorm, PetscInt na,PetscBool reset)
2337: {
2340: tao->hist_obj = obj;
2341: tao->hist_resid = resid;
2342: tao->hist_cnorm = cnorm;
2343: tao->hist_max = na;
2344: tao->hist_reset = reset;
2345: return(0);
2346: }
2350: /*@C
2351: TaoGetHistory - Gets the array used to hold the convergence history.
2353: Collective on Tao
2355: Input Parameter:
2356: . tao - the Tao context
2358: Output Parameters:
2359: + obj - array used to hold objective value history
2360: . resid - array used to hold residual history
2361: . cnorm - array used to hold constraint violation history
2362: - nhist - size of obj, resid, and cnorm (will be less than or equal to na given in TaoSetHistory)
2364: Notes:
2365: The calling sequence for this routine in Fortran is
2366: $ call TaoGetHistory(Tao tao, integer nhist, integer info)
2368: This routine is useful, e.g., when running a code for purposes
2369: of accurate performance monitoring, when no I/O should be done
2370: during the section of code that is being timed.
2372: Level: advanced
2374: .seealso: TaoSetHistory()
2376: @*/
2377: PetscErrorCode TaoGetHistory(Tao tao, PetscReal **obj, PetscReal **resid, PetscReal **cnorm, PetscInt *nhist)
2378: {
2381: if (obj) *obj = tao->hist_obj;
2382: if (cnorm) *cnorm = tao->hist_cnorm;
2383: if (resid) *resid = tao->hist_resid;
2384: if (nhist) *nhist = tao->hist_len;
2385: return(0);
2386: }
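/*
   A sketch of using TaoSetHistory()/TaoGetHistory() together: the caller provides the
   arrays, TaoSolve() fills them, and the recorded length comes back in nhist.  The
   helper name ExampleHistory and the array size of 100 are illustrative.
*/
static PetscErrorCode ExampleHistory(Tao tao)
{
  PetscReal obj[100],resid[100],*h_obj,*h_resid;
  PetscInt  i,nhist;

  TaoSetHistory(tao,obj,resid,NULL,100,PETSC_TRUE);
  TaoSolve(tao);
  TaoGetHistory(tao,&h_obj,&h_resid,NULL,&nhist);
  for (i=0;i<nhist;i++) {
    PetscPrintf(PetscObjectComm((PetscObject)tao),"%D: f=%g resid=%g\n",i,(double)h_obj[i],(double)h_resid[i]);
  }
  return 0;
}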
2390: /*@
2391: TaoSetApplicationContext - Sets the optional user-defined context for
2392: a solver.
2394: Logically Collective on Tao
2396: Input Parameters:
2397: + tao - the Tao context
2398: - usrP - optional user context
2400: Level: intermediate
2402: .seealso: TaoGetApplicationContext()
2403: @*/
2404: PetscErrorCode TaoSetApplicationContext(Tao tao,void *usrP)
2405: {
2408: tao->user = usrP;
2409: return(0);
2410: }
2414: /*@
2415: TaoGetApplicationContext - Gets the user-defined context for a
2416:    TAO solver.
2418: Not Collective
2420: Input Parameter:
2421: . tao - Tao context
2423: Output Parameter:
2424: . usrP - user context
2426: Level: intermediate
2428: .seealso: TaoSetApplicationContext()
2429: @*/
2430: PetscErrorCode TaoGetApplicationContext(Tao tao,void *usrP)
2431: {
2434: *(void**)usrP = tao->user;
2435: return(0);
2436: }
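/*
   A sketch of attaching and recovering a user context: an application-defined struct
   (AppCtx here, purely illustrative) is stored with TaoSetApplicationContext() and
   retrieved later, for example inside a callback, with TaoGetApplicationContext().
*/
typedef struct {
  PetscReal alpha;
  PetscInt  n;
} AppCtx;

static PetscErrorCode ExampleUseContext(Tao tao)
{
  AppCtx user,*ctx;

  user.alpha = 99.0;
  user.n     = 2;
  TaoSetApplicationContext(tao,&user);
  TaoGetApplicationContext(tao,&ctx);    /* ctx now points to user */
  PetscPrintf(PetscObjectComm((PetscObject)tao),"alpha=%g n=%D\n",(double)ctx->alpha,ctx->n);
  return 0;
}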