Actual source code: taosolver.c
petsc-3.11.4 2019-09-28
1: #define TAO_DLL
3: #include <petsc/private/taoimpl.h>
5: PetscBool TaoRegisterAllCalled = PETSC_FALSE;
6: PetscFunctionList TaoList = NULL;
8: PetscClassId TAO_CLASSID;
10: PetscLogEvent TAO_Solve;
11: PetscLogEvent TAO_ObjectiveEval;
12: PetscLogEvent TAO_GradientEval;
13: PetscLogEvent TAO_ObjGradEval;
14: PetscLogEvent TAO_HessianEval;
15: PetscLogEvent TAO_JacobianEval;
16: PetscLogEvent TAO_ConstraintsEval;
18: const char *TaoSubSetTypes[] = {"subvec","mask","matrixfree","TaoSubSetType","TAO_SUBSET_",0};
20: struct _n_TaoMonitorDrawCtx {
21: PetscViewer viewer;
22: PetscInt howoften; /* when > 0 uses iteration % howoften, when negative only final solution plotted */
23: };
25: /*@
26: TaoCreate - Creates a TAO solver
28: Collective on MPI_Comm
30: Input Parameter:
31: . comm - MPI communicator
33: Output Parameter:
34: . newtao - the new Tao context
36: Available methods include:
37: + nls - Newton's method with line search for unconstrained minimization
38: . ntr - Newton's method with trust region for unconstrained minimization
39: . ntl - Newton's method with trust region, line search for unconstrained minimization
40: . lmvm - Limited memory variable metric method for unconstrained minimization
41: . cg - Nonlinear conjugate gradient method for unconstrained minimization
42: .  nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
43: . tron - Newton Trust Region method for bound constrained minimization
44: . gpcg - Newton Trust Region method for quadratic bound constrained minimization
45: . blmvm - Limited memory variable metric method for bound constrained minimization
46: . lcl - Linearly constrained Lagrangian method for pde-constrained minimization
47: - pounders - Model-based algorithm for nonlinear least squares
49: Options Database Keys:
50: . -tao_type - select which method TAO should use
52: Level: beginner
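   Example usage (a minimal sketch; x, ierr, user, and MyFormFunctionGradient are application-provided placeholders):
$      Tao tao;
$      ierr = TaoCreate(PETSC_COMM_WORLD,&tao);CHKERRQ(ierr);
$      ierr = TaoSetType(tao,TAOLMVM);CHKERRQ(ierr);                      // any of the methods listed above
$      ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);                   // x is a previously created Vec
$      ierr = TaoSetObjectiveAndGradientRoutine(tao,MyFormFunctionGradient,&user);CHKERRQ(ierr);
$      ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);
$      ierr = TaoSolve(tao);CHKERRQ(ierr);
$      ierr = TaoDestroy(&tao);CHKERRQ(ierr);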
54: .seealso: TaoSolve(), TaoDestroy()
55: @*/
56: PetscErrorCode TaoCreate(MPI_Comm comm, Tao *newtao)
57: {
59: Tao tao;
63: *newtao = NULL;
65: TaoInitializePackage();
66: TaoLineSearchInitializePackage();
68: PetscHeaderCreate(tao,TAO_CLASSID,"Tao","Optimization solver","Tao",comm,TaoDestroy,TaoView);
69: tao->ops->computeobjective=0;
70: tao->ops->computeobjectiveandgradient=0;
71: tao->ops->computegradient=0;
72: tao->ops->computehessian=0;
73: tao->ops->computeresidual=0;
74: tao->ops->computeresidualjacobian=0;
75: tao->ops->computeconstraints=0;
76: tao->ops->computejacobian=0;
77: tao->ops->computejacobianequality=0;
78: tao->ops->computejacobianinequality=0;
79: tao->ops->computeequalityconstraints=0;
80: tao->ops->computeinequalityconstraints=0;
81: tao->ops->convergencetest=TaoDefaultConvergenceTest;
82: tao->ops->convergencedestroy=0;
83: tao->ops->computedual=0;
84: tao->ops->setup=0;
85: tao->ops->solve=0;
86: tao->ops->view=0;
87: tao->ops->setfromoptions=0;
88: tao->ops->destroy=0;
90: tao->solution=NULL;
91: tao->gradient=NULL;
92: tao->ls_res = NULL;
93: tao->ls_jac = NULL;
94: tao->constraints=NULL;
95: tao->constraints_equality=NULL;
96: tao->constraints_inequality=NULL;
97: tao->res_weights_v=NULL;
98: tao->res_weights_w=NULL;
99: tao->stepdirection=NULL;
100: tao->niter=0;
101: tao->ntotalits=0;
102: tao->XL = NULL;
103: tao->XU = NULL;
104: tao->IL = NULL;
105: tao->IU = NULL;
106: tao->DI = NULL;
107: tao->DE = NULL;
108: tao->gradient_norm = NULL;
109: tao->gradient_norm_tmp = NULL;
110: tao->hessian = NULL;
111: tao->hessian_pre = NULL;
112: tao->jacobian = NULL;
113: tao->jacobian_pre = NULL;
114: tao->jacobian_state = NULL;
115: tao->jacobian_state_pre = NULL;
116: tao->jacobian_state_inv = NULL;
117: tao->jacobian_design = NULL;
118: tao->jacobian_design_pre = NULL;
119: tao->jacobian_equality = NULL;
120: tao->jacobian_equality_pre = NULL;
121: tao->jacobian_inequality = NULL;
122: tao->jacobian_inequality_pre = NULL;
123: tao->state_is = NULL;
124: tao->design_is = NULL;
126: tao->max_it = 10000;
127: tao->max_funcs = 10000;
128: #if defined(PETSC_USE_REAL_SINGLE)
129: tao->gatol = 1e-5;
130: tao->grtol = 1e-5;
131: #else
132: tao->gatol = 1e-8;
133: tao->grtol = 1e-8;
134: #endif
135: tao->crtol = 0.0;
136: tao->catol = 0.0;
137: tao->gttol = 0.0;
138: tao->steptol = 0.0;
139: tao->trust0 = PETSC_INFINITY;
140: tao->fmin = PETSC_NINFINITY;
141: tao->hist_malloc = PETSC_FALSE;
142: tao->hist_reset = PETSC_TRUE;
143: tao->hist_max = 0;
144: tao->hist_len = 0;
145: tao->hist_obj = NULL;
146: tao->hist_resid = NULL;
147: tao->hist_cnorm = NULL;
148: tao->hist_lits = NULL;
150: tao->numbermonitors=0;
151: tao->viewsolution=PETSC_FALSE;
152: tao->viewhessian=PETSC_FALSE;
153: tao->viewgradient=PETSC_FALSE;
154: tao->viewjacobian=PETSC_FALSE;
155: tao->viewconstraints = PETSC_FALSE;
156:
157: tao->bounded = PETSC_FALSE;
159: tao->header_printed = PETSC_FALSE;
161:   /* These flags prevent algorithms from overriding user options */
162: tao->max_it_changed =PETSC_FALSE;
163: tao->max_funcs_changed=PETSC_FALSE;
164: tao->gatol_changed =PETSC_FALSE;
165: tao->grtol_changed =PETSC_FALSE;
166: tao->gttol_changed =PETSC_FALSE;
167: tao->steptol_changed =PETSC_FALSE;
168: tao->trust0_changed =PETSC_FALSE;
169: tao->fmin_changed =PETSC_FALSE;
170: tao->catol_changed =PETSC_FALSE;
171: tao->crtol_changed =PETSC_FALSE;
172: TaoResetStatistics(tao);
173: *newtao = tao;
174: return(0);
175: }
177: /*@
178: TaoSolve - Solves an optimization problem min F(x) s.t. l <= x <= u
180: Collective on Tao
182: Input Parameters:
183: . tao - the Tao context
185: Notes:
186: The user must set up the Tao with calls to TaoSetInitialVector(),
187: TaoSetObjectiveRoutine(),
188: TaoSetGradientRoutine(), and (if using 2nd order method) TaoSetHessianRoutine().
190: You should call TaoGetConvergedReason() or run with -tao_converged_reason to determine if the optimization algorithm actually succeeded or
191: why it failed.
193: Level: beginner
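   Example (a sketch of the setup sequence described in the Notes; FormFunctionGradient, FormHessian, H, x, and the user context are application-provided placeholders):
$      ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
$      ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);CHKERRQ(ierr);
$      ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,&user);CHKERRQ(ierr);   // only for second-order methods
$      ierr = TaoSolve(tao);CHKERRQ(ierr);
$      ierr = TaoGetConvergedReason(tao,&reason);CHKERRQ(ierr);
$      if (reason <= 0) {ierr = PetscPrintf(PETSC_COMM_WORLD,"TAO did not converge (reason %d)\n",(int)reason);CHKERRQ(ierr);}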
195: .seealso: TaoCreate(), TaoSetObjectiveRoutine(), TaoSetGradientRoutine(), TaoSetHessianRoutine(), TaoGetConvergedReason()
196: @*/
197: PetscErrorCode TaoSolve(Tao tao)
198: {
199: PetscErrorCode ierr;
200: static PetscBool set = PETSC_FALSE;
204: PetscCitationsRegister("@TechReport{tao-user-ref,\n"
205: "title = {Toolkit for Advanced Optimization (TAO) Users Manual},\n"
206: "author = {Todd Munson and Jason Sarich and Stefan Wild and Steve Benson and Lois Curfman McInnes},\n"
207: "Institution = {Argonne National Laboratory},\n"
208: "Year = 2014,\n"
209: "Number = {ANL/MCS-TM-322 - Revision 3.5},\n"
210: "url = {http://www.mcs.anl.gov/tao}\n}\n",&set);
211: tao->header_printed = PETSC_FALSE;
212: TaoSetUp(tao);
213: TaoResetStatistics(tao);
214: if (tao->linesearch) {
215: TaoLineSearchReset(tao->linesearch);
216: }
218: PetscLogEventBegin(TAO_Solve,tao,0,0,0);
219: if (tao->ops->solve){ (*tao->ops->solve)(tao); }
220: PetscLogEventEnd(TAO_Solve,tao,0,0,0);
222: VecViewFromOptions(tao->solution,(PetscObject)tao,"-tao_view_solution");
224: tao->ntotalits += tao->niter;
225: TaoViewFromOptions(tao,NULL,"-tao_view");
227: if (tao->printreason) {
228: if (tao->reason > 0) {
229: PetscPrintf(((PetscObject)tao)->comm,"TAO solve converged due to %s iterations %D\n",TaoConvergedReasons[tao->reason],tao->niter);
230: } else {
231: PetscPrintf(((PetscObject)tao)->comm,"TAO solve did not converge due to %s iteration %D\n",TaoConvergedReasons[tao->reason],tao->niter);
232: }
233: }
234: return(0);
235: }
237: /*@
238: TaoSetUp - Sets up the internal data structures for the later use
239: of a Tao solver
241: Collective on tao
243: Input Parameters:
244: . tao - the TAO context
246: Notes:
247: The user will not need to explicitly call TaoSetUp(), as it will
248: automatically be called in TaoSolve(). However, if the user
249: desires to call it explicitly, it should come after TaoCreate()
250: and any TaoSetSomething() routines, but before TaoSolve().
252: Level: advanced
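   Example of an explicit call (a sketch; x and ierr are assumed declared by the caller, and TaoSolve() would otherwise perform this step automatically):
$      ierr = TaoCreate(PETSC_COMM_WORLD,&tao);CHKERRQ(ierr);
$      ierr = TaoSetType(tao,TAONTR);CHKERRQ(ierr);
$      ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
$      ierr = TaoSetUp(tao);CHKERRQ(ierr);        // after the Set routines, before TaoSolve()
$      ierr = TaoSolve(tao);CHKERRQ(ierr);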
254: .seealso: TaoCreate(), TaoSolve()
255: @*/
256: PetscErrorCode TaoSetUp(Tao tao)
257: {
262: if (tao->setupcalled) return(0);
264: if (!tao->solution) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must call TaoSetInitialVector");
265: if (tao->ops->setup) {
266: (*tao->ops->setup)(tao);
267: }
268: tao->setupcalled = PETSC_TRUE;
269: return(0);
270: }
272: /*@
273: TaoDestroy - Destroys the TAO context that was created with
274: TaoCreate()
276: Collective on Tao
278: Input Parameter:
279: . tao - the Tao context
281: Level: beginner
283: .seealso: TaoCreate(), TaoSolve()
284: @*/
285: PetscErrorCode TaoDestroy(Tao *tao)
286: {
290: if (!*tao) return(0);
292: if (--((PetscObject)*tao)->refct > 0) {*tao=0;return(0);}
294: if ((*tao)->ops->destroy) {
295: (*((*tao))->ops->destroy)(*tao);
296: }
297: KSPDestroy(&(*tao)->ksp);
298: TaoLineSearchDestroy(&(*tao)->linesearch);
300: if ((*tao)->ops->convergencedestroy) {
301: (*(*tao)->ops->convergencedestroy)((*tao)->cnvP);
302: if ((*tao)->jacobian_state_inv) {
303: MatDestroy(&(*tao)->jacobian_state_inv);
304: }
305: }
306: VecDestroy(&(*tao)->solution);
307: VecDestroy(&(*tao)->gradient);
308: VecDestroy(&(*tao)->ls_res);
310: if ((*tao)->gradient_norm) {
311: PetscObjectDereference((PetscObject)(*tao)->gradient_norm);
312: VecDestroy(&(*tao)->gradient_norm_tmp);
313: }
315: VecDestroy(&(*tao)->XL);
316: VecDestroy(&(*tao)->XU);
317: VecDestroy(&(*tao)->IL);
318: VecDestroy(&(*tao)->IU);
319: VecDestroy(&(*tao)->DE);
320: VecDestroy(&(*tao)->DI);
321: VecDestroy(&(*tao)->constraints_equality);
322: VecDestroy(&(*tao)->constraints_inequality);
323: VecDestroy(&(*tao)->stepdirection);
324: MatDestroy(&(*tao)->hessian_pre);
325: MatDestroy(&(*tao)->hessian);
326: MatDestroy(&(*tao)->ls_jac);
327: MatDestroy(&(*tao)->ls_jac_pre);
328: MatDestroy(&(*tao)->jacobian_pre);
329: MatDestroy(&(*tao)->jacobian);
330: MatDestroy(&(*tao)->jacobian_state_pre);
331: MatDestroy(&(*tao)->jacobian_state);
332: MatDestroy(&(*tao)->jacobian_state_inv);
333: MatDestroy(&(*tao)->jacobian_design);
334: MatDestroy(&(*tao)->jacobian_equality);
335: MatDestroy(&(*tao)->jacobian_equality_pre);
336: MatDestroy(&(*tao)->jacobian_inequality);
337: MatDestroy(&(*tao)->jacobian_inequality_pre);
338: ISDestroy(&(*tao)->state_is);
339: ISDestroy(&(*tao)->design_is);
340: VecDestroy(&(*tao)->res_weights_v);
341: TaoCancelMonitors(*tao);
342: if ((*tao)->hist_malloc) {
343: PetscFree((*tao)->hist_obj);
344: PetscFree((*tao)->hist_resid);
345: PetscFree((*tao)->hist_cnorm);
346: PetscFree((*tao)->hist_lits);
347: }
348: if ((*tao)->res_weights_n) {
349: PetscFree((*tao)->res_weights_rows);
350: PetscFree((*tao)->res_weights_cols);
351: PetscFree((*tao)->res_weights_w);
352: }
353: PetscHeaderDestroy(tao);
354: return(0);
355: }
357: /*@
358: TaoSetFromOptions - Sets various Tao parameters from user
359: options.
361: Collective on Tao
363:    Input Parameter:
364: . tao - the Tao solver context
366:    Options Database Keys:
367: + -tao_type <type> - The algorithm that TAO uses (lmvm, nls, etc.)
368: . -tao_gatol <gatol> - absolute error tolerance for ||gradient||
369: . -tao_grtol <grtol> - relative error tolerance for ||gradient||
370: . -tao_gttol <gttol> - reduction of ||gradient|| relative to initial gradient
371: . -tao_max_it <max> - sets maximum number of iterations
372: . -tao_max_funcs <max> - sets maximum number of function evaluations
373: . -tao_fmin <fmin> - stop if function value reaches fmin
374: . -tao_steptol <tol> - stop if trust region radius less than <tol>
375: . -tao_trust0 <t> - initial trust region radius
376: . -tao_monitor - prints function value and residual at each iteration
377: . -tao_smonitor - same as tao_monitor, but truncates very small values
378: . -tao_cmonitor - prints function value, residual, and constraint norm at each iteration
379: . -tao_view_solution - prints solution vector at each iteration
380: . -tao_view_ls_residual - prints least-squares residual vector at each iteration
381: . -tao_view_step - prints step direction vector at each iteration
382: . -tao_view_gradient - prints gradient vector at each iteration
383: . -tao_draw_solution - graphically view solution vector at each iteration
384: . -tao_draw_step - graphically view step vector at each iteration
385: . -tao_draw_gradient - graphically view gradient at each iteration
386: . -tao_fd_gradient - use gradient computed with finite differences
387: . -tao_fd_hessian - use hessian computed with finite differences
388: . -tao_mf_hessian - use matrix-free hessian computed with finite differences
389: . -tao_cancelmonitors - cancels all monitors (except those set with command line)
390: . -tao_view - prints information about the Tao after solving
391: - -tao_converged_reason - prints the reason TAO stopped iterating
393: Notes:
394: To see all options, run your program with the -help option or consult the
395: user's manual. Should be called after TaoCreate() but before TaoSolve()
397: Level: beginner
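   Example (a sketch; the executable name ex1 is a placeholder):
$      ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);   // after TaoCreate() and the Set routines, before TaoSolve()
   so that, for instance, the method and tolerances can be chosen at run time:
$      ./ex1 -tao_type ntr -tao_monitor -tao_gatol 1.e-6 -tao_max_it 50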
398: @*/
399: PetscErrorCode TaoSetFromOptions(Tao tao)
400: {
402: TaoType default_type = TAOLMVM;
403: char type[256], monfilename[PETSC_MAX_PATH_LEN];
404: PetscViewer monviewer;
405: PetscBool flg;
406: MPI_Comm comm;
410: PetscObjectGetComm((PetscObject)tao,&comm);
412: /* So no warnings are given about unused options */
413: PetscOptionsHasName(((PetscObject)tao)->options,((PetscObject)tao)->prefix,"-tao_ls_type",&flg);
415: PetscObjectOptionsBegin((PetscObject)tao);
416: {
417: TaoRegisterAll();
418: if (((PetscObject)tao)->type_name) {
419: default_type = ((PetscObject)tao)->type_name;
420: }
421: /* Check for type from options */
422: PetscOptionsFList("-tao_type","Tao Solver type","TaoSetType",TaoList,default_type,type,256,&flg);
423: if (flg) {
424: TaoSetType(tao,type);
425: } else if (!((PetscObject)tao)->type_name) {
426: TaoSetType(tao,default_type);
427: }
429: PetscOptionsReal("-tao_catol","Stop if constraints violations within","TaoSetConstraintTolerances",tao->catol,&tao->catol,&flg);
430: if (flg) tao->catol_changed=PETSC_TRUE;
431:     PetscOptionsReal("-tao_crtol","Stop if relative constraint violations within","TaoSetConstraintTolerances",tao->crtol,&tao->crtol,&flg);
432: if (flg) tao->crtol_changed=PETSC_TRUE;
433: PetscOptionsReal("-tao_gatol","Stop if norm of gradient less than","TaoSetTolerances",tao->gatol,&tao->gatol,&flg);
434: if (flg) tao->gatol_changed=PETSC_TRUE;
435: PetscOptionsReal("-tao_grtol","Stop if norm of gradient divided by the function value is less than","TaoSetTolerances",tao->grtol,&tao->grtol,&flg);
436: if (flg) tao->grtol_changed=PETSC_TRUE;
437: PetscOptionsReal("-tao_gttol","Stop if the norm of the gradient is less than the norm of the initial gradient times tol","TaoSetTolerances",tao->gttol,&tao->gttol,&flg);
438: if (flg) tao->gttol_changed=PETSC_TRUE;
439: PetscOptionsInt("-tao_max_it","Stop if iteration number exceeds","TaoSetMaximumIterations",tao->max_it,&tao->max_it,&flg);
440: if (flg) tao->max_it_changed=PETSC_TRUE;
441: PetscOptionsInt("-tao_max_funcs","Stop if number of function evaluations exceeds","TaoSetMaximumFunctionEvaluations",tao->max_funcs,&tao->max_funcs,&flg);
442: if (flg) tao->max_funcs_changed=PETSC_TRUE;
443: PetscOptionsReal("-tao_fmin","Stop if function less than","TaoSetFunctionLowerBound",tao->fmin,&tao->fmin,&flg);
444: if (flg) tao->fmin_changed=PETSC_TRUE;
445: PetscOptionsReal("-tao_steptol","Stop if step size or trust region radius less than","",tao->steptol,&tao->steptol,&flg);
446: if (flg) tao->steptol_changed=PETSC_TRUE;
447: PetscOptionsReal("-tao_trust0","Initial trust region radius","TaoSetTrustRegionRadius",tao->trust0,&tao->trust0,&flg);
448: if (flg) tao->trust0_changed=PETSC_TRUE;
449: PetscOptionsString("-tao_view_solution","view solution vector after each evaluation","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
450: if (flg) {
451: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
452: TaoSetMonitor(tao,TaoSolutionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
453: }
455: PetscOptionsBool("-tao_converged_reason","Print reason for TAO converged","TaoSolve",tao->printreason,&tao->printreason,NULL);
456: PetscOptionsString("-tao_view_gradient","view gradient vector after each evaluation","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
457: if (flg) {
458: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
459: TaoSetMonitor(tao,TaoGradientMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
460: }
462: PetscOptionsString("-tao_view_stepdirection","view step direction vector after each iteration","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
463: if (flg) {
464: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
465: TaoSetMonitor(tao,TaoStepDirectionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
466: }
468: PetscOptionsString("-tao_view_residual","view least-squares residual vector after each evaluation","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
469: if (flg) {
470: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
471: TaoSetMonitor(tao,TaoResidualMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
472: }
474: PetscOptionsString("-tao_monitor","Use the default convergence monitor","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
475: if (flg) {
476: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
477: TaoSetMonitor(tao,TaoMonitorDefault,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
478: }
480: PetscOptionsString("-tao_gmonitor","Use the convergence monitor with extra globalization info","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
481: if (flg) {
482: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
483: TaoSetMonitor(tao,TaoDefaultGMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
484: }
486: PetscOptionsString("-tao_smonitor","Use the short convergence monitor","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
487: if (flg) {
488: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
489: TaoSetMonitor(tao,TaoDefaultSMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
490: }
492: PetscOptionsString("-tao_cmonitor","Use the default convergence monitor with constraint norm","TaoSetMonitor","stdout",monfilename,PETSC_MAX_PATH_LEN,&flg);
493: if (flg) {
494: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
495: TaoSetMonitor(tao,TaoDefaultCMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
496: }
499: flg = PETSC_FALSE;
500: PetscOptionsBool("-tao_cancelmonitors","cancel all monitors and call any registered destroy routines","TaoCancelMonitors",flg,&flg,NULL);
501: if (flg) {TaoCancelMonitors(tao);}
503: flg = PETSC_FALSE;
504: PetscOptionsBool("-tao_draw_solution","Plot solution vector at each iteration","TaoSetMonitor",flg,&flg,NULL);
505: if (flg) {
506: TaoMonitorDrawCtx drawctx;
507: PetscInt howoften = 1;
508: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao),0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&drawctx);
509: TaoSetMonitor(tao,TaoDrawSolutionMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
510: }
512: flg = PETSC_FALSE;
513: PetscOptionsBool("-tao_draw_step","plots step direction at each iteration","TaoSetMonitor",flg,&flg,NULL);
514: if (flg) {
515: TaoSetMonitor(tao,TaoDrawStepMonitor,NULL,NULL);
516: }
518: flg = PETSC_FALSE;
519: PetscOptionsBool("-tao_draw_gradient","plots gradient at each iteration","TaoSetMonitor",flg,&flg,NULL);
520: if (flg) {
521: TaoMonitorDrawCtx drawctx;
522: PetscInt howoften = 1;
523: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao),0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&drawctx);
524: TaoSetMonitor(tao,TaoDrawGradientMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
525: }
526: flg = PETSC_FALSE;
527: PetscOptionsBool("-tao_fd_gradient","compute gradient using finite differences","TaoDefaultComputeGradient",flg,&flg,NULL);
528: if (flg) {
529: TaoSetGradientRoutine(tao,TaoDefaultComputeGradient,NULL);
530: }
531: flg = PETSC_FALSE;
532: PetscOptionsBool("-tao_fd_hessian","compute hessian using finite differences","TaoDefaultComputeHessian",flg,&flg,NULL);
533: if (flg) {
534: Mat H;
536: MatCreate(PetscObjectComm((PetscObject)tao),&H);
537: MatSetType(H,MATAIJ);
538: TaoSetHessianRoutine(tao,H,H,TaoDefaultComputeHessian,NULL);
539: MatDestroy(&H);
540: }
541: flg = PETSC_FALSE;
542: PetscOptionsBool("-tao_mf_hessian","compute matrix-free hessian using finite differences","TaoDefaultComputeHessianMFFD",flg,&flg,NULL);
543: if (flg) {
544: Mat H;
546: MatCreate(PetscObjectComm((PetscObject)tao),&H);
547: TaoSetHessianRoutine(tao,H,H,TaoDefaultComputeHessianMFFD,NULL);
548: MatDestroy(&H);
549: }
550: PetscOptionsEnum("-tao_subset_type","subset type","",TaoSubSetTypes,(PetscEnum)tao->subset_type,(PetscEnum*)&tao->subset_type,NULL);
552: if (tao->ops->setfromoptions) {
553: (*tao->ops->setfromoptions)(PetscOptionsObject,tao);
554: }
555: }
556: PetscOptionsEnd();
557: return(0);
558: }
560: /*@C
561: TaoView - Prints information about the Tao
563: Collective on Tao
565:    Input Parameters:
566: + tao - the Tao context
567: - viewer - visualization context
569: Options Database Key:
570: . -tao_view - Calls TaoView() at the end of TaoSolve()
572: Notes:
573: The available visualization contexts include
574: + PETSC_VIEWER_STDOUT_SELF - standard output (default)
575: - PETSC_VIEWER_STDOUT_WORLD - synchronized standard
576: output where only the first processor opens
577: the file. All other processors send their
578: data to the first processor to print.
580: Level: beginner
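   Example (a sketch; the statistics are most meaningful after TaoSolve() has been called):
$      ierr = TaoSolve(tao);CHKERRQ(ierr);
$      ierr = TaoView(tao,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
   or simply run with the -tao_view option.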
582: .seealso: PetscViewerASCIIOpen()
583: @*/
584: PetscErrorCode TaoView(Tao tao, PetscViewer viewer)
585: {
586: PetscErrorCode ierr;
587: PetscBool isascii,isstring;
588: TaoType type;
592: if (!viewer) {
593: PetscViewerASCIIGetStdout(((PetscObject)tao)->comm,&viewer);
594: }
598: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
599: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
600: if (isascii) {
601: PetscObjectPrintClassNamePrefixType((PetscObject)tao,viewer);
603: if (tao->ops->view) {
604: PetscViewerASCIIPushTab(viewer);
605: (*tao->ops->view)(tao,viewer);
606: PetscViewerASCIIPopTab(viewer);
607: }
608: if (tao->linesearch) {
609: PetscViewerASCIIPushTab(viewer);
610: TaoLineSearchView(tao->linesearch,viewer);
611: PetscViewerASCIIPopTab(viewer);
612: }
613: if (tao->ksp) {
614: PetscViewerASCIIPushTab(viewer);
615: KSPView(tao->ksp,viewer);
616: PetscViewerASCIIPrintf(viewer,"total KSP iterations: %D\n",tao->ksp_tot_its);
617: PetscViewerASCIIPopTab(viewer);
618: }
620: PetscViewerASCIIPushTab(viewer);
622: if (tao->XL || tao->XU) {
623: PetscViewerASCIIPrintf(viewer,"Active Set subset type: %s\n",TaoSubSetTypes[tao->subset_type]);
624: }
626: PetscViewerASCIIPrintf(viewer,"convergence tolerances: gatol=%g,",(double)tao->gatol);
627: PetscViewerASCIIPrintf(viewer," steptol=%g,",(double)tao->steptol);
628: PetscViewerASCIIPrintf(viewer," gttol=%g\n",(double)tao->gttol);
629: PetscViewerASCIIPrintf(viewer,"Residual in Function/Gradient:=%g\n",(double)tao->residual);
631: if (tao->cnorm>0 || tao->catol>0 || tao->crtol>0){
632: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances:");
633: ierr=PetscViewerASCIIPrintf(viewer," catol=%g,",(double)tao->catol);
634: ierr=PetscViewerASCIIPrintf(viewer," crtol=%g\n",(double)tao->crtol);
635: PetscViewerASCIIPrintf(viewer,"Residual in Constraints:=%g\n",(double)tao->cnorm);
636: }
638: if (tao->trust < tao->steptol){
639: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances: steptol=%g\n",(double)tao->steptol);
640: ierr=PetscViewerASCIIPrintf(viewer,"Final trust region radius:=%g\n",(double)tao->trust);
641: }
643: if (tao->fmin>-1.e25){
644: ierr=PetscViewerASCIIPrintf(viewer,"convergence tolerances: function minimum=%g\n",(double)tao->fmin);
645: }
646: PetscViewerASCIIPrintf(viewer,"Objective value=%g\n",(double)tao->fc);
648: PetscViewerASCIIPrintf(viewer,"total number of iterations=%D, ",tao->niter);
649: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_it);
651: if (tao->nfuncs>0){
652: PetscViewerASCIIPrintf(viewer,"total number of function evaluations=%D,",tao->nfuncs);
653: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
654: }
655: if (tao->ngrads>0){
656: PetscViewerASCIIPrintf(viewer,"total number of gradient evaluations=%D,",tao->ngrads);
657: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
658: }
659: if (tao->nfuncgrads>0){
660: PetscViewerASCIIPrintf(viewer,"total number of function/gradient evaluations=%D,",tao->nfuncgrads);
661: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_funcs);
662: }
663: if (tao->nhess>0){
664: PetscViewerASCIIPrintf(viewer,"total number of Hessian evaluations=%D\n",tao->nhess);
665: }
666: /* if (tao->linear_its>0){
667: PetscViewerASCIIPrintf(viewer," total Krylov method iterations=%D\n",tao->linear_its);
668: }*/
669: if (tao->nconstraints>0){
670: PetscViewerASCIIPrintf(viewer,"total number of constraint function evaluations=%D\n",tao->nconstraints);
671: }
672: if (tao->njac>0){
673: PetscViewerASCIIPrintf(viewer,"total number of Jacobian evaluations=%D\n",tao->njac);
674: }
676: if (tao->reason>0){
677: PetscViewerASCIIPrintf(viewer, "Solution converged: ");
678: switch (tao->reason) {
679: case TAO_CONVERGED_GATOL:
680: PetscViewerASCIIPrintf(viewer," ||g(X)|| <= gatol\n");
681: break;
682: case TAO_CONVERGED_GRTOL:
683: PetscViewerASCIIPrintf(viewer," ||g(X)||/|f(X)| <= grtol\n");
684: break;
685: case TAO_CONVERGED_GTTOL:
686: PetscViewerASCIIPrintf(viewer," ||g(X)||/||g(X0)|| <= gttol\n");
687: break;
688: case TAO_CONVERGED_STEPTOL:
689: PetscViewerASCIIPrintf(viewer," Steptol -- step size small\n");
690: break;
691: case TAO_CONVERGED_MINF:
692: PetscViewerASCIIPrintf(viewer," Minf -- f < fmin\n");
693: break;
694: case TAO_CONVERGED_USER:
695: PetscViewerASCIIPrintf(viewer," User Terminated\n");
696: break;
697: default:
698: PetscViewerASCIIPrintf(viewer,"\n");
699: break;
700: }
702: } else {
703: PetscViewerASCIIPrintf(viewer,"Solver terminated: %d",tao->reason);
704: switch (tao->reason) {
705: case TAO_DIVERGED_MAXITS:
706: PetscViewerASCIIPrintf(viewer," Maximum Iterations\n");
707: break;
708: case TAO_DIVERGED_NAN:
709: PetscViewerASCIIPrintf(viewer," NAN or Inf encountered\n");
710: break;
711: case TAO_DIVERGED_MAXFCN:
712: PetscViewerASCIIPrintf(viewer," Maximum Function Evaluations\n");
713: break;
714: case TAO_DIVERGED_LS_FAILURE:
715: PetscViewerASCIIPrintf(viewer," Line Search Failure\n");
716: break;
717: case TAO_DIVERGED_TR_REDUCTION:
718: PetscViewerASCIIPrintf(viewer," Trust Region too small\n");
719: break;
720: case TAO_DIVERGED_USER:
721: PetscViewerASCIIPrintf(viewer," User Terminated\n");
722: break;
723: default:
724: PetscViewerASCIIPrintf(viewer,"\n");
725: break;
726: }
727: }
728: PetscViewerASCIIPopTab(viewer);
729: } else if (isstring) {
730: TaoGetType(tao,&type);
731: PetscViewerStringSPrintf(viewer," %-3.3s",type);
732: }
733: return(0);
734: }
736: /*@
737: TaoSetTolerances - Sets parameters used in TAO convergence tests
739: Logically collective on Tao
741: Input Parameters:
742: + tao - the Tao context
743: . gatol - stop if norm of gradient is less than this
744: . grtol - stop if relative norm of gradient is less than this
745: - gttol - stop if norm of gradient is reduced by this factor
747: Options Database Keys:
748: + -tao_gatol <gatol> - Sets gatol
749: . -tao_grtol <grtol> - Sets grtol
750: - -tao_gttol <gttol> - Sets gttol
752: Stopping Criteria:
753: $ ||g(X)|| <= gatol
754: $ ||g(X)|| / |f(X)| <= grtol
755: $ ||g(X)|| / ||g(X0)|| <= gttol
757: Notes:
758: Use PETSC_DEFAULT to leave one or more tolerances unchanged.
760: Level: beginner
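   Example (a sketch that sets only the absolute gradient tolerance and leaves the other tolerances unchanged; ierr is assumed declared by the caller):
$      ierr = TaoSetTolerances(tao,1.e-10,PETSC_DEFAULT,PETSC_DEFAULT);CHKERRQ(ierr);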
762: .seealso: TaoGetTolerances()
764: @*/
765: PetscErrorCode TaoSetTolerances(Tao tao, PetscReal gatol, PetscReal grtol, PetscReal gttol)
766: {
772: if (gatol != PETSC_DEFAULT) {
773: if (gatol<0) {
774: PetscInfo(tao,"Tried to set negative gatol -- ignored.\n");
775: } else {
776: tao->gatol = PetscMax(0,gatol);
777: tao->gatol_changed=PETSC_TRUE;
778: }
779: }
781: if (grtol != PETSC_DEFAULT) {
782: if (grtol<0) {
783: PetscInfo(tao,"Tried to set negative grtol -- ignored.\n");
784: } else {
785: tao->grtol = PetscMax(0,grtol);
786: tao->grtol_changed=PETSC_TRUE;
787: }
788: }
790: if (gttol != PETSC_DEFAULT) {
791: if (gttol<0) {
792: PetscInfo(tao,"Tried to set negative gttol -- ignored.\n");
793: } else {
794: tao->gttol = PetscMax(0,gttol);
795: tao->gttol_changed=PETSC_TRUE;
796: }
797: }
798: return(0);
799: }
801: /*@
802: TaoSetConstraintTolerances - Sets constraint tolerance parameters used in TAO convergence tests
804: Logically collective on Tao
806: Input Parameters:
807: + tao - the Tao context
808: .  catol - absolute constraint tolerance; the constraint norm must be less than catol for the gatol convergence criterion to be used
809: -  crtol - relative constraint tolerance; the constraint norm must be less than crtol for the gatol and gttol convergence criteria to be used
811: Options Database Keys:
812: + -tao_catol <catol> - Sets catol
813: - -tao_crtol <crtol> - Sets crtol
815: Notes:
816: Use PETSC_DEFAULT to leave any tolerance unchanged.
818: Level: intermediate
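   Example (a sketch that sets only the absolute constraint tolerance; ierr is assumed declared by the caller):
$      ierr = TaoSetConstraintTolerances(tao,1.e-8,PETSC_DEFAULT);CHKERRQ(ierr);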
820: .seealso: TaoGetTolerances(), TaoGetConstraintTolerances(), TaoSetTolerances()
822: @*/
823: PetscErrorCode TaoSetConstraintTolerances(Tao tao, PetscReal catol, PetscReal crtol)
824: {
830: if (catol != PETSC_DEFAULT) {
831: if (catol<0) {
832: PetscInfo(tao,"Tried to set negative catol -- ignored.\n");
833: } else {
834: tao->catol = PetscMax(0,catol);
835: tao->catol_changed=PETSC_TRUE;
836: }
837: }
839: if (crtol != PETSC_DEFAULT) {
840: if (crtol<0) {
841: PetscInfo(tao,"Tried to set negative crtol -- ignored.\n");
842: } else {
843: tao->crtol = PetscMax(0,crtol);
844: tao->crtol_changed=PETSC_TRUE;
845: }
846: }
847: return(0);
848: }
850: /*@
851: TaoGetConstraintTolerances - Gets constraint tolerance parameters used in TAO convergence tests
853:    Not Collective
855: Input Parameter:
856: . tao - the Tao context
858:    Output Parameters:
859: +  catol - absolute constraint tolerance; the constraint norm must be less than catol for the gatol convergence criterion to be used
860: -  crtol - relative constraint tolerance; the constraint norm must be less than crtol for the gatol and gttol convergence criteria to be used
862: Level: intermediate
864: .seealso: TaoGetTolerances(), TaoSetTolerances(), TaoSetConstraintTolerances()
866: @*/
867: PetscErrorCode TaoGetConstraintTolerances(Tao tao, PetscReal *catol, PetscReal *crtol)
868: {
871: if (catol) *catol = tao->catol;
872: if (crtol) *crtol = tao->crtol;
873: return(0);
874: }
876: /*@
877: TaoSetFunctionLowerBound - Sets a bound on the solution objective value.
878: When an approximate solution with an objective value below this number
879: has been found, the solver will terminate.
881: Logically Collective on Tao
883: Input Parameters:
884: + tao - the Tao solver context
885: - fmin - the tolerance
887: Options Database Keys:
888: . -tao_fmin <fmin> - sets the minimum function value
890: Level: intermediate
892: .seealso: TaoSetTolerances()
893: @*/
894: PetscErrorCode TaoSetFunctionLowerBound(Tao tao,PetscReal fmin)
895: {
898: tao->fmin = fmin;
899: tao->fmin_changed=PETSC_TRUE;
900: return(0);
901: }
903: /*@
904: TaoGetFunctionLowerBound - Gets the bound on the solution objective value.
905: When an approximate solution with an objective value below this number
906: has been found, the solver will terminate.
908: Not collective on Tao
910: Input Parameters:
911: . tao - the Tao solver context
913:    Output Parameter:
914: . fmin - the minimum function value
916: Level: intermediate
918: .seealso: TaoSetFunctionLowerBound()
919: @*/
920: PetscErrorCode TaoGetFunctionLowerBound(Tao tao,PetscReal *fmin)
921: {
924: *fmin = tao->fmin;
925: return(0);
926: }
928: /*@
929: TaoSetMaximumFunctionEvaluations - Sets a maximum number of
930: function evaluations.
932: Logically Collective on Tao
934: Input Parameters:
935: + tao - the Tao solver context
936: - nfcn - the maximum number of function evaluations (>=0)
938: Options Database Keys:
939: . -tao_max_funcs <nfcn> - sets the maximum number of function evaluations
941: Level: intermediate
943: .seealso: TaoSetTolerances(), TaoSetMaximumIterations()
944: @*/
946: PetscErrorCode TaoSetMaximumFunctionEvaluations(Tao tao,PetscInt nfcn)
947: {
950: tao->max_funcs = PetscMax(0,nfcn);
951: tao->max_funcs_changed=PETSC_TRUE;
952: return(0);
953: }
955: /*@
956:    TaoGetMaximumFunctionEvaluations - Gets the maximum number of
957:    function evaluations.
959: Not Collective
961: Input Parameters:
962: . tao - the Tao solver context
964: Output Parameters:
965: . nfcn - the maximum number of function evaluations
967: Level: intermediate
969: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
970: @*/
972: PetscErrorCode TaoGetMaximumFunctionEvaluations(Tao tao,PetscInt *nfcn)
973: {
976: *nfcn = tao->max_funcs;
977: return(0);
978: }
980: /*@
981:    TaoGetCurrentFunctionEvaluations - Gets the current number of
982: function evaluations.
984: Not Collective
986: Input Parameters:
987: . tao - the Tao solver context
989: Output Parameters:
990: . nfuncs - the current number of function evaluations
992: Level: intermediate
994: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
995: @*/
997: PetscErrorCode TaoGetCurrentFunctionEvaluations(Tao tao,PetscInt *nfuncs)
998: {
1001: *nfuncs=PetscMax(tao->nfuncs,tao->nfuncgrads);
1002: return(0);
1003: }
1005: /*@
1006: TaoSetMaximumIterations - Sets a maximum number of iterates.
1008: Logically Collective on Tao
1010: Input Parameters:
1011: + tao - the Tao solver context
1012: - maxits - the maximum number of iterates (>=0)
1014: Options Database Keys:
1015: . -tao_max_it <its> - sets the maximum number of iterations
1017: Level: intermediate
1019: .seealso: TaoSetTolerances(), TaoSetMaximumFunctionEvaluations()
1020: @*/
1021: PetscErrorCode TaoSetMaximumIterations(Tao tao,PetscInt maxits)
1022: {
1025: tao->max_it = PetscMax(0,maxits);
1026: tao->max_it_changed=PETSC_TRUE;
1027: return(0);
1028: }
1030: /*@
1031:    TaoGetMaximumIterations - Gets the maximum number of iterates.
1033: Not Collective
1035: Input Parameters:
1036: . tao - the Tao solver context
1038: Output Parameters:
1039: . maxits - the maximum number of iterates
1041: Level: intermediate
1043: .seealso: TaoSetMaximumIterations(), TaoGetMaximumFunctionEvaluations()
1044: @*/
1045: PetscErrorCode TaoGetMaximumIterations(Tao tao,PetscInt *maxits)
1046: {
1049: *maxits = tao->max_it;
1050: return(0);
1051: }
1053: /*@
1054: TaoSetInitialTrustRegionRadius - Sets the initial trust region radius.
1056: Logically collective on Tao
1058:    Input Parameters:
1059: + tao - a TAO optimization solver
1060: - radius - the trust region radius
1062: Level: intermediate
1064: Options Database Key:
1065: . -tao_trust0 <t0> - sets initial trust region radius
1067: .seealso: TaoGetTrustRegionRadius(), TaoSetTrustRegionTolerance()
1068: @*/
1069: PetscErrorCode TaoSetInitialTrustRegionRadius(Tao tao, PetscReal radius)
1070: {
1073: tao->trust0 = PetscMax(0.0,radius);
1074: tao->trust0_changed=PETSC_TRUE;
1075: return(0);
1076: }
1078: /*@
1079:    TaoGetInitialTrustRegionRadius - Gets the initial trust region radius.
1081: Not Collective
1083: Input Parameter:
1084: . tao - a TAO optimization solver
1086: Output Parameter:
1087: . radius - the trust region radius
1089: Level: intermediate
1091: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetCurrentTrustRegionRadius()
1092: @*/
1093: PetscErrorCode TaoGetInitialTrustRegionRadius(Tao tao, PetscReal *radius)
1094: {
1097: *radius = tao->trust0;
1098: return(0);
1099: }
1101: /*@
1102: TaoGetCurrentTrustRegionRadius - Gets the current trust region radius.
1104: Not Collective
1106: Input Parameter:
1107: . tao - a TAO optimization solver
1109: Output Parameter:
1110: . radius - the trust region radius
1112: Level: intermediate
1114: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetInitialTrustRegionRadius()
1115: @*/
1116: PetscErrorCode TaoGetCurrentTrustRegionRadius(Tao tao, PetscReal *radius)
1117: {
1120: *radius = tao->trust;
1121: return(0);
1122: }
1124: /*@
1125: TaoGetTolerances - gets the current values of tolerances
1127: Not Collective
1129: Input Parameters:
1130: . tao - the Tao context
1132: Output Parameters:
1133: + gatol - stop if norm of gradient is less than this
1134: . grtol - stop if relative norm of gradient is less than this
1135: -  gttol - stop if norm of gradient is reduced by this factor
1137: Note: NULL can be used as an argument if not all tolerances values are needed
1139: .seealso: TaoSetTolerances()
1141: Level: intermediate
1142: @*/
1143: PetscErrorCode TaoGetTolerances(Tao tao, PetscReal *gatol, PetscReal *grtol, PetscReal *gttol)
1144: {
1147: if (gatol) *gatol=tao->gatol;
1148: if (grtol) *grtol=tao->grtol;
1149: if (gttol) *gttol=tao->gttol;
1150: return(0);
1151: }
1153: /*@
1154: TaoGetKSP - Gets the linear solver used by the optimization solver.
1155: Application writers should use TaoGetKSP if they need direct access
1156: to the PETSc KSP object.
1158: Not Collective
1160: Input Parameters:
1161: . tao - the TAO solver
1163: Output Parameters:
1164: . ksp - the KSP linear solver used in the optimization solver
1166: Level: intermediate
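   Example (a sketch that tightens the tolerance of the inner linear solver; note that the returned KSP may be NULL for methods that do not use one):
$      KSP ksp;
$      ierr = TaoGetKSP(tao,&ksp);CHKERRQ(ierr);
$      if (ksp) {ierr = KSPSetTolerances(ksp,1.e-5,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);CHKERRQ(ierr);}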
1168: @*/
1169: PetscErrorCode TaoGetKSP(Tao tao, KSP *ksp)
1170: {
1172: *ksp = tao->ksp;
1173: return(0);
1174: }
1176: /*@
1177: TaoGetLinearSolveIterations - Gets the total number of linear iterations
1178: used by the TAO solver
1180: Not Collective
1182: Input Parameter:
1183: . tao - TAO context
1185: Output Parameter:
1186: . lits - number of linear iterations
1188: Notes:
1189: This counter is reset to zero for each successive call to TaoSolve()
1191: Level: intermediate
1193: .keywords: TAO
1195: .seealso: TaoGetKSP()
1196: @*/
1197: PetscErrorCode TaoGetLinearSolveIterations(Tao tao,PetscInt *lits)
1198: {
1202: *lits = tao->ksp_tot_its;
1203: return(0);
1204: }
1206: /*@
1207: TaoGetLineSearch - Gets the line search used by the optimization solver.
1208: Application writers should use TaoGetLineSearch if they need direct access
1209: to the TaoLineSearch object.
1211: Not Collective
1213: Input Parameters:
1214: . tao - the TAO solver
1216: Output Parameters:
1217: . ls - the line search used in the optimization solver
1219: Level: intermediate
1221: @*/
1222: PetscErrorCode TaoGetLineSearch(Tao tao, TaoLineSearch *ls)
1223: {
1225: *ls = tao->linesearch;
1226: return(0);
1227: }
1229: /*@
1230: TaoAddLineSearchCounts - Adds the number of function evaluations spent
1231: in the line search to the running total.
1233:   Input Parameter:
1234: .  tao - the TAO solver
1237: Level: developer
1239: .seealso: TaoLineSearchApply()
1240: @*/
1241: PetscErrorCode TaoAddLineSearchCounts(Tao tao)
1242: {
1244: PetscBool flg;
1245: PetscInt nfeval,ngeval,nfgeval;
1249: if (tao->linesearch) {
1250: TaoLineSearchIsUsingTaoRoutines(tao->linesearch,&flg);
1251: if (!flg) {
1252: TaoLineSearchGetNumberFunctionEvaluations(tao->linesearch,&nfeval,&ngeval,&nfgeval);
1253: tao->nfuncs+=nfeval;
1254: tao->ngrads+=ngeval;
1255: tao->nfuncgrads+=nfgeval;
1256: }
1257: }
1258: return(0);
1259: }
1261: /*@
1262: TaoGetSolutionVector - Returns the vector with the current TAO solution
1264: Not Collective
1266: Input Parameter:
1267: . tao - the Tao context
1269: Output Parameter:
1270: . X - the current solution
1272: Level: intermediate
1274: Note: The returned vector will be the same object that was passed into TaoSetInitialVector()
1275: @*/
1276: PetscErrorCode TaoGetSolutionVector(Tao tao, Vec *X)
1277: {
1280: *X = tao->solution;
1281: return(0);
1282: }
1284: /*@
1285: TaoGetGradientVector - Returns the vector with the current TAO gradient
1287: Not Collective
1289: Input Parameter:
1290: . tao - the Tao context
1292: Output Parameter:
1293: .  G - the current gradient
1295: Level: intermediate
1296: @*/
1297: PetscErrorCode TaoGetGradientVector(Tao tao, Vec *G)
1298: {
1301: *G = tao->gradient;
1302: return(0);
1303: }
1305: /*@
1306:    TaoResetStatistics - Initializes the statistics used by TAO for all of the solvers.
1307: These statistics include the iteration number, residual norms, and convergence status.
1308: This routine gets called before solving each optimization problem.
1310: Collective on Tao
1312: Input Parameters:
1313: .  tao - the Tao context
1315: Level: developer
1317: .seealso: TaoCreate(), TaoSolve()
1318: @*/
1319: PetscErrorCode TaoResetStatistics(Tao tao)
1320: {
1323: tao->niter = 0;
1324: tao->nfuncs = 0;
1325: tao->nfuncgrads = 0;
1326: tao->ngrads = 0;
1327: tao->nhess = 0;
1328: tao->njac = 0;
1329: tao->nconstraints = 0;
1330: tao->ksp_its = 0;
1331: tao->ksp_tot_its = 0;
1332: tao->reason = TAO_CONTINUE_ITERATING;
1333: tao->residual = 0.0;
1334: tao->cnorm = 0.0;
1335: tao->step = 0.0;
1336: tao->lsflag = PETSC_FALSE;
1337: if (tao->hist_reset) tao->hist_len=0;
1338: return(0);
1339: }
1341: /*@C
1342: TaoSetUpdate - Sets the general-purpose update function called
1343: at the beginning of every iteration of the nonlinear solve. Specifically
1344: it is called at the top of every iteration, after the new solution and the gradient
1345:   are determined, but before the Hessian is computed (if applicable).
1347: Logically Collective on Tao
1349:   Input Parameters:
1350: +  tao - The tao solver context
1351: -  func - The update function
1353:   Calling sequence of func:
1354: $  PetscErrorCode func(Tao tao, PetscInt step, void *ctx);
1356: .  step - The current step of the iteration
1358: Level: advanced
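   Example (a sketch; MyUpdate is a hypothetical user routine and no context is passed):
$      PetscErrorCode MyUpdate(Tao tao, PetscInt step, void *ctx)
$      {
$        PetscErrorCode ierr;
$        ierr = PetscPrintf(PetscObjectComm((PetscObject)tao),"beginning iteration %D\n",step);CHKERRQ(ierr);
$        return 0;
$      }
   registered with
$      ierr = TaoSetUpdate(tao,MyUpdate,NULL);CHKERRQ(ierr);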
1360: .keywords: Tao, update
1362: .seealso: TaoSolve()
1363: @*/
1364: PetscErrorCode TaoSetUpdate(Tao tao, PetscErrorCode (*func)(Tao, PetscInt,void*), void *ctx)
1365: {
1368: tao->ops->update = func;
1369: tao->user_update = ctx;
1370: return(0);
1371: }
1373: /*@C
1374: TaoSetConvergenceTest - Sets the function that is to be used to test
1375:    for convergence of the iterative minimization solution.  The new convergence
1376: testing routine will replace TAO's default convergence test.
1378: Logically Collective on Tao
1380: Input Parameters:
1381: + tao - the Tao object
1382: . conv - the routine to test for convergence
1383: - ctx - [optional] context for private data for the convergence routine
1384: (may be NULL)
1386: Calling sequence of conv:
1387: $ PetscErrorCode conv(Tao tao, void *ctx)
1389: + tao - the Tao object
1390: - ctx - [optional] convergence context
1392: Note: The new convergence testing routine should call TaoSetConvergedReason().
1394: Level: advanced
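   Example (a sketch of a user-defined test; MyConvergenceTest is a hypothetical name and the iteration limit of 20 is arbitrary):
$      PetscErrorCode MyConvergenceTest(Tao tao, void *ctx)
$      {
$        PetscInt       its;
$        PetscErrorCode ierr;
$        ierr = TaoGetIterationNumber(tao,&its);CHKERRQ(ierr);
$        if (its >= 20) {ierr = TaoSetConvergedReason(tao,TAO_CONVERGED_USER);CHKERRQ(ierr);}
$        return 0;
$      }
   registered with
$      ierr = TaoSetConvergenceTest(tao,MyConvergenceTest,NULL);CHKERRQ(ierr);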
1396: .seealso: TaoSetConvergedReason(), TaoGetSolutionStatus(), TaoGetTolerances(), TaoSetMonitor()
1398: @*/
1399: PetscErrorCode TaoSetConvergenceTest(Tao tao, PetscErrorCode (*conv)(Tao,void*), void *ctx)
1400: {
1403: (tao)->ops->convergencetest = conv;
1404: (tao)->cnvP = ctx;
1405: return(0);
1406: }
1408: /*@C
1409: TaoSetMonitor - Sets an ADDITIONAL function that is to be used at every
1410: iteration of the solver to display the iteration's
1411: progress.
1413: Logically Collective on Tao
1415: Input Parameters:
1416: + tao - the Tao solver context
1417: . mymonitor - monitoring routine
1418: - mctx - [optional] user-defined context for private data for the
1419: monitor routine (may be NULL)
1421: Calling sequence of mymonitor:
1422: $     PetscErrorCode mymonitor(Tao tao,void *mctx)
1424: + tao - the Tao solver context
1425: - mctx - [optional] monitoring context
1428: Options Database Keys:
1429: + -tao_monitor - sets TaoMonitorDefault()
1430: . -tao_smonitor - sets short monitor
1431: . -tao_cmonitor - same as smonitor plus constraint norm
1432: . -tao_view_solution - view solution at each iteration
1433: . -tao_view_gradient - view gradient at each iteration
1434: . -tao_view_ls_residual - view least-squares residual vector at each iteration
1435: - -tao_cancelmonitors - cancels all monitors that have been hardwired into a code by calls to TaoSetMonitor(), but does not cancel those set via the options database.
1438: Notes:
1439: Several different monitoring routines may be set by calling
1440: TaoSetMonitor() multiple times; all will be called in the
1441: order in which they were set.
1443: Fortran Notes:
1444: Only one monitor function may be set
1446: Level: intermediate
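   Example (a sketch of a user-defined monitor; MyMonitor is a hypothetical name):
$      PetscErrorCode MyMonitor(Tao tao, void *mctx)
$      {
$        PetscInt           its;
$        PetscReal          f,gnorm,cnorm,xdiff;
$        TaoConvergedReason reason;
$        PetscErrorCode     ierr;
$        ierr = TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);CHKERRQ(ierr);
$        ierr = PetscPrintf(PetscObjectComm((PetscObject)tao),"%D: f=%g, ||g||=%g\n",its,(double)f,(double)gnorm);CHKERRQ(ierr);
$        return 0;
$      }
   registered with
$      ierr = TaoSetMonitor(tao,MyMonitor,NULL,NULL);CHKERRQ(ierr);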
1448: .seealso: TaoMonitorDefault(), TaoCancelMonitors(), TaoSetDestroyRoutine()
1449: @*/
1450: PetscErrorCode TaoSetMonitor(Tao tao, PetscErrorCode (*func)(Tao, void*), void *ctx,PetscErrorCode (*dest)(void**))
1451: {
1453: PetscInt i;
1454: PetscBool identical;
1458:   if (tao->numbermonitors >= MAXTAOMONITORS) SETERRQ1(PETSC_COMM_SELF,1,"Cannot attach another monitor -- max=%d",MAXTAOMONITORS);
1460: for (i=0; i<tao->numbermonitors;i++) {
1461: PetscMonitorCompare((PetscErrorCode (*)(void))func,ctx,dest,(PetscErrorCode (*)(void))tao->monitor[i],tao->monitorcontext[i],tao->monitordestroy[i],&identical);
1462: if (identical) return(0);
1463: }
1464: tao->monitor[tao->numbermonitors] = func;
1465: tao->monitorcontext[tao->numbermonitors] = (void*)ctx;
1466: tao->monitordestroy[tao->numbermonitors] = dest;
1467: ++tao->numbermonitors;
1468: return(0);
1469: }
1471: /*@
1472: TaoCancelMonitors - Clears all the monitor functions for a Tao object.
1474: Logically Collective on Tao
1476: Input Parameters:
1477: . tao - the Tao solver context
1479: Options Database:
1480: . -tao_cancelmonitors - cancels all monitors that have been hardwired
1481: into a code by calls to TaoSetMonitor(), but does not cancel those
1482: set via the options database
1484: Notes:
1485: There is no way to clear one specific monitor from a Tao object.
1487: Level: advanced
1489: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1490: @*/
1491: PetscErrorCode TaoCancelMonitors(Tao tao)
1492: {
1493: PetscInt i;
1498: for (i=0;i<tao->numbermonitors;i++) {
1499: if (tao->monitordestroy[i]) {
1500: (*tao->monitordestroy[i])(&tao->monitorcontext[i]);
1501: }
1502: }
1503: tao->numbermonitors=0;
1504: return(0);
1505: }
1507: /*@
1508: TaoMonitorDefault - Default routine for monitoring progress of the
1509: Tao solvers (default). This monitor prints the function value and gradient
1510: norm at each iteration. It can be turned on from the command line using the
1511: -tao_monitor option
1513: Collective on Tao
1515: Input Parameters:
1516: + tao - the Tao context
1517: - ctx - PetscViewer context or NULL
1519: Options Database Keys:
1520: . -tao_monitor
1522: Level: advanced
1524: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1525: @*/
1526: PetscErrorCode TaoMonitorDefault(Tao tao, void *ctx)
1527: {
1529: PetscInt its, tabs;
1530: PetscReal fct,gnorm;
1531: PetscViewer viewer = (PetscViewer)ctx;
1535: its=tao->niter;
1536: fct=tao->fc;
1537: gnorm=tao->residual;
1538: PetscViewerASCIIGetTab(viewer, &tabs);
1539: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1540: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1541: PetscViewerASCIIPrintf(viewer," Iteration information for %s solve.\n",((PetscObject)tao)->prefix);
1542: tao->header_printed = PETSC_TRUE;
1543: }
1544: ierr=PetscViewerASCIIPrintf(viewer,"%3D TAO,",its);
1545: ierr=PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1546: if (gnorm >= PETSC_INFINITY) {
1547: ierr=PetscViewerASCIIPrintf(viewer," Residual: Inf \n");
1548: } else {
1549: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1550: }
1551: PetscViewerASCIISetTab(viewer, tabs);
1552: return(0);
1553: }
1554:
1555: /*@
1556: TaoDefaultGMonitor - Default routine for monitoring progress of the
1557: Tao solvers (default) with extra detail on the globalization method.
1558: This monitor prints the function value and gradient norm at each
1559: iteration, as well as the step size and trust radius. Note that the
1560: step size and trust radius may be the same for some algorithms.
1561: It can be turned on from the command line using the
1562: -tao_gmonitor option
1564: Collective on Tao
1566: Input Parameters:
1567: + tao - the Tao context
1568: - ctx - PetscViewer context or NULL
1570: Options Database Keys:
1571: .  -tao_gmonitor
1573: Level: advanced
1575: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1576: @*/
1577: PetscErrorCode TaoDefaultGMonitor(Tao tao, void *ctx)
1578: {
1580: PetscInt its, tabs;
1581: PetscReal fct,gnorm,stp,tr;
1582: PetscViewer viewer = (PetscViewer)ctx;
1586: its=tao->niter;
1587: fct=tao->fc;
1588: gnorm=tao->residual;
1589: stp=tao->step;
1590: tr=tao->trust;
1591: PetscViewerASCIIGetTab(viewer, &tabs);
1592: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1593: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1594: PetscViewerASCIIPrintf(viewer," Iteration information for %s solve.\n",((PetscObject)tao)->prefix);
1595: tao->header_printed = PETSC_TRUE;
1596: }
1597: ierr=PetscViewerASCIIPrintf(viewer,"%3D TAO,",its);
1598: ierr=PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1599: if (gnorm >= PETSC_INFINITY) {
1600: ierr=PetscViewerASCIIPrintf(viewer," Residual: Inf,");
1601: } else {
1602: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g,",(double)gnorm);
1603: }
1604: PetscViewerASCIIPrintf(viewer," Step: %g, Trust: %g\n",(double)stp,(double)tr);
1605: PetscViewerASCIISetTab(viewer, tabs);
1606: return(0);
1607: }
1609: /*@
1610: TaoDefaultSMonitor - Default routine for monitoring progress of the
1611: solver. Same as TaoMonitorDefault() except
1612: it prints fewer digits of the residual as the residual gets smaller.
1613: This is because the later digits are meaningless and are often
1614: different on different machines; by using this routine different
1615: machines will usually generate the same output. It can be turned on
1616: by using the -tao_smonitor option
1618: Collective on Tao
1620: Input Parameters:
1621: + tao - the Tao context
1622: - ctx - PetscViewer context of type ASCII
1624: Options Database Keys:
1625: . -tao_smonitor
1627: Level: advanced
1629: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1630: @*/
1631: PetscErrorCode TaoDefaultSMonitor(Tao tao, void *ctx)
1632: {
1634: PetscInt its, tabs;
1635: PetscReal fct,gnorm;
1636: PetscViewer viewer = (PetscViewer)ctx;
1640: its=tao->niter;
1641: fct=tao->fc;
1642: gnorm=tao->residual;
1643: PetscViewerASCIIGetTab(viewer, &tabs);
1644: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1645: ierr=PetscViewerASCIIPrintf(viewer,"iter = %3D,",its);
1646: ierr=PetscViewerASCIIPrintf(viewer," Function value %g,",(double)fct);
1647: if (gnorm >= PETSC_INFINITY) {
1648: ierr=PetscViewerASCIIPrintf(viewer," Residual: Inf \n");
1649: } else if (gnorm > 1.e-6) {
1650: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1651: } else if (gnorm > 1.e-11) {
1652: ierr=PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-6 \n");
1653: } else {
1654: ierr=PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-11 \n");
1655: }
1656: PetscViewerASCIISetTab(viewer, tabs);
1657: return(0);
1658: }
1660: /*@
1661: TaoDefaultCMonitor - same as TaoMonitorDefault() except
1662: it prints the norm of the constraints function. It can be turned on
1663: from the command line using the -tao_cmonitor option
1665: Collective on Tao
1667: Input Parameters:
1668: + tao - the Tao context
1669: - ctx - PetscViewer context or NULL
1671: Options Database Keys:
1672: . -tao_cmonitor
1674: Level: advanced
1676: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1677: @*/
1678: PetscErrorCode TaoDefaultCMonitor(Tao tao, void *ctx)
1679: {
1681: PetscInt its, tabs;
1682: PetscReal fct,gnorm;
1683: PetscViewer viewer = (PetscViewer)ctx;
1687: its=tao->niter;
1688: fct=tao->fc;
1689: gnorm=tao->residual;
1690: PetscViewerASCIIGetTab(viewer, &tabs);
1691: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1692: ierr=PetscViewerASCIIPrintf(viewer,"iter = %D,",its);
1693: ierr=PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1694: ierr=PetscViewerASCIIPrintf(viewer," Residual: %g ",(double)gnorm);
1695: PetscViewerASCIIPrintf(viewer," Constraint: %g \n",(double)tao->cnorm);
1696: PetscViewerASCIISetTab(viewer, tabs);
1697: return(0);
1698: }
1700: /*@C
1701: TaoSolutionMonitor - Views the solution at each iteration
1702: It can be turned on from the command line using the
1703: -tao_view_solution option
1705: Collective on Tao
1707: Input Parameters:
1708: + tao - the Tao context
1709: - ctx - PetscViewer context or NULL
1711: Options Database Keys:
1712: . -tao_view_solution
1714: Level: advanced
1716: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1717: @*/
1718: PetscErrorCode TaoSolutionMonitor(Tao tao, void *ctx)
1719: {
1721:   PetscViewer viewer = (PetscViewer)ctx;
1725: VecView(tao->solution, viewer);
1726: return(0);
1727: }
1729: /*@C
1730: TaoGradientMonitor - Views the gradient at each iteration
1731: It can be turned on from the command line using the
1732: -tao_view_gradient option
1734: Collective on Tao
1736: Input Parameters:
1737: + tao - the Tao context
1738: - ctx - PetscViewer context or NULL
1740: Options Database Keys:
1741: . -tao_view_gradient
1743: Level: advanced
1745: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1746: @*/
1747: PetscErrorCode TaoGradientMonitor(Tao tao, void *ctx)
1748: {
1750: PetscViewer viewer = (PetscViewer)ctx;
1754: VecView(tao->gradient, viewer);
1755: return(0);
1756: }
1758: /*@C
1759:    TaoStepDirectionMonitor - Views the step direction at each iteration.
1760:    It can be turned on from the command line using the
1761:    -tao_view_stepdirection option
1763: Collective on Tao
1765: Input Parameters:
1766: + tao - the Tao context
1767: - ctx - PetscViewer context or NULL
1769: Options Database Keys:
1770: .  -tao_view_stepdirection
1772: Level: advanced
1774: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1775: @*/
1776: PetscErrorCode TaoStepDirectionMonitor(Tao tao, void *ctx)
1777: {
1779: PetscViewer viewer = (PetscViewer)ctx;
1783: VecView(tao->stepdirection, viewer);
1784: return(0);
1785: }
1787: /*@C
1788: TaoDrawSolutionMonitor - Plots the solution at each iteration
1789: It can be turned on from the command line using the
1790: -tao_draw_solution option
1792: Collective on Tao
1794: Input Parameters:
1795: + tao - the Tao context
1796: - ctx - TaoMonitorDraw context
1798: Options Database Keys:
1799: . -tao_draw_solution
1801: Level: advanced
1803: .seealso: TaoSolutionMonitor(), TaoSetMonitor(), TaoDrawGradientMonitor
1804: @*/
1805: PetscErrorCode TaoDrawSolutionMonitor(Tao tao, void *ctx)
1806: {
1807: PetscErrorCode ierr;
1808: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1811: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return(0);
1812: VecView(tao->solution,ictx->viewer);
1813: return(0);
1814: }
1816: /*@C
1817: TaoDrawGradientMonitor - Plots the gradient at each iteration
1818: It can be turned on from the command line using the
1819: -tao_draw_gradient option
1821: Collective on Tao
1823: Input Parameters:
1824: + tao - the Tao context
1825: - ctx - PetscViewer context
1827: Options Database Keys:
1828: . -tao_draw_gradient
1830: Level: advanced
1832: .seealso: TaoGradientMonitor(), TaoSetMonitor(), TaoDrawSolutionMonitor
1833: @*/
1834: PetscErrorCode TaoDrawGradientMonitor(Tao tao, void *ctx)
1835: {
1836: PetscErrorCode ierr;
1837: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1840: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return(0);
1841: VecView(tao->gradient,ictx->viewer);
1842: return(0);
1843: }
1845: /*@C
1846: TaoDrawStepMonitor - Plots the step direction at each iteration
1847: It can be turned on from the command line using the
1848: -tao_draw_step option
1850: Collective on Tao
1852: Input Parameters:
1853: + tao - the Tao context
1854: - ctx - PetscViewer context
1856: Options Database Keys:
1857: . -tao_draw_step
1859: Level: advanced
1861: .seealso: TaoSetMonitor(), TaoDrawSolutionMonitor
1862: @*/
1863: PetscErrorCode TaoDrawStepMonitor(Tao tao, void *ctx)
1864: {
1866: PetscViewer viewer = (PetscViewer)(ctx);
1869: VecView(tao->stepdirection, viewer);
1870: return(0);
1871: }
1873: /*@C
1874: TaoResidualMonitor - Views the least-squares residual at each iteration
1875: It can be turned on from the command line using the
1876: -tao_view_ls_residual option
1878: Collective on Tao
1880: Input Parameters:
1881: + tao - the Tao context
1882: - ctx - PetscViewer context or NULL
1884: Options Database Keys:
1885: . -tao_view_ls_residual
1887: Level: advanced
1889: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1890: @*/
1891: PetscErrorCode TaoResidualMonitor(Tao tao, void *ctx)
1892: {
1894: PetscViewer viewer = (PetscViewer)ctx;
1898: VecView(tao->ls_res,viewer);
1899: return(0);
1900: }
1902: /*@
1903: TaoDefaultConvergenceTest - Determines whether the solver should continue iterating
1904: or terminate.
1906: Collective on Tao
1908: Input Parameters:
1909: + tao - the Tao context
1910: - dummy - unused dummy context
1912: Output Parameter:
1913: . reason - the reason for terminating, stored in the Tao object and retrievable with TaoGetConvergedReason()
1915: Notes:
1916: This routine checks the residual in the optimality conditions, the
1917: relative residual in the optimality conditions, the number of function
1918: evaluations, and the function value to test convergence. Some
1919: solvers may use different convergence routines.
1921: Level: developer
1923: .seealso: TaoSetTolerances(),TaoGetConvergedReason(),TaoSetConvergedReason()
1924: @*/
1926: PetscErrorCode TaoDefaultConvergenceTest(Tao tao,void *dummy)
1927: {
1928: PetscInt niter=tao->niter, nfuncs=PetscMax(tao->nfuncs,tao->nfuncgrads);
1929: PetscInt max_funcs=tao->max_funcs;
1930: PetscReal gnorm=tao->residual, gnorm0=tao->gnorm0;
1931: PetscReal f=tao->fc, steptol=tao->steptol,trradius=tao->step;
1932: PetscReal gatol=tao->gatol,grtol=tao->grtol,gttol=tao->gttol;
1933: PetscReal catol=tao->catol,crtol=tao->crtol;
1934: PetscReal fmin=tao->fmin, cnorm=tao->cnorm;
1935: TaoConvergedReason reason=tao->reason;
1936: PetscErrorCode ierr;
1940: if (reason != TAO_CONTINUE_ITERATING) {
1941: return(0);
1942: }
1944: if (PetscIsInfOrNanReal(f)) {
1945: PetscInfo(tao,"Failed to converged, function value is Inf or NaN\n");
1946: reason = TAO_DIVERGED_NAN;
1947: } else if (f <= fmin && cnorm <=catol) {
1948: PetscInfo2(tao,"Converged due to function value %g < minimum function value %g\n", (double)f,(double)fmin);
1949: reason = TAO_CONVERGED_MINF;
1950: } else if (gnorm<= gatol && cnorm <=catol) {
1951: PetscInfo2(tao,"Converged due to residual norm ||g(X)||=%g < %g\n",(double)gnorm,(double)gatol);
1952: reason = TAO_CONVERGED_GATOL;
1953: } else if ( f!=0 && PetscAbsReal(gnorm/f) <= grtol && cnorm <= crtol) {
1954: PetscInfo2(tao,"Converged due to residual ||g(X)||/|f(X)| =%g < %g\n",(double)(gnorm/f),(double)grtol);
1955: reason = TAO_CONVERGED_GRTOL;
1956: } else if (gnorm0 != 0 && ((gttol == 0 && gnorm == 0) || gnorm/gnorm0 < gttol) && cnorm <= crtol) {
1957: PetscInfo2(tao,"Converged due to relative residual norm ||g(X)||/||g(X0)|| = %g < %g\n",(double)(gnorm/gnorm0),(double)gttol);
1958: reason = TAO_CONVERGED_GTTOL;
1959: } else if (nfuncs > max_funcs){
1960: PetscInfo2(tao,"Exceeded maximum number of function evaluations: %D > %D\n", nfuncs,max_funcs);
1961: reason = TAO_DIVERGED_MAXFCN;
1962: } else if ( tao->lsflag != 0 ){
1963: PetscInfo(tao,"Tao Line Search failure.\n");
1964: reason = TAO_DIVERGED_LS_FAILURE;
1965: } else if (trradius < steptol && niter > 0){
1966: PetscInfo2(tao,"Trust region/step size too small: %g < %g\n", (double)trradius,(double)steptol);
1967: reason = TAO_CONVERGED_STEPTOL;
1968: } else if (niter >= tao->max_it) {
1969: PetscInfo2(tao,"Exceeded maximum number of iterations: %D > %D\n",niter,tao->max_it);
1970: reason = TAO_DIVERGED_MAXITS;
1971: } else {
1972: reason = TAO_CONTINUE_ITERATING;
1973: }
1974: tao->reason = reason;
1975: return(0);
1976: }
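/*
   Editor's example (illustrative sketch, not part of the original source): the quantities
   consulted by TaoDefaultConvergenceTest() are set through the usual Tao interface.
   Assumes tao is an already created Tao; the numerical values are arbitrary.

     PetscErrorCode     ierr;
     TaoConvergedReason reason;

     ierr = TaoSetTolerances(tao,1.e-8,1.e-8,0.0);CHKERRQ(ierr);      // gatol, grtol, gttol
     ierr = TaoSetMaximumIterations(tao,200);CHKERRQ(ierr);           // max_it
     ierr = TaoSetMaximumFunctionEvaluations(tao,1000);CHKERRQ(ierr); // max_funcs
     ierr = TaoSolve(tao);CHKERRQ(ierr);
     ierr = TaoGetConvergedReason(tao,&reason);CHKERRQ(ierr);         // reason set by this test
*/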
1978: /*@C
1979: TaoSetOptionsPrefix - Sets the prefix used for searching for all
1980: TAO options in the database.
1983: Logically Collective on Tao
1985: Input Parameters:
1986: + tao - the Tao context
1987: - prefix - the prefix string to prepend to all TAO option requests
1989: Notes:
1990: A hyphen (-) must NOT be given at the beginning of the prefix name.
1991: The first character of all runtime options is AUTOMATICALLY the hyphen.
1993: For example, to distinguish between the runtime options for two
1994: different TAO solvers, one could call
1995: .vb
1996: TaoSetOptionsPrefix(tao1,"sys1_")
1997: TaoSetOptionsPrefix(tao2,"sys2_")
1998: .ve
2000: This would enable use of different options for each system, such as
2001: .vb
2002: -sys1_tao_method blmvm -sys1_tao_gtol 1.e-3
2003: -sys2_tao_method lmvm -sys2_tao_gtol 1.e-4
2004: .ve
2007: Level: advanced
2009: .seealso: TaoAppendOptionsPrefix(), TaoGetOptionsPrefix()
2010: @*/
2012: PetscErrorCode TaoSetOptionsPrefix(Tao tao, const char p[])
2013: {
2017: PetscObjectSetOptionsPrefix((PetscObject)tao,p);
2018: if (tao->linesearch) {
2019: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
2020: }
2021: if (tao->ksp) {
2022: KSPSetOptionsPrefix(tao->ksp,p);
2023: }
2024: return(0);
2025: }
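/*
   Editor's example (illustrative sketch, not part of the original source): configuring two
   solvers in the same application through different option prefixes.  Assumes tao1 and
   tao2 are already created.

     PetscErrorCode ierr;

     ierr = TaoSetOptionsPrefix(tao1,"sys1_");CHKERRQ(ierr);
     ierr = TaoSetOptionsPrefix(tao2,"sys2_");CHKERRQ(ierr);
     ierr = TaoSetFromOptions(tao1);CHKERRQ(ierr);   // reads the -sys1_tao_... options
     ierr = TaoSetFromOptions(tao2);CHKERRQ(ierr);   // reads the -sys2_tao_... options
*/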
2027: /*@C
2028: TaoAppendOptionsPrefix - Appends to the prefix used for searching for all
2029: TAO options in the database.
2032: Logically Collective on Tao
2034: Input Parameters:
2035: + tao - the Tao solver context
2036: - prefix - the prefix string to prepend to all TAO option requests
2038: Notes:
2039: A hyphen (-) must NOT be given at the beginning of the prefix name.
2040: The first character of all runtime options is AUTOMATICALLY the hyphen.
2043: Level: advanced
2045: .seealso: TaoSetOptionsPrefix(), TaoGetOptionsPrefix()
2046: @*/
2047: PetscErrorCode TaoAppendOptionsPrefix(Tao tao, const char p[])
2048: {
2052: PetscObjectAppendOptionsPrefix((PetscObject)tao,p);
2053: if (tao->linesearch) {
2054: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
2055: }
2056: if (tao->ksp) {
2057: KSPSetOptionsPrefix(tao->ksp,p);
2058: }
2059: return(0);
2060: }
2062: /*@C
2063: TaoGetOptionsPrefix - Gets the prefix used for searching for all
2064: TAO options in the database
2066: Not Collective
2068: Input Parameters:
2069: . tao - the Tao context
2071: Output Parameters:
2072: . prefix - pointer to the prefix string currently in use
2074: Notes:
2075: On the Fortran side, the user should pass in a string 'prefix' of
2076: sufficient length to hold the prefix.
2078: Level: advanced
2080: .seealso: TaoSetOptionsPrefix(), TaoAppendOptionsPrefix()
2081: @*/
2082: PetscErrorCode TaoGetOptionsPrefix(Tao tao, const char *p[])
2083: {
2084: return PetscObjectGetOptionsPrefix((PetscObject)tao,p);
2085: }
2087: /*@C
2088: TaoSetType - Sets the method used by the TAO solver.
2090: Collective on Tao
2092: Input Parameters:
2093: + tao - the Tao solver context
2094: - type - a known method
2096: Options Database Key:
2097: . -tao_type <type> - Sets the method; use -help for a list
2098: of available methods (for instance, "-tao_type lmvm" or "-tao_type tron")
2100: Available methods include:
2101: + nls - Newton's method with line search for unconstrained minimization
2102: . ntr - Newton's method with trust region for unconstrained minimization
2103: . ntl - Newton's method with trust region, line search for unconstrained minimization
2104: . lmvm - Limited memory variable metric method for unconstrained minimization
2105: . cg - Nonlinear conjugate gradient method for unconstrained minimization
2106: . nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
2107: . tron - Newton Trust Region method for bound constrained minimization
2108: . gpcg - Newton Trust Region method for quadratic bound constrained minimization
2109: . blmvm - Limited memory variable metric method for bound constrained minimization
2110: - pounders - Model-based algorithm for nonlinear least squares
2112: Level: intermediate
2114: .seealso: TaoCreate(), TaoGetType(), TaoType
2116: @*/
2117: PetscErrorCode TaoSetType(Tao tao, TaoType type)
2118: {
2120: PetscErrorCode (*create_xxx)(Tao);
2121: PetscBool issame;
2126: PetscObjectTypeCompare((PetscObject)tao,type,&issame);
2127: if (issame) return(0);
2129: PetscFunctionListFind(TaoList, type, (void(**)(void))&create_xxx);
2130: if (!create_xxx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unable to find requested Tao type %s",type);
2132: /* Destroy the existing solver information */
2133: if (tao->ops->destroy) {
2134: (*tao->ops->destroy)(tao);
2135: }
2136: KSPDestroy(&tao->ksp);
2137: TaoLineSearchDestroy(&tao->linesearch);
2138: VecDestroy(&tao->gradient);
2139: VecDestroy(&tao->stepdirection);
2141: tao->ops->setup = 0;
2142: tao->ops->solve = 0;
2143: tao->ops->view = 0;
2144: tao->ops->setfromoptions = 0;
2145: tao->ops->destroy = 0;
2147: tao->setupcalled = PETSC_FALSE;
2149: (*create_xxx)(tao);
2150: PetscObjectChangeTypeName((PetscObject)tao,type);
2151: return(0);
2152: }
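/*
   Editor's example (illustrative sketch, not part of the original source): selecting a
   solver programmatically while still letting -tao_type override it at runtime.  Assumes
   x is the solution Vec and FormFunctionGradient() is a hypothetical user callback.

     Tao            tao;
     PetscErrorCode ierr;

     ierr = TaoCreate(PETSC_COMM_WORLD,&tao);CHKERRQ(ierr);
     ierr = TaoSetType(tao,TAOLMVM);CHKERRQ(ierr);                    // default choice in the code
     ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
     ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);CHKERRQ(ierr);
     ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);                     // -tao_type may change the type
     ierr = TaoSolve(tao);CHKERRQ(ierr);
     ierr = TaoDestroy(&tao);CHKERRQ(ierr);
*/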
2154: /*MC
2155: TaoRegister - Adds a method to the TAO package for minimization.
2157: Synopsis:
2158: TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2160: Not collective
2162: Input Parameters:
2163: + sname - name of a new user-defined solver
2164: - func - routine to Create method context
2166: Notes:
2167: TaoRegister() may be called multiple times to add several user-defined solvers.
2169: Sample usage:
2170: .vb
2171: TaoRegister("my_solver",MySolverCreate);
2172: .ve
2174: Then, your solver can be chosen with the procedural interface via
2175: $ TaoSetType(tao,"my_solver")
2176: or at runtime via the option
2177: $ -tao_type my_solver
2179: Level: advanced
2181: .seealso: TaoRegisterAll(), TaoRegisterDestroy()
2182: M*/
2183: PetscErrorCode TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2184: {
2188: TaoInitializePackage();
2189: PetscFunctionListAdd(&TaoList,sname, (void (*)(void))func);
2190: return(0);
2191: }
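/*
   Editor's example (illustrative sketch, not part of the original source): registering a
   user-defined solver.  TaoCreate_MySolver, MySolverSetUp, MySolverSolve, and
   MySolverDestroy are hypothetical user routines.

     static PetscErrorCode TaoCreate_MySolver(Tao tao)
     {
       PetscFunctionBegin;
       tao->ops->setup   = MySolverSetUp;
       tao->ops->solve   = MySolverSolve;
       tao->ops->destroy = MySolverDestroy;
       PetscFunctionReturn(0);
     }

     // later, before TaoSetType()/TaoSetFromOptions():
     ierr = TaoRegister("my_solver",TaoCreate_MySolver);CHKERRQ(ierr);
     ierr = TaoSetType(tao,"my_solver");CHKERRQ(ierr);   // or -tao_type my_solver
*/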
2193: /*@C
2194: TaoRegisterDestroy - Frees the list of minimization solvers that were
2195: registered by TaoRegister().
2197: Not Collective
2199: Level: advanced
2201: .seealso: TaoRegisterAll(), TaoRegister()
2202: @*/
2203: PetscErrorCode TaoRegisterDestroy(void)
2204: {
2207: PetscFunctionListDestroy(&TaoList);
2208: TaoRegisterAllCalled = PETSC_FALSE;
2209: return(0);
2210: }
2212: /*@
2213: TaoGetIterationNumber - Gets the number of Tao iterations completed
2214: at this time.
2216: Not Collective
2218: Input Parameter:
2219: . tao - Tao context
2221: Output Parameter:
2222: . iter - iteration number
2224: Notes:
2225: For example, during the computation of iteration 2 this would return 1.
2228: Level: intermediate
2230: .keywords: Tao, nonlinear, get, iteration, number,
2232: .seealso: TaoGetLinearSolveIterations(), TaoGetResidualNorm(), TaoGetObjective()
2233: @*/
2234: PetscErrorCode TaoGetIterationNumber(Tao tao,PetscInt *iter)
2235: {
2239: *iter = tao->niter;
2240: return(0);
2241: }
2243: /*@
2244: TaoGetObjective - Gets the current value of the objective function
2245: at this time.
2247: Not Collective
2249: Input Parameter:
2250: . tao - Tao context
2252: Output Parameter:
2253: . value - the current value
2255: Level: intermediate
2257: .keywords: Tao, nonlinear, get, iteration, number,
2259: .seealso: TaoGetLinearSolveIterations(), TaoGetIterationNumber(), TaoGetResidualNorm()
2260: @*/
2261: PetscErrorCode TaoGetObjective(Tao tao,PetscReal *value)
2262: {
2266: *value = tao->fc;
2267: return(0);
2268: }
2270: /*@
2271: TaoGetResidualNorm - Gets the current value of the norm of the residual
2272: at this time.
2274: Not Collective
2276: Input Parameter:
2277: . tao - Tao context
2279: Output Parameter:
2280: . value - the current value
2282: Level: intermediate
2284: Developer Note: This is the 2-norm of the residual, we cannot use TaoGetGradientNorm() because that has
2285: a different meaning. For some reason Tao sometimes calls the gradient the residual.
2287: .keywords: Tao, nonlinear, get, iteration, number,
2289: .seealso: TaoGetLinearSolveIterations(), TaoGetIterationNumber(), TaoGetObjective()
2290: @*/
2291: PetscErrorCode TaoGetResidualNorm(Tao tao,PetscReal *value)
2292: {
2296: *value = tao->residual;
2297: return(0);
2298: }
2300: /*@
2301: TaoSetIterationNumber - Sets the current iteration number.
2303: Not Collective
2305: Input Parameters:
2306: + tao - Tao context
2307: - iter - iteration number
2309: Level: developer
2311: .keywords: Tao, nonlinear, set, iteration, number,
2313: .seealso: TaoGetLinearSolveIterations()
2314: @*/
2315: PetscErrorCode TaoSetIterationNumber(Tao tao,PetscInt iter)
2316: {
2321: PetscObjectSAWsTakeAccess((PetscObject)tao);
2322: tao->niter = iter;
2323: PetscObjectSAWsGrantAccess((PetscObject)tao);
2324: return(0);
2325: }
2327: /*@
2328: TaoGetTotalIterationNumber - Gets the total number of Tao iterations
2329: completed. This number keeps accumulating if multiple solves
2330: are called with the Tao object.
2332: Not Collective
2334: Input Parameter:
2335: . tao - Tao context
2337: Output Parameter:
2338: . iter - iteration number
2340: Notes:
2341: The total iteration count is updated after each solve, if there is a current
2342: TaoSolve() in progress then those iterations are not yet counted.
2344: Level: intermediate
2346: .keywords: Tao, nonlinear, get, iteration, number,
2348: .seealso: TaoGetLinearSolveIterations()
2349: @*/
2350: PetscErrorCode TaoGetTotalIterationNumber(Tao tao,PetscInt *iter)
2351: {
2355: *iter = tao->ntotalits;
2356: return(0);
2357: }
2359: /*@
2360: TaoSetTotalIterationNumber - Sets the current total iteration number.
2362: Not Collective
2364: Input Parameters:
2365: + tao - Tao context
2366: - iter - iteration number
2368: Level: developer
2370: .keywords: Tao, nonlinear, set, iteration, number,
2372: .seealso: TaoGetLinearSolveIterations()
2373: @*/
2374: PetscErrorCode TaoSetTotalIterationNumber(Tao tao,PetscInt iter)
2375: {
2380: PetscObjectSAWsTakeAccess((PetscObject)tao);
2381: tao->ntotalits = iter;
2382: PetscObjectSAWsGrantAccess((PetscObject)tao);
2383: return(0);
2384: }
2386: /*@
2387: TaoSetConvergedReason - Sets the termination flag on a Tao object
2389: Logically Collective on Tao
2391: Input Parameters:
2392: + tao - the Tao context
2393: - reason - one of
2394: $ TAO_CONVERGED_GATOL (3),
2395: $ TAO_CONVERGED_GRTOL (4),
2396: $ TAO_CONVERGED_GTTOL (5),
2397: $ TAO_CONVERGED_STEPTOL (6),
2398: $ TAO_CONVERGED_MINF (7),
2399: $ TAO_CONVERGED_USER (8),
2400: $ TAO_DIVERGED_MAXITS (-2),
2401: $ TAO_DIVERGED_NAN (-4),
2402: $ TAO_DIVERGED_MAXFCN (-5),
2403: $ TAO_DIVERGED_LS_FAILURE (-6),
2404: $ TAO_DIVERGED_TR_REDUCTION (-7),
2405: $ TAO_DIVERGED_USER (-8),
2406: $ TAO_CONTINUE_ITERATING (0)
2407: Level: intermediate
2409: @*/
2410: PetscErrorCode TaoSetConvergedReason(Tao tao, TaoConvergedReason reason)
2411: {
2414: tao->reason = reason;
2415: return(0);
2416: }
2418: /*@
2419: TaoGetConvergedReason - Gets the reason the Tao iteration was stopped.
2421: Not Collective
2423: Input Parameter:
2424: . tao - the Tao solver context
2426: Output Parameter:
2427: . reason - one of
2428: $ TAO_CONVERGED_GATOL (3) ||g(X)|| < gatol
2429: $ TAO_CONVERGED_GRTOL (4) ||g(X)|| / f(X) < grtol
2430: $ TAO_CONVERGED_GTTOL (5) ||g(X)|| / ||g(X0)|| < gttol
2431: $ TAO_CONVERGED_STEPTOL (6) step size small
2432: $ TAO_CONVERGED_MINF (7) F < F_min
2433: $ TAO_CONVERGED_USER (8) User defined
2434: $ TAO_DIVERGED_MAXITS (-2) its > maxits
2435: $ TAO_DIVERGED_NAN (-4) Numerical problems
2436: $ TAO_DIVERGED_MAXFCN (-5) fevals > max_funcs
2437: $ TAO_DIVERGED_LS_FAILURE (-6) line search failure
2438: $ TAO_DIVERGED_TR_REDUCTION (-7) trust region failure
2439: $ TAO_DIVERGED_USER (-8) user defined
2440: $ TAO_CONTINUE_ITERATING (0)
2442: where
2443: + X - current solution
2444: . X0 - initial guess
2445: . f(X) - current function value
2446: . f(X*) - true solution (estimated)
2447: . g(X) - current gradient
2448: . its - current iterate number
2449: . maxits - maximum number of iterates
2450: . fevals - number of function evaluations
2451: - max_funcs - maximum number of function evaluations
2453: Level: intermediate
2455: .seealso: TaoSetConvergenceTest(), TaoSetTolerances()
2457: @*/
2458: PetscErrorCode TaoGetConvergedReason(Tao tao, TaoConvergedReason *reason)
2459: {
2463: *reason = tao->reason;
2464: return(0);
2465: }
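/*
   Editor's example (illustrative sketch, not part of the original source): inspecting the
   converged reason after a solve.  Assumes tao is an already configured Tao.

     TaoConvergedReason reason;
     PetscErrorCode     ierr;

     ierr = TaoSolve(tao);CHKERRQ(ierr);
     ierr = TaoGetConvergedReason(tao,&reason);CHKERRQ(ierr);
     if (reason < 0) {
       ierr = PetscPrintf(PETSC_COMM_WORLD,"Tao diverged, reason %D\n",(PetscInt)reason);CHKERRQ(ierr);
     }
*/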
2467: /*@
2468: TaoGetSolutionStatus - Get the current iterate, objective value,
2469: residual, infeasibility, and termination
2471: Not Collective
2473: Input Parameters:
2474: . tao - the Tao context
2476: Output Parameters:
2477: + its - the current iterate number (>=0)
2478: . f - the current function value
2479: . gnorm - the square of the gradient norm, duality gap, or other measure indicating distance from optimality.
2480: . cnorm - the infeasibility of the current solution with regard to the constraints.
2481: . xdiff - the step length or trust region radius of the most recent iterate.
2482: - reason - The termination reason, which can equal TAO_CONTINUE_ITERATING
2484: Level: intermediate
2486: Note:
2487: TAO returns the values set by the solvers in the routine TaoMonitor().
2489: Note:
2490: If any of the output arguments are set to NULL, no corresponding value will be returned.
2492: .seealso: TaoMonitor(), TaoGetConvergedReason()
2493: @*/
2494: PetscErrorCode TaoGetSolutionStatus(Tao tao, PetscInt *its, PetscReal *f, PetscReal *gnorm, PetscReal *cnorm, PetscReal *xdiff, TaoConvergedReason *reason)
2495: {
2497: if (its) *its=tao->niter;
2498: if (f) *f=tao->fc;
2499: if (gnorm) *gnorm=tao->residual;
2500: if (cnorm) *cnorm=tao->cnorm;
2501: if (reason) *reason=tao->reason;
2502: if (xdiff) *xdiff=tao->step;
2503: return(0);
2504: }
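/*
   Editor's example (illustrative sketch, not part of the original source): querying the
   solution status from inside a user monitor.  MyMonitor is a hypothetical routine that
   would be attached with TaoSetMonitor(tao,MyMonitor,NULL,NULL).

     static PetscErrorCode MyMonitor(Tao tao,void *ctx)
     {
       PetscInt           its;
       PetscReal          f,gnorm,cnorm,xdiff;
       TaoConvergedReason reason;
       PetscErrorCode     ierr;

       ierr = TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);CHKERRQ(ierr);
       ierr = PetscPrintf(PetscObjectComm((PetscObject)tao),"iter %D: f=%g, residual=%g\n",its,(double)f,(double)gnorm);CHKERRQ(ierr);
       return 0;
     }
*/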
2506: /*@C
2507: TaoGetType - Gets the current Tao algorithm.
2509: Not Collective
2511: Input Parameter:
2512: . tao - the Tao solver context
2514: Output Parameter:
2515: . type - Tao method
2517: Level: intermediate
2519: @*/
2520: PetscErrorCode TaoGetType(Tao tao,TaoType *type)
2521: {
2525: *type=((PetscObject)tao)->type_name;
2526: return(0);
2527: }
2529: /*@C
2530: TaoMonitor - Monitor the solver and the current solution. This
2531: routine records the iteration number and residual statistics and
2532: calls any monitors specified by the user.
2534: Input Parameters:
2535: + tao - the Tao context
2536: . its - the current iterate number (>=0)
2537: . f - the current objective function value
2538: . res - the gradient norm, square root of the duality gap, or other measure indicating distance from optimality. This measure will be recorded and
2539: used for some termination tests.
2540: . cnorm - the infeasibility of the current solution with regard to the constraints.
2541: - steplength - multiple of the step direction added to the previous iterate.
2543: Output Parameters:
2544: . reason - The termination reason, which can equal TAO_CONTINUE_ITERATING
2546: Options Database Key:
2547: . -tao_monitor - Use the default monitor, which prints statistics to standard output
2549: .seealso: TaoGetConvergedReason(), TaoDefaultMonitor(), TaoSetMonitor()
2551: Level: developer
2553: @*/
2554: PetscErrorCode TaoMonitor(Tao tao, PetscInt its, PetscReal f, PetscReal res, PetscReal cnorm, PetscReal steplength)
2555: {
2557: PetscInt i;
2561: tao->fc = f;
2562: tao->residual = res;
2563: tao->cnorm = cnorm;
2564: tao->step = steplength;
2565: if (!its) {
2566: tao->cnorm0 = cnorm; tao->gnorm0 = res;
2567: }
2568: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(res)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
2569: for (i=0;i<tao->numbermonitors;i++) {
2570: (*tao->monitor[i])(tao,tao->monitorcontext[i]);
2571: }
2572: return(0);
2573: }
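/*
   Editor's example (illustrative sketch, not part of the original source): how a solver
   implementation typically reports progress each iteration, loosely following the pattern
   of the built-in solvers.  f, gnorm, and step are values the solver has just computed;
   0.0 is passed as the infeasibility for an unconstrained method.

     while (tao->reason == TAO_CONTINUE_ITERATING) {
       // ... compute a new iterate, objective f, gradient norm gnorm, step length step ...
       tao->niter++;
       ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
       ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);CHKERRQ(ierr);
       ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
     }
*/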
2575: /*@
2576: TaoSetConvergenceHistory - Sets the array used to hold the convergence history.
2578: Logically Collective on Tao
2580: Input Parameters:
2581: + tao - the Tao solver context
2582: . obj - array to hold objective value history
2583: . resid - array to hold residual history
2584: . cnorm - array to hold constraint violation history
2585: . lits - integer array holds the number of linear iterations for each Tao iteration
2586: . na - size of obj, resid, and cnorm
2587: - reset - PETSC_TRUE indicates each new minimization resets the history counter to zero,
2588: else it continues storing new values for new minimizations after the old ones
2590: Notes:
2591: If set, TAO will fill the given arrays with the indicated
2592: information at each iteration. If 'obj','resid','cnorm','lits' are
2593: *all* NULL then space (using size na, or 1000 if na is PETSC_DECIDE or
2594: PETSC_DEFAULT) is allocated for the history.
2595: If not all are NULL, then only the non-NULL information categories
2596: will be stored, the others will be ignored.
2598: Any convergence information after iteration number 'na' will not be stored.
2600: This routine is useful, e.g., when running a code for purposes
2601: of accurate performance monitoring, when no I/O should be done
2602: during the section of code that is being timed.
2604: Level: intermediate
2606: .seealso: TaoGetConvergenceHistory()
2608: @*/
2609: PetscErrorCode TaoSetConvergenceHistory(Tao tao, PetscReal obj[], PetscReal resid[], PetscReal cnorm[], PetscInt lits[], PetscInt na,PetscBool reset)
2610: {
2620: if (na == PETSC_DECIDE || na == PETSC_DEFAULT) na = 1000;
2621: if (!obj && !resid && !cnorm && !lits) {
2622: PetscCalloc1(na,&obj);
2623: PetscCalloc1(na,&resid);
2624: PetscCalloc1(na,&cnorm);
2625: PetscCalloc1(na,&lits);
2626: tao->hist_malloc=PETSC_TRUE;
2627: }
2629: tao->hist_obj = obj;
2630: tao->hist_resid = resid;
2631: tao->hist_cnorm = cnorm;
2632: tao->hist_lits = lits;
2633: tao->hist_max = na;
2634: tao->hist_reset = reset;
2635: tao->hist_len = 0;
2636: return(0);
2637: }
2639: /*@C
2640: TaoGetConvergenceHistory - Gets the arrays used to hold the convergence history.
2642: Collective on Tao
2644: Input Parameter:
2645: . tao - the Tao context
2647: Output Parameters:
2648: + obj - array used to hold objective value history
2649: . resid - array used to hold residual history
2650: . cnorm - array used to hold constraint violation history
2651: . lits - integer array used to hold linear solver iteration count
2652: - nhist - size of obj, resid, cnorm, and lits (will be less than or equal to na given in TaoSetConvergenceHistory())
2654: Notes:
2655: This routine must be preceded by calls to TaoSetConvergenceHistory()
2656: and TaoSolve(), otherwise it returns useless information.
2658: The calling sequence for this routine in Fortran is
2659: $ call TaoGetConvergenceHistory(Tao tao, PetscInt nhist, PetscErrorCode ierr)
2661: This routine is useful, e.g., when running a code for purposes
2662: of accurate performance monitoring, when no I/O should be done
2663: during the section of code that is being timed.
2665: Level: advanced
2667: .seealso: TaoSetConvergenceHistory()
2669: @*/
2670: PetscErrorCode TaoGetConvergenceHistory(Tao tao, PetscReal **obj, PetscReal **resid, PetscReal **cnorm, PetscInt **lits, PetscInt *nhist)
2671: {
2674: if (obj) *obj = tao->hist_obj;
2675: if (cnorm) *cnorm = tao->hist_cnorm;
2676: if (resid) *resid = tao->hist_resid;
      if (lits)  *lits  = tao->hist_lits;
2677: if (nhist) *nhist = tao->hist_len;
2678: return(0);
2679: }
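/*
   Editor's example (illustrative sketch, not part of the original source): recording and
   retrieving the convergence history.  Assumes tao is already configured; the 100 slots
   are an arbitrary choice.

     PetscReal      obj[100],resid[100],cnorm[100];
     PetscInt       lits[100],nhist,i;
     PetscErrorCode ierr;

     ierr = TaoSetConvergenceHistory(tao,obj,resid,cnorm,lits,100,PETSC_TRUE);CHKERRQ(ierr);
     ierr = TaoSolve(tao);CHKERRQ(ierr);
     ierr = TaoGetConvergenceHistory(tao,NULL,NULL,NULL,NULL,&nhist);CHKERRQ(ierr);
     for (i=0; i<nhist; i++) {
       ierr = PetscPrintf(PETSC_COMM_WORLD,"%D: f=%g  resid=%g\n",i,(double)obj[i],(double)resid[i]);CHKERRQ(ierr);
     }
*/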
2681: /*@
2682: TaoSetApplicationContext - Sets the optional user-defined context for
2683: a solver.
2685: Logically Collective on Tao
2687: Input Parameters:
2688: + tao - the Tao context
2689: - usrP - optional user context
2691: Level: intermediate
2693: .seealso: TaoGetApplicationContext()
2694: @*/
2695: PetscErrorCode TaoSetApplicationContext(Tao tao,void *usrP)
2696: {
2699: tao->user = usrP;
2700: return(0);
2701: }
2703: /*@
2704: TaoGetApplicationContext - Gets the user-defined context for a
2705: TAO solver.
2707: Not Collective
2709: Input Parameter:
2710: . tao - Tao context
2712: Output Parameter:
2713: . usrP - user context
2715: Level: intermediate
2717: .seealso: TaoSetApplicationContext()
2718: @*/
2719: PetscErrorCode TaoGetApplicationContext(Tao tao,void *usrP)
2720: {
2723: *(void**)usrP = tao->user;
2724: return(0);
2725: }
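/*
   Editor's example (illustrative sketch, not part of the original source): passing
   application data to user callbacks through the application context.  AppCtx and
   FormObjective are hypothetical user definitions.

     typedef struct {
       PetscReal alpha;
     } AppCtx;

     static PetscErrorCode FormObjective(Tao tao,Vec X,PetscReal *f,void *ptr)
     {
       AppCtx         *user;
       PetscErrorCode ierr;

       ierr = TaoGetApplicationContext(tao,&user);CHKERRQ(ierr);  // same pointer set below
       // ... evaluate *f from X and user->alpha ...
       return 0;
     }

     // in the calling code:
     AppCtx user;
     user.alpha = 1.0;
     ierr = TaoSetApplicationContext(tao,&user);CHKERRQ(ierr);
     ierr = TaoSetObjectiveRoutine(tao,FormObjective,&user);CHKERRQ(ierr);
*/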
2727: /*@
2728: TaoSetGradientNorm - Sets the matrix used to define the inner product that measures the size of the gradient.
2730: Collective on tao
2732: Input Parameters:
2733: + tao - the Tao context
2734: - M - gradient norm
2736: Level: beginner
2738: .seealso: TaoGetGradientNorm(), TaoGradientNorm()
2739: @*/
2740: PetscErrorCode TaoSetGradientNorm(Tao tao, Mat M)
2741: {
2747: if (tao->gradient_norm) {
2748: PetscObjectDereference((PetscObject)tao->gradient_norm);
2749: VecDestroy(&tao->gradient_norm_tmp);
2750: }
2752: PetscObjectReference((PetscObject)M);
2753: tao->gradient_norm = M;
2754: MatCreateVecs(M, NULL, &tao->gradient_norm_tmp);
2755: return(0);
2756: }
2758: /*@
2759: TaoGetGradientNorm - Returns the matrix used to define the inner product for measuring the size of the gradient.
2761: Not Collective
2763: Input Parameter:
2764: . tao - Tao context
2766: Output Parameter:
2767: . M - gradient norm
2769: Level: beginner
2771: .seealso: TaoSetGradientNorm(), TaoGradientNorm()
2772: @*/
2773: PetscErrorCode TaoGetGradientNorm(Tao tao, Mat *M)
2774: {
2777: *M = tao->gradient_norm;
2778: return(0);
2779: }
2781: /*@
2782: TaoGradientNorm - Compute the norm with respect to the inner product the user has set.
2784: Collective on tao
2786: Input Parameters:
2787: + tao - the Tao context
2788: . gradient - the gradient vector whose norm is to be computed
2789: - type - the norm type
2791: Output Parameter:
2792: . gnorm - the gradient norm
2794: Level: developer
2796: .seealso: TaoSetGradientNorm(), TaoGetGradientNorm()
2797: @*/
2798: PetscErrorCode TaoGradientNorm(Tao tao, Vec gradient, NormType type, PetscReal *gnorm)
2799: {
2805: if (tao->gradient_norm) {
2806: PetscScalar gnorms;
2808: if (type != NORM_2) SETERRQ(PetscObjectComm((PetscObject)gradient), PETSC_ERR_ARG_WRONGSTATE, "Norm type must be NORM_2 if an inner product for the gradient norm is set.");
2809: MatMult(tao->gradient_norm, gradient, tao->gradient_norm_tmp);
2810: VecDot(gradient, tao->gradient_norm_tmp, &gnorms);
2811: *gnorm = PetscRealPart(PetscSqrtScalar(gnorms));
2812: } else {
2813: VecNorm(gradient, type, gnorm);
2814: }
2815: return(0);
2816: }
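/*
   Editor's example (illustrative sketch, not part of the original source): measuring the
   gradient in a matrix-induced inner product, for example a mass matrix M from a
   finite-element discretization.  Assumes M and the gradient vector g already exist.

     PetscReal      gnorm;
     PetscErrorCode ierr;

     ierr = TaoSetGradientNorm(tao,M);CHKERRQ(ierr);            // ||g||_M = sqrt(g^T M g)
     ierr = TaoGradientNorm(tao,g,NORM_2,&gnorm);CHKERRQ(ierr); // only NORM_2 is allowed with a matrix norm
*/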
2818: /*@C
2819: TaoMonitorDrawCtxCreate - Creates the monitor context used by the drawing monitors (e.g. TaoDrawSolutionMonitor())
2821: Collective on Tao
2823: Output Parameter:
2824: . ctx - the monitor context
2826: Options Database:
2827: . -tao_draw_solution_initial - show initial guess as well as current solution
2829: Level: intermediate
2831: .keywords: Tao, vector, monitor, view
2833: .seealso: TaoSetMonitor(), TaoDefaultMonitor(), VecView(), TaoMonitorDrawCtxDestroy()
2834: @*/
2835: PetscErrorCode TaoMonitorDrawCtxCreate(MPI_Comm comm,const char host[],const char label[],int x,int y,int m,int n,PetscInt howoften,TaoMonitorDrawCtx *ctx)
2836: {
2837: PetscErrorCode ierr;
2840: PetscNew(ctx);
2841: PetscViewerDrawOpen(comm,host,label,x,y,m,n,&(*ctx)->viewer);
2842: PetscViewerSetFromOptions((*ctx)->viewer);
2843: (*ctx)->howoften = howoften;
2844: return(0);
2845: }
2847: /*@C
2848: TaoMonitorDrawCtxDestroy - Destroys the monitor context for TaoDrawSolutionMonitor()
2850: Collective on Tao
2852: Input Parameters:
2853: . ctx - the monitor context
2855: Level: intermediate
2857: .keywords: Tao, vector, monitor, view
2859: .seealso: TaoSetMonitor(), TaoDefaultMonitor(), VecView(), TaoDrawSolutionMonitor()
2860: @*/
2861: PetscErrorCode TaoMonitorDrawCtxDestroy(TaoMonitorDrawCtx *ictx)
2862: {
2866: PetscViewerDestroy(&(*ictx)->viewer);
2867: PetscFree(*ictx);
2868: return(0);
2869: }
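/*
   Editor's example (illustrative sketch, not part of the original source): creating a draw
   context and attaching the solution plot monitor so the context is destroyed together
   with the Tao object.  Assumes tao is already created; the window size and howoften=1
   (plot every iteration) are arbitrary choices.

     TaoMonitorDrawCtx ctx;
     PetscErrorCode    ierr;

     ierr = TaoMonitorDrawCtxCreate(PETSC_COMM_WORLD,NULL,"solution",PETSC_DECIDE,PETSC_DECIDE,300,300,1,&ctx);CHKERRQ(ierr);
     ierr = TaoSetMonitor(tao,TaoDrawSolutionMonitor,ctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);CHKERRQ(ierr);
*/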