Actual source code: taosolver.c
1: #define TAO_DLL
3: #include <petsc/private/taoimpl.h>
5: PetscBool TaoRegisterAllCalled = PETSC_FALSE;
6: PetscFunctionList TaoList = NULL;
8: PetscClassId TAO_CLASSID;
10: PetscLogEvent TAO_Solve;
11: PetscLogEvent TAO_ObjectiveEval;
12: PetscLogEvent TAO_GradientEval;
13: PetscLogEvent TAO_ObjGradEval;
14: PetscLogEvent TAO_HessianEval;
15: PetscLogEvent TAO_JacobianEval;
16: PetscLogEvent TAO_ConstraintsEval;
18: const char *TaoSubSetTypes[] = {"subvec","mask","matrixfree","TaoSubSetType","TAO_SUBSET_",NULL};
20: struct _n_TaoMonitorDrawCtx {
21: PetscViewer viewer;
22: PetscInt howoften; /* when > 0 uses iteration % howoften, when negative only final solution plotted */
23: };
25: /*@
26: TaoCreate - Creates a TAO solver
28: Collective
30: Input Parameter:
31: . comm - MPI communicator
33: Output Parameter:
34: . newtao - the new Tao context
36: Available methods include:
37: + nls - Newton's method with line search for unconstrained minimization
38: . ntr - Newton's method with trust region for unconstrained minimization
39: . ntl - Newton's method with trust region, line search for unconstrained minimization
40: . lmvm - Limited memory variable metric method for unconstrained minimization
41: . cg - Nonlinear conjugate gradient method for unconstrained minimization
42: . nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
43: . tron - Newton Trust Region method for bound constrained minimization
44: . gpcg - Gradient projection conjugate gradient method for quadratic bound constrained minimization
45: . blmvm - Limited memory variable metric method for bound constrained minimization
46: . lcl - Linearly constrained Lagrangian method for pde-constrained minimization
47: - pounders - Model-based algorithm for nonlinear least squares
49: Options Database Keys:
50: . -tao_type - select which method TAO should use
52: Level: beginner
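   Example Usage (a minimal sketch; error checking omitted for brevity):
.vb
   Tao tao;

   TaoCreate(PETSC_COMM_WORLD,&tao);
   TaoSetType(tao,TAOLMVM);          /* or select the method at runtime with -tao_type */
   TaoSetFromOptions(tao);
   /* ... set the solution vector and callbacks, then call TaoSolve(tao) ... */
   TaoDestroy(&tao);
.ve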
54: .seealso: TaoSolve(), TaoDestroy()
55: @*/
56: PetscErrorCode TaoCreate(MPI_Comm comm, Tao *newtao)
57: {
58: Tao tao;
61: TaoInitializePackage();
62: TaoLineSearchInitializePackage();
63: PetscHeaderCreate(tao,TAO_CLASSID,"Tao","Optimization solver","Tao",comm,TaoDestroy,TaoView);
65: /* Set non-NULL defaults */
66: tao->ops->convergencetest = TaoDefaultConvergenceTest;
68: tao->max_it = 10000;
69: tao->max_funcs = -1;
70: #if defined(PETSC_USE_REAL_SINGLE)
71: tao->gatol = 1e-5;
72: tao->grtol = 1e-5;
73: tao->crtol = 1e-5;
74: tao->catol = 1e-5;
75: #else
76: tao->gatol = 1e-8;
77: tao->grtol = 1e-8;
78: tao->crtol = 1e-8;
79: tao->catol = 1e-8;
80: #endif
81: tao->gttol = 0.0;
82: tao->steptol = 0.0;
83: tao->trust0 = PETSC_INFINITY;
84: tao->fmin = PETSC_NINFINITY;
86: tao->hist_reset = PETSC_TRUE;
88: TaoResetStatistics(tao);
89: *newtao = tao;
90: return 0;
91: }
93: /*@
94: TaoSolve - Solves an optimization problem min F(x) s.t. l <= x <= u
96: Collective on Tao
98: Input Parameters:
99: . tao - the Tao context
101: Notes:
102: The user must set up the Tao with calls to TaoSetSolution(),
103: TaoSetObjective(),
104: TaoSetGradient(), and (if using 2nd order method) TaoSetHessian().
106: You should call TaoGetConvergedReason() or run with -tao_converged_reason to determine if the optimization algorithm actually succeeded or
107: why it failed.
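   A typical driver therefore looks roughly like the following sketch, where FormFunction(),
   FormGradient(), and the user context are hypothetical application routines and error
   checking is omitted:
.vb
   Vec                x;      /* initial guess, overwritten with the solution */
   TaoConvergedReason reason;

   TaoSetSolution(tao,x);
   TaoSetObjective(tao,FormFunction,&user);
   TaoSetGradient(tao,NULL,FormGradient,&user);
   TaoSolve(tao);
   TaoGetConvergedReason(tao,&reason);
   if (reason <= 0) PetscPrintf(PETSC_COMM_WORLD,"TAO failed to converge\n");
.ve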
109: Level: beginner
111: .seealso: TaoCreate(), TaoSetObjective(), TaoSetGradient(), TaoSetHessian(), TaoGetConvergedReason()
112: @*/
113: PetscErrorCode TaoSolve(Tao tao)
114: {
115: PetscErrorCode ierr;
116: static PetscBool set = PETSC_FALSE;
119: PetscCitationsRegister("@TechReport{tao-user-ref,\n"
120: "title = {Toolkit for Advanced Optimization (TAO) Users Manual},\n"
121: "author = {Todd Munson and Jason Sarich and Stefan Wild and Steve Benson and Lois Curfman McInnes},\n"
122: "Institution = {Argonne National Laboratory},\n"
123: "Year = 2014,\n"
124: "Number = {ANL/MCS-TM-322 - Revision 3.5},\n"
125: "url = {https://www.mcs.anl.gov/research/projects/tao/}\n}\n",&set);
126: tao->header_printed = PETSC_FALSE;
127: TaoSetUp(tao);
128: TaoResetStatistics(tao);
129: if (tao->linesearch) {
130: TaoLineSearchReset(tao->linesearch);
131: }
133: PetscLogEventBegin(TAO_Solve,tao,0,0,0);
134: if (tao->ops->solve) (*tao->ops->solve)(tao);
135: PetscLogEventEnd(TAO_Solve,tao,0,0,0);
137: VecViewFromOptions(tao->solution,(PetscObject)tao,"-tao_view_solution");
139: tao->ntotalits += tao->niter;
140: TaoViewFromOptions(tao,NULL,"-tao_view");
142: if (tao->printreason) {
143: if (tao->reason > 0) {
144: PetscPrintf(((PetscObject)tao)->comm,"TAO solve converged due to %s iterations %D\n",TaoConvergedReasons[tao->reason],tao->niter);
145: } else {
146: PetscPrintf(((PetscObject)tao)->comm,"TAO solve did not converge due to %s iteration %D\n",TaoConvergedReasons[tao->reason],tao->niter);
147: }
148: }
149: return 0;
150: }
152: /*@
153: TaoSetUp - Sets up the internal data structures for the later use
154: of a Tao solver
156: Collective on tao
158: Input Parameters:
159: . tao - the TAO context
161: Notes:
162: The user will not need to explicitly call TaoSetUp(), as it will
163: automatically be called in TaoSolve(). However, if the user
164: desires to call it explicitly, it should come after TaoCreate()
165: and any TaoSetSomething() routines, but before TaoSolve().
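   For example (a sketch; the explicit call is optional):
.vb
   TaoCreate(PETSC_COMM_WORLD,&tao);
   /* ... TaoSetSolution() and other TaoSetSomething() routines ... */
   TaoSetUp(tao);      /* optional; otherwise performed inside TaoSolve() */
   TaoSolve(tao);
.ve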
167: Level: advanced
169: .seealso: TaoCreate(), TaoSolve()
170: @*/
171: PetscErrorCode TaoSetUp(Tao tao)
172: {
174: if (tao->setupcalled) return 0;
176: if (tao->ops->setup) {
177: (*tao->ops->setup)(tao);
178: }
179: tao->setupcalled = PETSC_TRUE;
180: return 0;
181: }
183: /*@C
184: TaoDestroy - Destroys the TAO context that was created with
185: TaoCreate()
187: Collective on Tao
189: Input Parameter:
190: . tao - the Tao context
192: Level: beginner
194: .seealso: TaoCreate(), TaoSolve()
195: @*/
196: PetscErrorCode TaoDestroy(Tao *tao)
197: {
198: if (!*tao) return 0;
200: if (--((PetscObject)*tao)->refct > 0) {*tao = NULL;return 0;}
202: if ((*tao)->ops->destroy) {
203: (*((*tao))->ops->destroy)(*tao);
204: }
205: KSPDestroy(&(*tao)->ksp);
206: TaoLineSearchDestroy(&(*tao)->linesearch);
208: if ((*tao)->ops->convergencedestroy) {
209: (*(*tao)->ops->convergencedestroy)((*tao)->cnvP);
210: if ((*tao)->jacobian_state_inv) {
211: MatDestroy(&(*tao)->jacobian_state_inv);
212: }
213: }
214: VecDestroy(&(*tao)->solution);
215: VecDestroy(&(*tao)->gradient);
216: VecDestroy(&(*tao)->ls_res);
218: if ((*tao)->gradient_norm) {
219: PetscObjectDereference((PetscObject)(*tao)->gradient_norm);
220: VecDestroy(&(*tao)->gradient_norm_tmp);
221: }
223: VecDestroy(&(*tao)->XL);
224: VecDestroy(&(*tao)->XU);
225: VecDestroy(&(*tao)->IL);
226: VecDestroy(&(*tao)->IU);
227: VecDestroy(&(*tao)->DE);
228: VecDestroy(&(*tao)->DI);
229: VecDestroy(&(*tao)->constraints);
230: VecDestroy(&(*tao)->constraints_equality);
231: VecDestroy(&(*tao)->constraints_inequality);
232: VecDestroy(&(*tao)->stepdirection);
233: MatDestroy(&(*tao)->hessian_pre);
234: MatDestroy(&(*tao)->hessian);
235: MatDestroy(&(*tao)->ls_jac);
236: MatDestroy(&(*tao)->ls_jac_pre);
237: MatDestroy(&(*tao)->jacobian_pre);
238: MatDestroy(&(*tao)->jacobian);
239: MatDestroy(&(*tao)->jacobian_state_pre);
240: MatDestroy(&(*tao)->jacobian_state);
241: MatDestroy(&(*tao)->jacobian_state_inv);
242: MatDestroy(&(*tao)->jacobian_design);
243: MatDestroy(&(*tao)->jacobian_equality);
244: MatDestroy(&(*tao)->jacobian_equality_pre);
245: MatDestroy(&(*tao)->jacobian_inequality);
246: MatDestroy(&(*tao)->jacobian_inequality_pre);
247: ISDestroy(&(*tao)->state_is);
248: ISDestroy(&(*tao)->design_is);
249: VecDestroy(&(*tao)->res_weights_v);
250: TaoCancelMonitors(*tao);
251: if ((*tao)->hist_malloc) {
252: PetscFree4((*tao)->hist_obj,(*tao)->hist_resid,(*tao)->hist_cnorm,(*tao)->hist_lits);
253: }
254: if ((*tao)->res_weights_n) {
255: PetscFree((*tao)->res_weights_rows);
256: PetscFree((*tao)->res_weights_cols);
257: PetscFree((*tao)->res_weights_w);
258: }
259: PetscHeaderDestroy(tao);
260: return 0;
261: }
263: /*@
264: TaoSetFromOptions - Sets various Tao parameters from user
265: options.
267: Collective on Tao
269: Input Parameter:
270: . tao - the Tao solver context
272: Options Database Keys:
273: + -tao_type <type> - The algorithm that TAO uses (lmvm, nls, etc.)
274: . -tao_gatol <gatol> - absolute error tolerance for ||gradient||
275: . -tao_grtol <grtol> - relative error tolerance for ||gradient||
276: . -tao_gttol <gttol> - reduction of ||gradient|| relative to initial gradient
277: . -tao_max_it <max> - sets maximum number of iterations
278: . -tao_max_funcs <max> - sets maximum number of function evaluations
279: . -tao_fmin <fmin> - stop if function value reaches fmin
280: . -tao_steptol <tol> - stop if trust region radius less than <tol>
281: . -tao_trust0 <t> - initial trust region radius
282: . -tao_monitor - prints function value and residual at each iteration
283: . -tao_smonitor - same as tao_monitor, but truncates very small values
284: . -tao_cmonitor - prints function value, residual, and constraint norm at each iteration
285: . -tao_view_solution - prints solution vector at each iteration
286: . -tao_view_ls_residual - prints least-squares residual vector at each iteration
287: . -tao_view_stepdirection - prints step direction vector at each iteration
288: . -tao_view_gradient - prints gradient vector at each iteration
289: . -tao_draw_solution - graphically view solution vector at each iteration
290: . -tao_draw_step - graphically view step vector at each iteration
291: . -tao_draw_gradient - graphically view gradient at each iteration
292: . -tao_fd_gradient - use gradient computed with finite differences
293: . -tao_fd_hessian - use hessian computed with finite differences
294: . -tao_mf_hessian - use matrix-free hessian computed with finite differences
295: . -tao_cancelmonitors - cancels all monitors (except those set with command line)
296: . -tao_view - prints information about the Tao after solving
297: - -tao_converged_reason - prints the reason TAO stopped iterating
299: Notes:
300: To see all options, run your program with the -help option or consult the
301: user's manual. This routine should be called after TaoCreate() but before TaoSolve().
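   For example (a minimal sketch; error checking omitted):
.vb
   TaoCreate(PETSC_COMM_WORLD,&tao);
   /* ... set the solution vector and callbacks ... */
   TaoSetFromOptions(tao);    /* reads -tao_type, -tao_gatol, -tao_monitor, ... */
   TaoSolve(tao);
.ve
   The program can then be run with command line options such as -tao_type lmvm -tao_monitor -tao_gatol 1.e-6.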
303: Level: beginner
304: @*/
305: PetscErrorCode TaoSetFromOptions(Tao tao)
306: {
308: TaoType default_type = TAOLMVM;
309: char type[256], monfilename[PETSC_MAX_PATH_LEN];
310: PetscViewer monviewer;
311: PetscBool flg;
312: MPI_Comm comm;
315: PetscObjectGetComm((PetscObject)tao,&comm);
317: /* So no warnings are given about unused options */
318: PetscOptionsHasName(((PetscObject)tao)->options,((PetscObject)tao)->prefix,"-tao_ls_type",&flg);
320: PetscObjectOptionsBegin((PetscObject)tao);
321: {
322: if (((PetscObject)tao)->type_name) default_type = ((PetscObject)tao)->type_name;
323: /* Check for type from options */
324: PetscOptionsFList("-tao_type","Tao Solver type","TaoSetType",TaoList,default_type,type,256,&flg);
325: if (flg) {
326: TaoSetType(tao,type);
327: } else if (!((PetscObject)tao)->type_name) {
328: TaoSetType(tao,default_type);
329: }
331: PetscOptionsReal("-tao_catol","Stop if constraints violations within","TaoSetConstraintTolerances",tao->catol,&tao->catol,&flg);
332: if (flg) tao->catol_changed = PETSC_TRUE;
333: PetscOptionsReal("-tao_crtol","Stop if relative constraint violations within","TaoSetConstraintTolerances",tao->crtol,&tao->crtol,&flg);
334: if (flg) tao->crtol_changed = PETSC_TRUE;
335: PetscOptionsReal("-tao_gatol","Stop if norm of gradient less than","TaoSetTolerances",tao->gatol,&tao->gatol,&flg);
336: if (flg) tao->gatol_changed = PETSC_TRUE;
337: PetscOptionsReal("-tao_grtol","Stop if norm of gradient divided by the function value is less than","TaoSetTolerances",tao->grtol,&tao->grtol,&flg);
338: if (flg) tao->grtol_changed = PETSC_TRUE;
339: PetscOptionsReal("-tao_gttol","Stop if the norm of the gradient is less than the norm of the initial gradient times tol","TaoSetTolerances",tao->gttol,&tao->gttol,&flg);
340: if (flg) tao->gttol_changed = PETSC_TRUE;
341: PetscOptionsInt("-tao_max_it","Stop if iteration number exceeds","TaoSetMaximumIterations",tao->max_it,&tao->max_it,&flg);
342: if (flg) tao->max_it_changed = PETSC_TRUE;
343: PetscOptionsInt("-tao_max_funcs","Stop if number of function evaluations exceeds","TaoSetMaximumFunctionEvaluations",tao->max_funcs,&tao->max_funcs,&flg);
344: if (flg) tao->max_funcs_changed = PETSC_TRUE;
345: PetscOptionsReal("-tao_fmin","Stop if function less than","TaoSetFunctionLowerBound",tao->fmin,&tao->fmin,&flg);
346: if (flg) tao->fmin_changed = PETSC_TRUE;
347: PetscOptionsReal("-tao_steptol","Stop if step size or trust region radius less than","",tao->steptol,&tao->steptol,&flg);
348: if (flg) tao->steptol_changed = PETSC_TRUE;
349: PetscOptionsReal("-tao_trust0","Initial trust region radius","TaoSetTrustRegionRadius",tao->trust0,&tao->trust0,&flg);
350: if (flg) tao->trust0_changed = PETSC_TRUE;
351: PetscOptionsString("-tao_view_solution","view solution vector after each evaluation","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
352: if (flg) {
353: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
354: TaoSetMonitor(tao,TaoSolutionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
355: }
357: PetscOptionsBool("-tao_converged_reason","Print reason for TAO converged","TaoSolve",tao->printreason,&tao->printreason,NULL);
358: PetscOptionsString("-tao_view_gradient","view gradient vector after each evaluation","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
359: if (flg) {
360: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
361: TaoSetMonitor(tao,TaoGradientMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
362: }
364: PetscOptionsString("-tao_view_stepdirection","view step direction vector after each iteration","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
365: if (flg) {
366: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
367: TaoSetMonitor(tao,TaoStepDirectionMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
368: }
370: PetscOptionsString("-tao_view_residual","view least-squares residual vector after each evaluation","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
371: if (flg) {
372: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
373: TaoSetMonitor(tao,TaoResidualMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
374: }
376: PetscOptionsString("-tao_monitor","Use the default convergence monitor","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
377: if (flg) {
378: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
379: TaoSetMonitor(tao,TaoMonitorDefault,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
380: }
382: PetscOptionsString("-tao_gmonitor","Use the convergence monitor with extra globalization info","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
383: if (flg) {
384: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
385: TaoSetMonitor(tao,TaoDefaultGMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
386: }
388: PetscOptionsString("-tao_smonitor","Use the short convergence monitor","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
389: if (flg) {
390: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
391: TaoSetMonitor(tao,TaoDefaultSMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
392: }
394: PetscOptionsString("-tao_cmonitor","Use the default convergence monitor with constraint norm","TaoSetMonitor","stdout",monfilename,sizeof(monfilename),&flg);
395: if (flg) {
396: PetscViewerASCIIOpen(comm,monfilename,&monviewer);
397: TaoSetMonitor(tao,TaoDefaultCMonitor,monviewer,(PetscErrorCode (*)(void**))PetscViewerDestroy);
398: }
400: flg = PETSC_FALSE;
401: PetscOptionsBool("-tao_cancelmonitors","cancel all monitors and call any registered destroy routines","TaoCancelMonitors",flg,&flg,NULL);
402: if (flg) TaoCancelMonitors(tao);
404: flg = PETSC_FALSE;
405: PetscOptionsBool("-tao_draw_solution","Plot solution vector at each iteration","TaoSetMonitor",flg,&flg,NULL);
406: if (flg) {
407: TaoMonitorDrawCtx drawctx;
408: PetscInt howoften = 1;
409: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao),NULL,NULL,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&drawctx);
410: TaoSetMonitor(tao,TaoDrawSolutionMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
411: }
413: flg = PETSC_FALSE;
414: PetscOptionsBool("-tao_draw_step","plots step direction at each iteration","TaoSetMonitor",flg,&flg,NULL);
415: if (flg) {
416: TaoSetMonitor(tao,TaoDrawStepMonitor,NULL,NULL);
417: }
419: flg = PETSC_FALSE;
420: PetscOptionsBool("-tao_draw_gradient","plots gradient at each iteration","TaoSetMonitor",flg,&flg,NULL);
421: if (flg) {
422: TaoMonitorDrawCtx drawctx;
423: PetscInt howoften = 1;
424: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao),NULL,NULL,PETSC_DECIDE,PETSC_DECIDE,300,300,howoften,&drawctx);
425: TaoSetMonitor(tao,TaoDrawGradientMonitor,drawctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
426: }
427: flg = PETSC_FALSE;
428: PetscOptionsBool("-tao_fd_gradient","compute gradient using finite differences","TaoDefaultComputeGradient",flg,&flg,NULL);
429: if (flg) {
430: TaoSetGradient(tao,NULL,TaoDefaultComputeGradient,NULL);
431: }
432: flg = PETSC_FALSE;
433: PetscOptionsBool("-tao_fd_hessian","compute hessian using finite differences","TaoDefaultComputeHessian",flg,&flg,NULL);
434: if (flg) {
435: Mat H;
437: MatCreate(PetscObjectComm((PetscObject)tao),&H);
438: MatSetType(H,MATAIJ);
439: TaoSetHessian(tao,H,H,TaoDefaultComputeHessian,NULL);
440: MatDestroy(&H);
441: }
442: flg = PETSC_FALSE;
443: PetscOptionsBool("-tao_mf_hessian","compute matrix-free hessian using finite differences","TaoDefaultComputeHessianMFFD",flg,&flg,NULL);
444: if (flg) {
445: Mat H;
447: MatCreate(PetscObjectComm((PetscObject)tao),&H);
448: TaoSetHessian(tao,H,H,TaoDefaultComputeHessianMFFD,NULL);
449: MatDestroy(&H);
450: }
451: flg = PETSC_FALSE;
452: PetscOptionsBool("-tao_recycle_history","enable recycling/re-using information from the previous TaoSolve() call for some algorithms","TaoSetRecycleHistory",flg,&flg,NULL);
453: if (flg) {
454: TaoSetRecycleHistory(tao,PETSC_TRUE);
455: }
456: PetscOptionsEnum("-tao_subset_type","subset type","",TaoSubSetTypes,(PetscEnum)tao->subset_type,(PetscEnum*)&tao->subset_type,NULL);
458: if (tao->linesearch) {
459: TaoLineSearchSetFromOptions(tao->linesearch);
460: }
462: if (tao->ops->setfromoptions) {
463: (*tao->ops->setfromoptions)(PetscOptionsObject,tao);
464: }
465: }
466: PetscOptionsEnd();
467: return 0;
468: }
470: /*@C
471: TaoViewFromOptions - Views a Tao object based on values in the options database
473: Collective on Tao
475: Input Parameters:
476: + A - the Tao context
477: . obj - Optional object
478: - name - command line option
480: Level: intermediate
481: .seealso: Tao, TaoView(), PetscObjectViewFromOptions(), TaoCreate()
482: @*/
483: PetscErrorCode TaoViewFromOptions(Tao A,PetscObject obj,const char name[])
484: {
486: PetscObjectViewFromOptions((PetscObject)A,obj,name);
487: return 0;
488: }
490: /*@C
491: TaoView - Prints information about the Tao
493: Collective on Tao
495: Input Parameters:
496: + tao - the Tao context
497: - viewer - visualization context
499: Options Database Key:
500: . -tao_view - Calls TaoView() at the end of TaoSolve()
502: Notes:
503: The available visualization contexts include
504: + PETSC_VIEWER_STDOUT_SELF - standard output (default)
505: - PETSC_VIEWER_STDOUT_WORLD - synchronized standard
506: output where only the first processor opens
507: the file. All other processors send their
508: data to the first processor to print.
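   For example, the solver information can be printed explicitly after a solve with
   (a sketch; the same output is produced by running with -tao_view):
.vb
   TaoSolve(tao);
   TaoView(tao,PETSC_VIEWER_STDOUT_WORLD);
.ve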
510: Level: beginner
512: .seealso: PetscViewerASCIIOpen()
513: @*/
514: PetscErrorCode TaoView(Tao tao, PetscViewer viewer)
515: {
516: PetscBool isascii,isstring;
517: TaoType type;
520: if (!viewer) {
521: PetscViewerASCIIGetStdout(((PetscObject)tao)->comm,&viewer);
522: }
526: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
527: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
528: if (isascii) {
529: PetscObjectPrintClassNamePrefixType((PetscObject)tao,viewer);
531: if (tao->ops->view) {
532: PetscViewerASCIIPushTab(viewer);
533: (*tao->ops->view)(tao,viewer);
534: PetscViewerASCIIPopTab(viewer);
535: }
536: if (tao->linesearch) {
537: PetscViewerASCIIPushTab(viewer);
538: TaoLineSearchView(tao->linesearch,viewer);
539: PetscViewerASCIIPopTab(viewer);
540: }
541: if (tao->ksp) {
542: PetscViewerASCIIPushTab(viewer);
543: KSPView(tao->ksp,viewer);
544: PetscViewerASCIIPrintf(viewer,"total KSP iterations: %D\n",tao->ksp_tot_its);
545: PetscViewerASCIIPopTab(viewer);
546: }
548: PetscViewerASCIIPushTab(viewer);
550: if (tao->XL || tao->XU) {
551: PetscViewerASCIIPrintf(viewer,"Active Set subset type: %s\n",TaoSubSetTypes[tao->subset_type]);
552: }
554: PetscViewerASCIIPrintf(viewer,"convergence tolerances: gatol=%g,",(double)tao->gatol);
555: PetscViewerASCIIPrintf(viewer," steptol=%g,",(double)tao->steptol);
556: PetscViewerASCIIPrintf(viewer," gttol=%g\n",(double)tao->gttol);
557: PetscViewerASCIIPrintf(viewer,"Residual in Function/Gradient:=%g\n",(double)tao->residual);
559: if (tao->constrained) {
560: PetscViewerASCIIPrintf(viewer,"convergence tolerances:");
561: PetscViewerASCIIPrintf(viewer," catol=%g,",(double)tao->catol);
562: PetscViewerASCIIPrintf(viewer," crtol=%g\n",(double)tao->crtol);
563: PetscViewerASCIIPrintf(viewer,"Residual in Constraints:=%g\n",(double)tao->cnorm);
564: }
566: if (tao->trust < tao->steptol) {
567: PetscViewerASCIIPrintf(viewer,"convergence tolerances: steptol=%g\n",(double)tao->steptol);
568: PetscViewerASCIIPrintf(viewer,"Final trust region radius:=%g\n",(double)tao->trust);
569: }
571: if (tao->fmin>-1.e25) {
572: PetscViewerASCIIPrintf(viewer,"convergence tolerances: function minimum=%g\n",(double)tao->fmin);
573: }
574: PetscViewerASCIIPrintf(viewer,"Objective value=%g\n",(double)tao->fc);
576: PetscViewerASCIIPrintf(viewer,"total number of iterations=%D, ",tao->niter);
577: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_it);
579: if (tao->nfuncs>0) {
580: PetscViewerASCIIPrintf(viewer,"total number of function evaluations=%D,",tao->nfuncs);
581: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
582: }
583: if (tao->ngrads>0) {
584: PetscViewerASCIIPrintf(viewer,"total number of gradient evaluations=%D,",tao->ngrads);
585: PetscViewerASCIIPrintf(viewer," max: %D\n",tao->max_funcs);
586: }
587: if (tao->nfuncgrads>0) {
588: PetscViewerASCIIPrintf(viewer,"total number of function/gradient evaluations=%D,",tao->nfuncgrads);
589: PetscViewerASCIIPrintf(viewer," (max: %D)\n",tao->max_funcs);
590: }
591: if (tao->nhess>0) {
592: PetscViewerASCIIPrintf(viewer,"total number of Hessian evaluations=%D\n",tao->nhess);
593: }
594: if (tao->nconstraints>0) {
595: PetscViewerASCIIPrintf(viewer,"total number of constraint function evaluations=%D\n",tao->nconstraints);
596: }
597: if (tao->njac>0) {
598: PetscViewerASCIIPrintf(viewer,"total number of Jacobian evaluations=%D\n",tao->njac);
599: }
601: if (tao->reason>0) {
602: PetscViewerASCIIPrintf(viewer, "Solution converged: ");
603: switch (tao->reason) {
604: case TAO_CONVERGED_GATOL:
605: PetscViewerASCIIPrintf(viewer," ||g(X)|| <= gatol\n");
606: break;
607: case TAO_CONVERGED_GRTOL:
608: PetscViewerASCIIPrintf(viewer," ||g(X)||/|f(X)| <= grtol\n");
609: break;
610: case TAO_CONVERGED_GTTOL:
611: PetscViewerASCIIPrintf(viewer," ||g(X)||/||g(X0)|| <= gttol\n");
612: break;
613: case TAO_CONVERGED_STEPTOL:
614: PetscViewerASCIIPrintf(viewer," Steptol -- step size small\n");
615: break;
616: case TAO_CONVERGED_MINF:
617: PetscViewerASCIIPrintf(viewer," Minf -- f < fmin\n");
618: break;
619: case TAO_CONVERGED_USER:
620: PetscViewerASCIIPrintf(viewer," User Terminated\n");
621: break;
622: default:
623: PetscViewerASCIIPrintf(viewer,"\n");
624: break;
625: }
626: } else {
627: PetscViewerASCIIPrintf(viewer,"Solver terminated: %d",tao->reason);
628: switch (tao->reason) {
629: case TAO_DIVERGED_MAXITS:
630: PetscViewerASCIIPrintf(viewer," Maximum Iterations\n");
631: break;
632: case TAO_DIVERGED_NAN:
633: PetscViewerASCIIPrintf(viewer," NAN or Inf encountered\n");
634: break;
635: case TAO_DIVERGED_MAXFCN:
636: PetscViewerASCIIPrintf(viewer," Maximum Function Evaluations\n");
637: break;
638: case TAO_DIVERGED_LS_FAILURE:
639: PetscViewerASCIIPrintf(viewer," Line Search Failure\n");
640: break;
641: case TAO_DIVERGED_TR_REDUCTION:
642: PetscViewerASCIIPrintf(viewer," Trust Region too small\n");
643: break;
644: case TAO_DIVERGED_USER:
645: PetscViewerASCIIPrintf(viewer," User Terminated\n");
646: break;
647: default:
648: PetscViewerASCIIPrintf(viewer,"\n");
649: break;
650: }
651: }
652: PetscViewerASCIIPopTab(viewer);
653: } else if (isstring) {
654: TaoGetType(tao,&type);
655: PetscViewerStringSPrintf(viewer," %-3.3s",type);
656: }
657: return 0;
658: }
660: /*@
661: TaoSetRecycleHistory - Sets the boolean flag to enable/disable re-using
662: iterate information from the previous TaoSolve(). This feature is disabled by
663: default.
665: For conjugate gradient methods (BNCG), this re-uses the latest search direction
666: from the previous TaoSolve() call when computing the first search direction in a
667: new solution. By default, CG methods set the first search direction to the
668: negative gradient.
670: For the quasi-Newton family of methods (BQNLS, BQNKLS, BQNKTR, BQNKTL), this re-uses
671: the accumulated quasi-Newton Hessian approximation from the previous TaoSolve()
672: call. By default, the quasi-Newton methods reset the initial Hessian approximation to
673: the identity matrix.
675: For any other algorithm, this setting has no effect.
677: Logically collective on Tao
679: Input Parameters:
680: + tao - the Tao context
681: - recycle - boolean flag
683: Options Database Keys:
684: . -tao_recycle_history <true,false> - reuse the history
686: Level: intermediate
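   Example Usage (a sketch of warm-starting a sequence of related solves; error checking omitted):
.vb
   TaoSetRecycleHistory(tao,PETSC_TRUE);
   TaoSolve(tao);            /* first solve accumulates history */
   /* ... modify the problem data slightly ... */
   TaoSolve(tao);            /* second solve re-uses the stored information */
.ve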
688: .seealso: TaoGetRecycleHistory(), TAOBNCG, TAOBQNLS, TAOBQNKLS, TAOBQNKTR, TAOBQNKTL
690: @*/
691: PetscErrorCode TaoSetRecycleHistory(Tao tao, PetscBool recycle)
692: {
695: tao->recycle = recycle;
696: return 0;
697: }
699: /*@
700: TaoGetRecycleHistory - Retrieve the boolean flag for re-using iterate information
701: from the previous TaoSolve(). This feature is disabled by default.
703: Logically collective on Tao
705: Input Parameters:
706: . tao - the Tao context
708: Output Parameters:
709: . recycle - boolean flag
711: Level: intermediate
713: .seealso: TaoSetRecycleHistory(), TAOBNCG, TAOBQNLS, TAOBQNKLS, TAOBQNKTR, TAOBQNKTL
715: @*/
716: PetscErrorCode TaoGetRecycleHistory(Tao tao, PetscBool *recycle)
717: {
720: *recycle = tao->recycle;
721: return 0;
722: }
724: /*@
725: TaoSetTolerances - Sets parameters used in TAO convergence tests
727: Logically collective on Tao
729: Input Parameters:
730: + tao - the Tao context
731: . gatol - stop if norm of gradient is less than this
732: . grtol - stop if relative norm of gradient is less than this
733: - gttol - stop if norm of gradient is reduced by this factor
735: Options Database Keys:
736: + -tao_gatol <gatol> - Sets gatol
737: . -tao_grtol <grtol> - Sets grtol
738: - -tao_gttol <gttol> - Sets gttol
740: Stopping Criteria:
741: $ ||g(X)|| <= gatol
742: $ ||g(X)|| / |f(X)| <= grtol
743: $ ||g(X)|| / ||g(X0)|| <= gttol
745: Notes:
746: Use PETSC_DEFAULT to leave one or more tolerances unchanged.
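   For example, to tighten only the absolute gradient tolerance (a sketch):
.vb
   TaoSetTolerances(tao,1.e-10,PETSC_DEFAULT,PETSC_DEFAULT);
.ve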
748: Level: beginner
750: .seealso: TaoGetTolerances()
752: @*/
753: PetscErrorCode TaoSetTolerances(Tao tao, PetscReal gatol, PetscReal grtol, PetscReal gttol)
754: {
760: if (gatol != PETSC_DEFAULT) {
761: if (gatol<0) {
762: PetscInfo(tao,"Tried to set negative gatol -- ignored.\n");
763: } else {
764: tao->gatol = PetscMax(0,gatol);
765: tao->gatol_changed = PETSC_TRUE;
766: }
767: }
769: if (grtol != PETSC_DEFAULT) {
770: if (grtol<0) {
771: PetscInfo(tao,"Tried to set negative grtol -- ignored.\n");
772: } else {
773: tao->grtol = PetscMax(0,grtol);
774: tao->grtol_changed = PETSC_TRUE;
775: }
776: }
778: if (gttol != PETSC_DEFAULT) {
779: if (gttol<0) {
780: PetscInfo(tao,"Tried to set negative gttol -- ignored.\n");
781: } else {
782: tao->gttol = PetscMax(0,gttol);
783: tao->gttol_changed = PETSC_TRUE;
784: }
785: }
786: return 0;
787: }
789: /*@
790: TaoSetConstraintTolerances - Sets constraint tolerance parameters used in TAO convergence tests
792: Logically collective on Tao
794: Input Parameters:
795: + tao - the Tao context
796: . catol - absolute constraint tolerance; the constraint norm must be less than catol for the gatol convergence criterion to apply
797: - crtol - relative constraint tolerance; the constraint norm must be less than crtol for the grtol and gttol convergence criteria to apply
799: Options Database Keys:
800: + -tao_catol <catol> - Sets catol
801: - -tao_crtol <crtol> - Sets crtol
803: Notes:
804: Use PETSC_DEFAULT to leave any tolerance unchanged.
806: Level: intermediate
808: .seealso: TaoGetTolerances(), TaoGetConstraintTolerances(), TaoSetTolerances()
810: @*/
811: PetscErrorCode TaoSetConstraintTolerances(Tao tao, PetscReal catol, PetscReal crtol)
812: {
817: if (catol != PETSC_DEFAULT) {
818: if (catol<0) {
819: PetscInfo(tao,"Tried to set negative catol -- ignored.\n");
820: } else {
821: tao->catol = PetscMax(0,catol);
822: tao->catol_changed = PETSC_TRUE;
823: }
824: }
826: if (crtol != PETSC_DEFAULT) {
827: if (crtol<0) {
828: PetscInfo(tao,"Tried to set negative crtol -- ignored.\n");
829: } else {
830: tao->crtol = PetscMax(0,crtol);
831: tao->crtol_changed = PETSC_TRUE;
832: }
833: }
834: return 0;
835: }
837: /*@
838: TaoGetConstraintTolerances - Gets constraint tolerance parameters used in TAO convergence tests
840: Not Collective
842: Input Parameter:
843: . tao - the Tao context
845: Output Parameters:
846: + catol - absolute constraint tolerance; the constraint norm must be less than catol for the gatol convergence criterion to apply
847: - crtol - relative constraint tolerance; the constraint norm must be less than crtol for the grtol and gttol convergence criteria to apply
849: Level: intermediate
851: .seealso: TaoGetTolerances(), TaoSetTolerances(), TaoSetConstraintTolerances()
853: @*/
854: PetscErrorCode TaoGetConstraintTolerances(Tao tao, PetscReal *catol, PetscReal *crtol)
855: {
857: if (catol) *catol = tao->catol;
858: if (crtol) *crtol = tao->crtol;
859: return 0;
860: }
862: /*@
863: TaoSetFunctionLowerBound - Sets a bound on the solution objective value.
864: When an approximate solution with an objective value below this number
865: has been found, the solver will terminate.
867: Logically Collective on Tao
869: Input Parameters:
870: + tao - the Tao solver context
871: - fmin - the tolerance
873: Options Database Keys:
874: . -tao_fmin <fmin> - sets the minimum function value
876: Level: intermediate
878: .seealso: TaoSetTolerances()
879: @*/
880: PetscErrorCode TaoSetFunctionLowerBound(Tao tao,PetscReal fmin)
881: {
884: tao->fmin = fmin;
885: tao->fmin_changed = PETSC_TRUE;
886: return 0;
887: }
889: /*@
890: TaoGetFunctionLowerBound - Gets the bound on the solution objective value.
891: When an approximate solution with an objective value below this number
892: has been found, the solver will terminate.
894: Not collective on Tao
896: Input Parameters:
897: . tao - the Tao solver context
899: Output Parameters:
900: . fmin - the minimum function value
902: Level: intermediate
904: .seealso: TaoSetFunctionLowerBound()
905: @*/
906: PetscErrorCode TaoGetFunctionLowerBound(Tao tao,PetscReal *fmin)
907: {
910: *fmin = tao->fmin;
911: return 0;
912: }
914: /*@
915: TaoSetMaximumFunctionEvaluations - Sets a maximum number of
916: function evaluations.
918: Logically Collective on Tao
920: Input Parameters:
921: + tao - the Tao solver context
922: - nfcn - the maximum number of function evaluations (>=0)
924: Options Database Keys:
925: . -tao_max_funcs <nfcn> - sets the maximum number of function evaluations
927: Level: intermediate
929: .seealso: TaoSetTolerances(), TaoSetMaximumIterations()
930: @*/
932: PetscErrorCode TaoSetMaximumFunctionEvaluations(Tao tao,PetscInt nfcn)
933: {
936: if (nfcn >= 0) { tao->max_funcs = PetscMax(0,nfcn); }
937: else { tao->max_funcs = -1; }
938: tao->max_funcs_changed = PETSC_TRUE;
939: return 0;
940: }
942: /*@
943: TaoGetMaximumFunctionEvaluations - Gets the maximum number of
944: function evaluations.
946: Not Collective
948: Input Parameters:
949: . tao - the Tao solver context
951: Output Parameters:
952: . nfcn - the maximum number of function evaluations
954: Level: intermediate
956: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
957: @*/
959: PetscErrorCode TaoGetMaximumFunctionEvaluations(Tao tao,PetscInt *nfcn)
960: {
963: *nfcn = tao->max_funcs;
964: return 0;
965: }
967: /*@
968: TaoGetCurrentFunctionEvaluations - Gets the current number of
969: function evaluations.
971: Not Collective
973: Input Parameters:
974: . tao - the Tao solver context
976: Output Parameters:
977: . nfuncs - the current number of function evaluations (maximum between gradient and function evaluations)
979: Level: intermediate
981: .seealso: TaoSetMaximumFunctionEvaluations(), TaoGetMaximumFunctionEvaluations(), TaoGetMaximumIterations()
982: @*/
984: PetscErrorCode TaoGetCurrentFunctionEvaluations(Tao tao,PetscInt *nfuncs)
985: {
988: *nfuncs = PetscMax(tao->nfuncs,tao->nfuncgrads);
989: return 0;
990: }
992: /*@
993: TaoSetMaximumIterations - Sets a maximum number of iterates.
995: Logically Collective on Tao
997: Input Parameters:
998: + tao - the Tao solver context
999: - maxits - the maximum number of iterates (>=0)
1001: Options Database Keys:
1002: . -tao_max_it <its> - sets the maximum number of iterations
1004: Level: intermediate
1006: .seealso: TaoSetTolerances(), TaoSetMaximumFunctionEvaluations()
1007: @*/
1008: PetscErrorCode TaoSetMaximumIterations(Tao tao,PetscInt maxits)
1009: {
1012: tao->max_it = PetscMax(0,maxits);
1013: tao->max_it_changed = PETSC_TRUE;
1014: return 0;
1015: }
1017: /*@
1018: TaoGetMaximumIterations - Gets the maximum number of iterates.
1020: Not Collective
1022: Input Parameters:
1023: . tao - the Tao solver context
1025: Output Parameters:
1026: . maxits - the maximum number of iterates
1028: Level: intermediate
1030: .seealso: TaoSetMaximumIterations(), TaoGetMaximumFunctionEvaluations()
1031: @*/
1032: PetscErrorCode TaoGetMaximumIterations(Tao tao,PetscInt *maxits)
1033: {
1036: *maxits = tao->max_it;
1037: return 0;
1038: }
1040: /*@
1041: TaoSetInitialTrustRegionRadius - Sets the initial trust region radius.
1043: Logically collective on Tao
1045: Input Parameters:
1046: + tao - a TAO optimization solver
1047: - radius - the trust region radius
1049: Level: intermediate
1051: Options Database Key:
1052: . -tao_trust0 <t0> - sets initial trust region radius
1054: .seealso: TaoGetTrustRegionRadius(), TaoSetTrustRegionTolerance()
1055: @*/
1056: PetscErrorCode TaoSetInitialTrustRegionRadius(Tao tao, PetscReal radius)
1057: {
1060: tao->trust0 = PetscMax(0.0,radius);
1061: tao->trust0_changed = PETSC_TRUE;
1062: return 0;
1063: }
1065: /*@
1066: TaoGetInitialTrustRegionRadius - Gets the initial trust region radius.
1068: Not Collective
1070: Input Parameter:
1071: . tao - a TAO optimization solver
1073: Output Parameter:
1074: . radius - the trust region radius
1076: Level: intermediate
1078: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetCurrentTrustRegionRadius()
1079: @*/
1080: PetscErrorCode TaoGetInitialTrustRegionRadius(Tao tao, PetscReal *radius)
1081: {
1084: *radius = tao->trust0;
1085: return 0;
1086: }
1088: /*@
1089: TaoGetCurrentTrustRegionRadius - Gets the current trust region radius.
1091: Not Collective
1093: Input Parameter:
1094: . tao - a TAO optimization solver
1096: Output Parameter:
1097: . radius - the trust region radius
1099: Level: intermediate
1101: .seealso: TaoSetInitialTrustRegionRadius(), TaoGetInitialTrustRegionRadius()
1102: @*/
1103: PetscErrorCode TaoGetCurrentTrustRegionRadius(Tao tao, PetscReal *radius)
1104: {
1107: *radius = tao->trust;
1108: return 0;
1109: }
1111: /*@
1112: TaoGetTolerances - gets the current values of tolerances
1114: Not Collective
1116: Input Parameter:
1117: . tao - the Tao context
1119: Output Parameters:
1120: + gatol - stop if norm of gradient is less than this
1121: . grtol - stop if relative norm of gradient is less than this
1122: - gttol - stop if norm of gradient is reduced by this factor
1124: Note: NULL can be used as an argument if not all tolerance values are needed
1126: .seealso: TaoSetTolerances()
1128: Level: intermediate
1129: @*/
1130: PetscErrorCode TaoGetTolerances(Tao tao, PetscReal *gatol, PetscReal *grtol, PetscReal *gttol)
1131: {
1133: if (gatol) *gatol = tao->gatol;
1134: if (grtol) *grtol = tao->grtol;
1135: if (gttol) *gttol = tao->gttol;
1136: return 0;
1137: }
1139: /*@
1140: TaoGetKSP - Gets the linear solver used by the optimization solver.
1141: Application writers should use TaoGetKSP if they need direct access
1142: to the PETSc KSP object.
1144: Not Collective
1146: Input Parameters:
1147: . tao - the TAO solver
1149: Output Parameters:
1150: . ksp - the KSP linear solver used in the optimization solver
1152: Level: intermediate
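   For example, the inner linear solver of a Newton-type method can be customized with
   (a sketch; some methods do not create a KSP, in which case ksp is returned as NULL):
.vb
   KSP ksp;
   PC  pc;

   TaoGetKSP(tao,&ksp);
   if (ksp) {
     KSPSetType(ksp,KSPCG);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCJACOBI);
   }
.ve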
1154: @*/
1155: PetscErrorCode TaoGetKSP(Tao tao, KSP *ksp)
1156: {
1159: *ksp = tao->ksp;
1160: return 0;
1161: }
1163: /*@
1164: TaoGetLinearSolveIterations - Gets the total number of linear iterations
1165: used by the TAO solver
1167: Not Collective
1169: Input Parameter:
1170: . tao - TAO context
1172: Output Parameter:
1173: . lits - number of linear iterations
1175: Notes:
1176: This counter is reset to zero for each successive call to TaoSolve()
1178: Level: intermediate
1180: .seealso: TaoGetKSP()
1181: @*/
1182: PetscErrorCode TaoGetLinearSolveIterations(Tao tao, PetscInt *lits)
1183: {
1186: *lits = tao->ksp_tot_its;
1187: return 0;
1188: }
1190: /*@
1191: TaoGetLineSearch - Gets the line search used by the optimization solver.
1192: Application writers should use TaoGetLineSearch if they need direct access
1193: to the TaoLineSearch object.
1195: Not Collective
1197: Input Parameters:
1198: . tao - the TAO solver
1200: Output Parameters:
1201: . ls - the line search used in the optimization solver
1203: Level: intermediate
1205: @*/
1206: PetscErrorCode TaoGetLineSearch(Tao tao, TaoLineSearch *ls)
1207: {
1210: *ls = tao->linesearch;
1211: return 0;
1212: }
1214: /*@
1215: TaoAddLineSearchCounts - Adds the number of function evaluations spent
1216: in the line search to the running total.
1218: Input Parameter:
1219: . tao - the TAO solver
1222: Level: developer
1224: .seealso: TaoLineSearchApply()
1225: @*/
1226: PetscErrorCode TaoAddLineSearchCounts(Tao tao)
1227: {
1228: PetscBool flg;
1229: PetscInt nfeval,ngeval,nfgeval;
1232: if (tao->linesearch) {
1233: TaoLineSearchIsUsingTaoRoutines(tao->linesearch,&flg);
1234: if (!flg) {
1235: TaoLineSearchGetNumberFunctionEvaluations(tao->linesearch,&nfeval,&ngeval,&nfgeval);
1236: tao->nfuncs += nfeval;
1237: tao->ngrads += ngeval;
1238: tao->nfuncgrads += nfgeval;
1239: }
1240: }
1241: return 0;
1242: }
1244: /*@
1245: TaoGetSolution - Returns the vector with the current TAO solution
1247: Not Collective
1249: Input Parameter:
1250: . tao - the Tao context
1252: Output Parameter:
1253: . X - the current solution
1255: Level: intermediate
1257: Note: The returned vector will be the same object that was passed into TaoSetSolution()
1258: @*/
1259: PetscErrorCode TaoGetSolution(Tao tao, Vec *X)
1260: {
1263: *X = tao->solution;
1264: return 0;
1265: }
1267: /*@
1268: TaoResetStatistics - Initialize the statistics used by TAO for all of the solvers.
1269: These statistics include the iteration number, residual norms, and convergence status.
1270: This routine gets called before solving each optimization problem.
1272: Collective on Tao
1274: Input Parameters:
1275: . tao - the Tao context
1277: Level: developer
1279: .seealso: TaoCreate(), TaoSolve()
1280: @*/
1281: PetscErrorCode TaoResetStatistics(Tao tao)
1282: {
1284: tao->niter = 0;
1285: tao->nfuncs = 0;
1286: tao->nfuncgrads = 0;
1287: tao->ngrads = 0;
1288: tao->nhess = 0;
1289: tao->njac = 0;
1290: tao->nconstraints = 0;
1291: tao->ksp_its = 0;
1292: tao->ksp_tot_its = 0;
1293: tao->reason = TAO_CONTINUE_ITERATING;
1294: tao->residual = 0.0;
1295: tao->cnorm = 0.0;
1296: tao->step = 0.0;
1297: tao->lsflag = PETSC_FALSE;
1298: if (tao->hist_reset) tao->hist_len = 0;
1299: return 0;
1300: }
1302: /*@C
1303: TaoSetUpdate - Sets the general-purpose update function called
1304: at the beginning of every iteration of the nonlinear solve. Specifically
1305: it is called at the top of every iteration, after the new solution and the gradient
1306: is determined, but before the Hessian is computed (if applicable).
1308: Logically Collective on Tao
1310: Input Parameters:
1311: + tao - The Tao solver context
1312: - func - The update function; ctx is an optional user-defined context passed through to func (may be NULL)
1314: Calling sequence of func:
1315: $ func(Tao tao, PetscInt step, void *ctx);
1317: . step - The current step of the iteration
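   Example Usage (a sketch; MyUpdate is a hypothetical user routine and error checking is omitted):
.vb
   PetscErrorCode MyUpdate(Tao tao, PetscInt step, void *ctx)  /* hypothetical user routine */
   {
     PetscPrintf(PETSC_COMM_WORLD,"beginning iteration %D\n",step);
     return 0;
   }
   ...
   TaoSetUpdate(tao,MyUpdate,NULL);
.ve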
1319: Level: advanced
1321: .seealso TaoSolve()
1322: @*/
1323: PetscErrorCode TaoSetUpdate(Tao tao, PetscErrorCode (*func)(Tao, PetscInt, void*), void *ctx)
1324: {
1326: tao->ops->update = func;
1327: tao->user_update = ctx;
1328: return 0;
1329: }
1331: /*@C
1332: TaoSetConvergenceTest - Sets the function that is to be used to test
1333: for convergence of the iterative minimization solution. The new convergence
1334: testing routine will replace TAO's default convergence test.
1336: Logically Collective on Tao
1338: Input Parameters:
1339: + tao - the Tao object
1340: . conv - the routine to test for convergence
1341: - ctx - [optional] context for private data for the convergence routine
1342: (may be NULL)
1344: Calling sequence of conv:
1345: $ PetscErrorCode conv(Tao tao, void *ctx)
1347: + tao - the Tao object
1348: - ctx - [optional] convergence context
1350: Note: The new convergence testing routine should call TaoSetConvergedReason().
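   Example Usage (a sketch; MyConvergenceTest is a hypothetical user routine and error checking is omitted):
.vb
   PetscErrorCode MyConvergenceTest(Tao tao, void *ctx)  /* hypothetical user routine */
   {
     PetscInt           its;
     PetscReal          f,gnorm,cnorm,xdiff;
     TaoConvergedReason reason;

     TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);
     if (its >= 50)          TaoSetConvergedReason(tao,TAO_DIVERGED_MAXITS);
     else if (gnorm < 1.e-7) TaoSetConvergedReason(tao,TAO_CONVERGED_USER);
     return 0;
   }
   ...
   TaoSetConvergenceTest(tao,MyConvergenceTest,NULL);
.ve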
1352: Level: advanced
1354: .seealso: TaoSetConvergedReason(), TaoGetSolutionStatus(), TaoGetTolerances(), TaoSetMonitor()
1356: @*/
1357: PetscErrorCode TaoSetConvergenceTest(Tao tao, PetscErrorCode (*conv)(Tao, void*), void *ctx)
1358: {
1360: tao->ops->convergencetest = conv;
1361: tao->cnvP = ctx;
1362: return 0;
1363: }
1365: /*@C
1366: TaoSetMonitor - Sets an ADDITIONAL function that is to be used at every
1367: iteration of the solver to display the iteration's
1368: progress.
1370: Logically Collective on Tao
1372: Input Parameters:
1373: + tao - the Tao solver context
1374: . mymonitor - monitoring routine
1375: - mctx - [optional] user-defined context for private data for the
1376: monitor routine (may be NULL)
1378: Calling sequence of mymonitor:
1379: .vb
1380: PetscErrorCode mymonitor(Tao tao,void *mctx)
1381: .ve
1383: + tao - the Tao solver context
1384: - mctx - [optional] monitoring context
1386: Options Database Keys:
1387: + -tao_monitor - sets TaoMonitorDefault()
1388: . -tao_smonitor - sets short monitor
1389: . -tao_cmonitor - same as smonitor plus constraint norm
1390: . -tao_view_solution - view solution at each iteration
1391: . -tao_view_gradient - view gradient at each iteration
1392: . -tao_view_ls_residual - view least-squares residual vector at each iteration
1393: - -tao_cancelmonitors - cancels all monitors that have been hardwired into a code by calls to TaoSetMonitor(), but does not cancel those set via the options database.
1395: Notes:
1396: Several different monitoring routines may be set by calling
1397: TaoSetMonitor() multiple times; all will be called in the
1398: order in which they were set.
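   Example Usage (a sketch; MyMonitor is a hypothetical user routine and error checking is omitted):
.vb
   PetscErrorCode MyMonitor(Tao tao, void *mctx)  /* hypothetical user routine */
   {
     PetscInt           its;
     PetscReal          f,gnorm,cnorm,xdiff;
     TaoConvergedReason reason;

     TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,&reason);
     PetscPrintf(PETSC_COMM_WORLD,"iter %D: f = %g, residual = %g\n",its,(double)f,(double)gnorm);
     return 0;
   }
   ...
   TaoSetMonitor(tao,MyMonitor,NULL,NULL);
.ve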
1400: Fortran Notes:
1401: Only one monitor function may be set
1403: Level: intermediate
1405: .seealso: TaoMonitorDefault(), TaoCancelMonitors(), TaoSetDestroyRoutine()
1406: @*/
1407: PetscErrorCode TaoSetMonitor(Tao tao, PetscErrorCode (*func)(Tao, void*), void *ctx,PetscErrorCode (*dest)(void**))
1408: {
1409: PetscInt i;
1410: PetscBool identical;
1415: for (i=0; i<tao->numbermonitors;i++) {
1416: PetscMonitorCompare((PetscErrorCode (*)(void))func,ctx,dest,(PetscErrorCode (*)(void))tao->monitor[i],tao->monitorcontext[i],tao->monitordestroy[i],&identical);
1417: if (identical) return 0;
1418: }
1419: tao->monitor[tao->numbermonitors] = func;
1420: tao->monitorcontext[tao->numbermonitors] = (void*)ctx;
1421: tao->monitordestroy[tao->numbermonitors] = dest;
1422: ++tao->numbermonitors;
1423: return 0;
1424: }
1426: /*@
1427: TaoCancelMonitors - Clears all the monitor functions for a Tao object.
1429: Logically Collective on Tao
1431: Input Parameters:
1432: . tao - the Tao solver context
1434: Options Database:
1435: . -tao_cancelmonitors - cancels all monitors that have been hardwired
1436: into a code by calls to TaoSetMonitor(), but does not cancel those
1437: set via the options database
1439: Notes:
1440: There is no way to clear one specific monitor from a Tao object.
1442: Level: advanced
1444: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1445: @*/
1446: PetscErrorCode TaoCancelMonitors(Tao tao)
1447: {
1448: PetscInt i;
1451: for (i=0;i<tao->numbermonitors;i++) {
1452: if (tao->monitordestroy[i]) {
1453: (*tao->monitordestroy[i])(&tao->monitorcontext[i]);
1454: }
1455: }
1456: tao->numbermonitors = 0;
1457: return 0;
1458: }
1460: /*@
1461: TaoMonitorDefault - Default routine for monitoring progress of the
1462: Tao solvers (default). This monitor prints the function value and gradient
1463: norm at each iteration. It can be turned on from the command line using the
1464: -tao_monitor option
1466: Collective on Tao
1468: Input Parameters:
1469: + tao - the Tao context
1470: - ctx - PetscViewer context or NULL
1472: Options Database Keys:
1473: . -tao_monitor - turn on default monitoring
1475: Level: advanced
1477: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1478: @*/
1479: PetscErrorCode TaoMonitorDefault(Tao tao, void *ctx)
1480: {
1481: PetscInt its, tabs;
1482: PetscReal fct,gnorm;
1483: PetscViewer viewer = (PetscViewer)ctx;
1487: its = tao->niter;
1488: fct = tao->fc;
1489: gnorm = tao->residual;
1490: PetscViewerASCIIGetTab(viewer, &tabs);
1491: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1492: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1493: PetscViewerASCIIPrintf(viewer," Iteration information for %s solve.\n",((PetscObject)tao)->prefix);
1494: tao->header_printed = PETSC_TRUE;
1495: }
1496: PetscViewerASCIIPrintf(viewer,"%3D TAO,",its);
1497: PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1498: if (gnorm >= PETSC_INFINITY) {
1499: PetscViewerASCIIPrintf(viewer," Residual: Inf \n");
1500: } else {
1501: PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1502: }
1503: PetscViewerASCIISetTab(viewer, tabs);
1504: return 0;
1505: }
1507: /*@
1508: TaoDefaultGMonitor - Default routine for monitoring progress of the
1509: Tao solvers (default) with extra detail on the globalization method.
1510: This monitor prints the function value and gradient norm at each
1511: iteration, as well as the step size and trust radius. Note that the
1512: step size and trust radius may be the same for some algorithms.
1513: It can be turned on from the command line using the
1514: -tao_gmonitor option
1516: Collective on Tao
1518: Input Parameters:
1519: + tao - the Tao context
1520: - ctx - PetscViewer context or NULL
1522: Options Database Keys:
1523: . -tao_gmonitor - turn on monitoring with globalization information
1525: Level: advanced
1527: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1528: @*/
1529: PetscErrorCode TaoDefaultGMonitor(Tao tao, void *ctx)
1530: {
1531: PetscInt its, tabs;
1532: PetscReal fct,gnorm,stp,tr;
1533: PetscViewer viewer = (PetscViewer)ctx;
1537: its = tao->niter;
1538: fct = tao->fc;
1539: gnorm = tao->residual;
1540: stp = tao->step;
1541: tr = tao->trust;
1542: PetscViewerASCIIGetTab(viewer, &tabs);
1543: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1544: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1545: PetscViewerASCIIPrintf(viewer," Iteration information for %s solve.\n",((PetscObject)tao)->prefix);
1546: tao->header_printed = PETSC_TRUE;
1547: }
1548: PetscViewerASCIIPrintf(viewer,"%3D TAO,",its);
1549: PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1550: if (gnorm >= PETSC_INFINITY) {
1551: PetscViewerASCIIPrintf(viewer," Residual: Inf,");
1552: } else {
1553: PetscViewerASCIIPrintf(viewer," Residual: %g,",(double)gnorm);
1554: }
1555: PetscViewerASCIIPrintf(viewer," Step: %g, Trust: %g\n",(double)stp,(double)tr);
1556: PetscViewerASCIISetTab(viewer, tabs);
1557: return 0;
1558: }
1560: /*@
1561: TaoDefaultSMonitor - Default routine for monitoring progress of the
1562: solver. Same as TaoMonitorDefault() except
1563: it prints fewer digits of the residual as the residual gets smaller.
1564: This is because the later digits are meaningless and are often
1565: different on different machines; by using this routine different
1566: machines will usually generate the same output. It can be turned on
1567: by using the -tao_smonitor option
1569: Collective on Tao
1571: Input Parameters:
1572: + tao - the Tao context
1573: - ctx - PetscViewer context of type ASCII
1575: Options Database Keys:
1576: . -tao_smonitor - turn on default short monitoring
1578: Level: advanced
1580: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1581: @*/
1582: PetscErrorCode TaoDefaultSMonitor(Tao tao, void *ctx)
1583: {
1584: PetscInt its, tabs;
1585: PetscReal fct,gnorm;
1586: PetscViewer viewer = (PetscViewer)ctx;
1590: its = tao->niter;
1591: fct = tao->fc;
1592: gnorm = tao->residual;
1593: PetscViewerASCIIGetTab(viewer, &tabs);
1594: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1595: PetscViewerASCIIPrintf(viewer,"iter = %3D,",its);
1596: PetscViewerASCIIPrintf(viewer," Function value %g,",(double)fct);
1597: if (gnorm >= PETSC_INFINITY) {
1598: PetscViewerASCIIPrintf(viewer," Residual: Inf \n");
1599: } else if (gnorm > 1.e-6) {
1600: PetscViewerASCIIPrintf(viewer," Residual: %g \n",(double)gnorm);
1601: } else if (gnorm > 1.e-11) {
1602: PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-6 \n");
1603: } else {
1604: PetscViewerASCIIPrintf(viewer," Residual: < 1.0e-11 \n");
1605: }
1606: PetscViewerASCIISetTab(viewer, tabs);
1607: return 0;
1608: }
1610: /*@
1611: TaoDefaultCMonitor - same as TaoMonitorDefault() except
1612: it prints the norm of the constraints function. It can be turned on
1613: from the command line using the -tao_cmonitor option
1615: Collective on Tao
1617: Input Parameters:
1618: + tao - the Tao context
1619: - ctx - PetscViewer context or NULL
1621: Options Database Keys:
1622: . -tao_cmonitor - monitor the constraints
1624: Level: advanced
1626: .seealso: TaoMonitorDefault(), TaoSetMonitor()
1627: @*/
1628: PetscErrorCode TaoDefaultCMonitor(Tao tao, void *ctx)
1629: {
1630: PetscInt its, tabs;
1631: PetscReal fct,gnorm;
1632: PetscViewer viewer = (PetscViewer)ctx;
1636: its = tao->niter;
1637: fct = tao->fc;
1638: gnorm = tao->residual;
1639: PetscViewerASCIIGetTab(viewer, &tabs);
1640: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1641: PetscViewerASCIIPrintf(viewer,"iter = %D,",its);
1642: PetscViewerASCIIPrintf(viewer," Function value: %g,",(double)fct);
1643: PetscViewerASCIIPrintf(viewer," Residual: %g ",(double)gnorm);
1644: PetscViewerASCIIPrintf(viewer," Constraint: %g \n",(double)tao->cnorm);
1645: PetscViewerASCIISetTab(viewer, tabs);
1646: return 0;
1647: }
1649: /*@C
1650: TaoSolutionMonitor - Views the solution at each iteration
1651: It can be turned on from the command line using the
1652: -tao_view_solution option
1654: Collective on Tao
1656: Input Parameters:
1657: + tao - the Tao context
1658: - ctx - PetscViewer context or NULL
1660: Options Database Keys:
1661: . -tao_view_solution - view the solution
1663: Level: advanced
1665: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1666: @*/
1667: PetscErrorCode TaoSolutionMonitor(Tao tao, void *ctx)
1668: {
1669: PetscViewer viewer = (PetscViewer)ctx;
1673: VecView(tao->solution,viewer);
1674: return 0;
1675: }
1677: /*@C
1678: TaoGradientMonitor - Views the gradient at each iteration
1679: It can be turned on from the command line using the
1680: -tao_view_gradient option
1682: Collective on Tao
1684: Input Parameters:
1685: + tao - the Tao context
1686: - ctx - PetscViewer context or NULL
1688: Options Database Keys:
1689: . -tao_view_gradient - view the gradient at each iteration
1691: Level: advanced
1693: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1694: @*/
1695: PetscErrorCode TaoGradientMonitor(Tao tao, void *ctx)
1696: {
1697: PetscViewer viewer = (PetscViewer)ctx;
1701: VecView(tao->gradient,viewer);
1702: return 0;
1703: }
1705: /*@C
1706: TaoStepDirectionMonitor - Views the step-direction at each iteration
1708: Collective on Tao
1710: Input Parameters:
1711: + tao - the Tao context
1712: - ctx - PetscViewer context or NULL
1714: Options Database Keys:
1715: . -tao_view_stepdirection - view the step direction at each iteration
1717: Level: advanced
1719: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1720: @*/
1721: PetscErrorCode TaoStepDirectionMonitor(Tao tao, void *ctx)
1722: {
1723: PetscViewer viewer = (PetscViewer)ctx;
1727: VecView(tao->stepdirection,viewer);
1728: return 0;
1729: }
1731: /*@C
1732: TaoDrawSolutionMonitor - Plots the solution at each iteration
1733: It can be turned on from the command line using the
1734: -tao_draw_solution option
1736: Collective on Tao
1738: Input Parameters:
1739: + tao - the Tao context
1740: - ctx - TaoMonitorDraw context
1742: Options Database Keys:
1743: . -tao_draw_solution - draw the solution at each iteration
1745: Level: advanced
1747: .seealso: TaoSolutionMonitor(), TaoSetMonitor(), TaoDrawGradientMonitor
1748: @*/
1749: PetscErrorCode TaoDrawSolutionMonitor(Tao tao, void *ctx)
1750: {
1751: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1754: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return 0;
1755: VecView(tao->solution,ictx->viewer);
1756: return 0;
1757: }
1759: /*@C
1760: TaoDrawGradientMonitor - Plots the gradient at each iteration
1761: It can be turned on from the command line using the
1762: -tao_draw_gradient option
1764: Collective on Tao
1766: Input Parameters:
1767: + tao - the Tao context
1768: - ctx - PetscViewer context
1770: Options Database Keys:
1771: . -tao_draw_gradient - draw the gradient at each iteration
1773: Level: advanced
1775: .seealso: TaoGradientMonitor(), TaoSetMonitor(), TaoDrawSolutionMonitor
1776: @*/
1777: PetscErrorCode TaoDrawGradientMonitor(Tao tao, void *ctx)
1778: {
1779: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1782: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return 0;
1783: VecView(tao->gradient,ictx->viewer);
1784: return 0;
1785: }
1787: /*@C
1788: TaoDrawStepMonitor - Plots the step direction at each iteration
1790: Collective on Tao
1792: Input Parameters:
1793: + tao - the Tao context
1794: - ctx - PetscViewer context
1796: Options Database Keys:
1797: . -tao_draw_step - draw the step direction at each iteration
1799: Level: advanced
1801: .seealso: TaoSetMonitor(), TaoDrawSolutionMonitor
1802: @*/
1803: PetscErrorCode TaoDrawStepMonitor(Tao tao, void *ctx)
1804: {
1805: PetscViewer viewer = (PetscViewer)ctx;
1809: VecView(tao->stepdirection,viewer);
1810: return 0;
1811: }
1813: /*@C
1814: TaoResidualMonitor - Views the least-squares residual at each iteration
1816: Collective on Tao
1818: Input Parameters:
1819: + tao - the Tao context
1820: - ctx - PetscViewer context or NULL
1822: Options Database Keys:
1823: . -tao_view_ls_residual - view the least-squares residual at each iteration
1825: Level: advanced
1827: .seealso: TaoDefaultSMonitor(), TaoSetMonitor()
1828: @*/
1829: PetscErrorCode TaoResidualMonitor(Tao tao, void *ctx)
1830: {
1831: PetscViewer viewer = (PetscViewer)ctx;
1835: VecView(tao->ls_res,viewer);
1836: return 0;
1837: }
1839: /*@
1840: TaoDefaultConvergenceTest - Determines whether the solver should continue iterating
1841: or terminate.
1843: Collective on Tao
1845: Input Parameters:
1846: + tao - the Tao context
1847: - dummy - unused dummy context
1849: Output Parameter:
1850: . reason - the reason for terminating (stored in the Tao context; retrieve it with TaoGetConvergedReason())
1852: Notes:
1853: This routine checks the residual in the optimality conditions, the
1854: relative residual in the optimality conditions, the number of function
1855: evaluations, and the function value to test convergence. Some
1856: solvers may use different convergence routines.
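   This is the test installed by TaoCreate(); if a different test has been set with
   TaoSetConvergenceTest(), the default behavior can be restored with, e.g. (a sketch):
.vb
   TaoSetConvergenceTest(tao,TaoDefaultConvergenceTest,NULL);
.ve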
1858: Level: developer
1860: .seealso: TaoSetTolerances(), TaoGetConvergedReason(), TaoSetConvergedReason()
1861: @*/
1863: PetscErrorCode TaoDefaultConvergenceTest(Tao tao,void *dummy)
1864: {
1865: PetscInt niter=tao->niter, nfuncs=PetscMax(tao->nfuncs,tao->nfuncgrads);
1866: PetscInt max_funcs=tao->max_funcs;
1867: PetscReal gnorm=tao->residual, gnorm0=tao->gnorm0;
1868: PetscReal f=tao->fc, steptol=tao->steptol,trradius=tao->step;
1869: PetscReal gatol=tao->gatol,grtol=tao->grtol,gttol=tao->gttol;
1870: PetscReal catol=tao->catol,crtol=tao->crtol;
1871: PetscReal fmin=tao->fmin, cnorm=tao->cnorm;
1872: TaoConvergedReason reason=tao->reason;
1875: if (reason != TAO_CONTINUE_ITERATING) return 0;
1877: if (PetscIsInfOrNanReal(f)) {
1878: PetscInfo(tao,"Failed to converge, function value is Inf or NaN\n");
1879: reason = TAO_DIVERGED_NAN;
1880: } else if (f <= fmin && cnorm <=catol) {
1881: PetscInfo(tao,"Converged due to function value %g < minimum function value %g\n", (double)f,(double)fmin);
1882: reason = TAO_CONVERGED_MINF;
1883: } else if (gnorm<= gatol && cnorm <=catol) {
1884: PetscInfo(tao,"Converged due to residual norm ||g(X)||=%g < %g\n",(double)gnorm,(double)gatol);
1885: reason = TAO_CONVERGED_GATOL;
1886: } else if (f!=0 && PetscAbsReal(gnorm/f) <= grtol && cnorm <= crtol) {
1887: PetscInfo(tao,"Converged due to residual ||g(X)||/|f(X)| =%g < %g\n",(double)(gnorm/f),(double)grtol);
1888: reason = TAO_CONVERGED_GRTOL;
1889: } else if (gnorm0 != 0 && ((gttol == 0 && gnorm == 0) || gnorm/gnorm0 < gttol) && cnorm <= crtol) {
1890: PetscInfo(tao,"Converged due to relative residual norm ||g(X)||/||g(X0)|| = %g < %g\n",(double)(gnorm/gnorm0),(double)gttol);
1891: reason = TAO_CONVERGED_GTTOL;
1892: } else if (max_funcs >=0 && nfuncs > max_funcs) {
1893: PetscInfo(tao,"Exceeded maximum number of function evaluations: %" PetscInt_FMT " > %" PetscInt_FMT "\n", nfuncs,max_funcs);
1894: reason = TAO_DIVERGED_MAXFCN;
1895: } else if (tao->lsflag != 0) {
1896: PetscInfo(tao,"Tao Line Search failure.\n");
1897: reason = TAO_DIVERGED_LS_FAILURE;
1898: } else if (trradius < steptol && niter > 0) {
1899: PetscInfo(tao,"Trust region/step size too small: %g < %g\n", (double)trradius,(double)steptol);
1900: reason = TAO_CONVERGED_STEPTOL;
1901: } else if (niter >= tao->max_it) {
1902: PetscInfo(tao,"Exceeded maximum number of iterations: %" PetscInt_FMT " >= %" PetscInt_FMT "\n",niter,tao->max_it);
1903: reason = TAO_DIVERGED_MAXITS;
1904: } else {
1905: reason = TAO_CONTINUE_ITERATING;
1906: }
1907: tao->reason = reason;
1908: return 0;
1909: }
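/*
   Example (illustrative sketch): the quantities tested by TaoDefaultConvergenceTest() are controlled
   with the standard setter routines, and the test itself can be replaced with TaoSetConvergenceTest().
   TaoSetTolerances(), TaoSetMaximumIterations(), TaoSetMaximumFunctionEvaluations() and the
   convergence-test calling sequence are assumed from the public Tao API; MyConvergenceTest is a
   hypothetical name and error checking is omitted.

.vb
   // Tolerances and limits used by the default test:
   TaoSetTolerances(tao,1e-8,1e-8,0.0);           // gatol, grtol, gttol
   TaoSetMaximumIterations(tao,500);
   TaoSetMaximumFunctionEvaluations(tao,5000);

   // Alternatively, supply a user-defined test that sets the reason itself:
   PetscErrorCode MyConvergenceTest(Tao tao,void *ctx)
   {
     PetscInt  its;
     PetscReal gnorm;
     TaoGetIterationNumber(tao,&its);
     TaoGetResidualNorm(tao,&gnorm);
     if (gnorm < 1.e-6)   TaoSetConvergedReason(tao,TAO_CONVERGED_USER);
     else if (its >= 100) TaoSetConvergedReason(tao,TAO_DIVERGED_MAXITS);
     return 0;
   }
   ...
   TaoSetConvergenceTest(tao,MyConvergenceTest,NULL);
.ve
*/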
1911: /*@C
1912: TaoSetOptionsPrefix - Sets the prefix used for searching for all
1913: TAO options in the database.
1915: Logically Collective on Tao
1917: Input Parameters:
1918: + tao - the Tao context
1919: - prefix - the prefix string to prepend to all TAO option requests
1921: Notes:
1922: A hyphen (-) must NOT be given at the beginning of the prefix name.
1923: The first character of all runtime options is AUTOMATICALLY the hyphen.
1925: For example, to distinguish between the runtime options for two
1926: different TAO solvers, one could call
1927: .vb
1928: TaoSetOptionsPrefix(tao1,"sys1_")
1929: TaoSetOptionsPrefix(tao2,"sys2_")
1930: .ve
1932: This would enable use of different options for each system, such as
1933: .vb
1934: -sys1_tao_method blmvm -sys1_tao_grtol 1.e-3
1935: -sys2_tao_method lmvm -sys2_tao_grtol 1.e-4
1936: .ve
1938: Level: advanced
1940: .seealso: TaoAppendOptionsPrefix(), TaoGetOptionsPrefix()
1941: @*/
1943: PetscErrorCode TaoSetOptionsPrefix(Tao tao, const char p[])
1944: {
1946: PetscObjectSetOptionsPrefix((PetscObject)tao,p);
1947: if (tao->linesearch) {
1948: TaoLineSearchSetOptionsPrefix(tao->linesearch,p);
1949: }
1950: if (tao->ksp) {
1951: KSPSetOptionsPrefix(tao->ksp,p);
1952: }
1953: return 0;
1954: }
1956: /*@C
1957: TaoAppendOptionsPrefix - Appends to the prefix used for searching for all
1958: TAO options in the database.
1960: Logically Collective on Tao
1962: Input Parameters:
1963: + tao - the Tao solver context
1964: - prefix - the prefix string to prepend to all TAO option requests
1966: Notes:
1967: A hyphen (-) must NOT be given at the beginning of the prefix name.
1968: The first character of all runtime options is AUTOMATICALLY the hyphen.
1970: Level: advanced
1972: .seealso: TaoSetOptionsPrefix(), TaoGetOptionsPrefix()
1973: @*/
1974: PetscErrorCode TaoAppendOptionsPrefix(Tao tao, const char p[])
1975: {
1977: PetscObjectAppendOptionsPrefix((PetscObject)tao,p);
1978: if (tao->linesearch) {
1979: PetscObjectAppendOptionsPrefix((PetscObject)tao->linesearch,p);
1980: }
1981: if (tao->ksp) {
1982: KSPAppendOptionsPrefix(tao->ksp,p);
1983: }
1984: return 0;
1985: }
1987: /*@C
1988: TaoGetOptionsPrefix - Gets the prefix used for searching for all
1989: TAO options in the database
1991: Not Collective
1993: Input Parameters:
1994: . tao - the Tao context
1996: Output Parameters:
1997: . prefix - pointer to the prefix string currently in use
1999: Notes:
2000: On the Fortran side, the user should pass in a string 'prefix' of
2001: sufficient length to hold the prefix.
2003: Level: advanced
2005: .seealso: TaoSetOptionsPrefix(), TaoAppendOptionsPrefix()
2006: @*/
2007: PetscErrorCode TaoGetOptionsPrefix(Tao tao, const char *p[])
2008: {
2010: PetscObjectGetOptionsPrefix((PetscObject)tao,p);
2011: return 0;
2012: }
2014: /*@C
2015: TaoSetType - Sets the method for the unconstrained minimization solver.
2017: Collective on Tao
2019: Input Parameters:
2020: + solver - the Tao solver context
2021: - type - a known method
2023: Options Database Key:
2024: . -tao_type <type> - Sets the method; use -help for a list
2025: of available methods (for instance, "-tao_type lmvm" or "-tao_type tron")
2027: Available methods include:
2028: + nls - Newton's method with line search for unconstrained minimization
2029: . ntr - Newton's method with trust region for unconstrained minimization
2030: . ntl - Newton's method with trust region, line search for unconstrained minimization
2031: . lmvm - Limited memory variable metric method for unconstrained minimization
2032: . cg - Nonlinear conjugate gradient method for unconstrained minimization
2033: . nm - Nelder-Mead algorithm for derivative-free unconstrained minimization
2034: . tron - Newton Trust Region method for bound constrained minimization
2035: . gpcg - Newton Trust Region method for quadratic bound constrained minimization
2036: . blmvm - Limited memory variable metric method for bound constrained minimization
2037: - pounders - Model-based algorithm for derivative-free nonlinear least squares
2039: Level: intermediate
2041: .seealso: TaoCreate(), TaoGetType(), TaoType
2043: @*/
2044: PetscErrorCode TaoSetType(Tao tao, TaoType type)
2045: {
2046: PetscErrorCode (*create_xxx)(Tao);
2047: PetscBool issame;
2051: PetscObjectTypeCompare((PetscObject)tao,type,&issame);
2052: if (issame) return 0;
2054: PetscFunctionListFind(TaoList, type, (void(**)(void))&create_xxx);
2057: /* Destroy the existing solver information */
2058: if (tao->ops->destroy) {
2059: (*tao->ops->destroy)(tao);
2060: }
2061: KSPDestroy(&tao->ksp);
2062: TaoLineSearchDestroy(&tao->linesearch);
2063: VecDestroy(&tao->gradient);
2064: VecDestroy(&tao->stepdirection);
2066: tao->ops->setup = NULL;
2067: tao->ops->solve = NULL;
2068: tao->ops->view = NULL;
2069: tao->ops->setfromoptions = NULL;
2070: tao->ops->destroy = NULL;
2072: tao->setupcalled = PETSC_FALSE;
2074: (*create_xxx)(tao);
2075: PetscObjectChangeTypeName((PetscObject)tao,type);
2076: return 0;
2077: }
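/*
   Example (illustrative sketch): typical lifecycle around TaoSetType(). TAOLMVM is the type constant
   corresponding to "lmvm"; TaoSetFromOptions() allows -tao_type to override the hard-coded choice.
   Problem setup (solution vector, objective, gradient) is elided and error checking is omitted.

.vb
   Tao tao;
   TaoCreate(PETSC_COMM_WORLD,&tao);
   TaoSetType(tao,TAOLMVM);        // equivalent to -tao_type lmvm
   ...                             // TaoSetSolution(), TaoSetObjective(), TaoSetGradient(), ...
   TaoSetFromOptions(tao);         // allow runtime override, e.g. -tao_type ntr
   TaoSolve(tao);
   TaoDestroy(&tao);
.ve
*/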
2079: /*MC
2080: TaoRegister - Adds a method to the TAO package for unconstrained minimization.
2082: Synopsis:
2083: TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2085: Not collective
2087: Input Parameters:
2088: + sname - name of a new user-defined solver
2089: - func - routine to Create method context
2091: Notes:
2092: TaoRegister() may be called multiple times to add several user-defined solvers.
2094: Sample usage:
2095: .vb
2096: TaoRegister("my_solver",MySolverCreate);
2097: .ve
2099: Then, your solver can be chosen with the procedural interface via
2100: $ TaoSetType(tao,"my_solver")
2101: or at runtime via the option
2102: $ -tao_type my_solver
2104: Level: advanced
2106: .seealso: TaoRegisterAll(), TaoRegisterDestroy()
2107: M*/
2108: PetscErrorCode TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2109: {
2110: TaoInitializePackage();
2111: PetscFunctionListAdd(&TaoList,sname,(void (*)(void))func);
2112: return 0;
2113: }
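/*
   Example (illustrative sketch): registering a user-defined solver. MySolverCreate, MySolverSolve,
   and MySolverDestroy are hypothetical names; the ops fields assigned below are the ones cleared in
   TaoSetType() above. Error checking is omitted.

.vb
   static PetscErrorCode MySolverSolve(Tao tao)   { ... }
   static PetscErrorCode MySolverDestroy(Tao tao) { ... }

   PetscErrorCode MySolverCreate(Tao tao)
   {
     tao->ops->solve   = MySolverSolve;
     tao->ops->destroy = MySolverDestroy;
     return 0;
   }

   TaoRegister("my_solver",MySolverCreate);
   TaoSetType(tao,"my_solver");      // or -tao_type my_solver at runtime
.ve
*/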
2115: /*@C
2116: TaoRegisterDestroy - Frees the list of minimization solvers that were
2117: registered by TaoRegister().
2119: Not Collective
2121: Level: advanced
2123: .seealso: TaoRegisterAll(), TaoRegister()
2124: @*/
2125: PetscErrorCode TaoRegisterDestroy(void)
2126: {
2127: PetscFunctionListDestroy(&TaoList);
2128: TaoRegisterAllCalled = PETSC_FALSE;
2129: return 0;
2130: }
2132: /*@
2133: TaoGetIterationNumber - Gets the number of Tao iterations completed
2134: at this time.
2136: Not Collective
2138: Input Parameter:
2139: . tao - Tao context
2141: Output Parameter:
2142: . iter - iteration number
2144: Notes:
2145: For example, during the computation of iteration 2 this would return 1.
2147: Level: intermediate
2149: .seealso: TaoGetLinearSolveIterations(), TaoGetResidualNorm(), TaoGetObjective()
2150: @*/
2151: PetscErrorCode TaoGetIterationNumber(Tao tao,PetscInt *iter)
2152: {
2155: *iter = tao->niter;
2156: return 0;
2157: }
2159: /*@
2160: TaoGetResidualNorm - Gets the current value of the norm of the residual
2161: at this time.
2163: Not Collective
2165: Input Parameter:
2166: . tao - Tao context
2168: Output Parameter:
2169: . value - the current value
2171: Level: intermediate
2173: Developer Note: This is the 2-norm of the residual; we cannot use TaoGetGradientNorm() because that has
2174: a different meaning. For some reason Tao sometimes calls the gradient the residual.
2176: .seealso: TaoGetLinearSolveIterations(), TaoGetIterationNumber(), TaoGetObjective()
2177: @*/
2178: PetscErrorCode TaoGetResidualNorm(Tao tao,PetscReal *value)
2179: {
2182: *value = tao->residual;
2183: return 0;
2184: }
2186: /*@
2187: TaoSetIterationNumber - Sets the current iteration number.
2189: Logically Collective on Tao
2191: Input Parameters:
2192: + tao - Tao context
2193: - iter - iteration number
2195: Level: developer
2197: .seealso: TaoGetLinearSolveIterations()
2198: @*/
2199: PetscErrorCode TaoSetIterationNumber(Tao tao,PetscInt iter)
2200: {
2203: PetscObjectSAWsTakeAccess((PetscObject)tao);
2204: tao->niter = iter;
2205: PetscObjectSAWsGrantAccess((PetscObject)tao);
2206: return 0;
2207: }
2209: /*@
2210: TaoGetTotalIterationNumber - Gets the total number of Tao iterations
2211: completed. This number keeps accumulating if multiple solves
2212: are called with the Tao object.
2214: Not Collective
2216: Input Parameter:
2217: . tao - Tao context
2219: Output Parameter:
2220: . iter - iteration number
2222: Notes:
2223: The total iteration count is updated after each solve; if a TaoSolve() is currently
2224: in progress, those iterations are not yet counted.
2226: Level: intermediate
2228: .seealso: TaoGetLinearSolveIterations()
2229: @*/
2230: PetscErrorCode TaoGetTotalIterationNumber(Tao tao,PetscInt *iter)
2231: {
2234: *iter = tao->ntotalits;
2235: return 0;
2236: }
2238: /*@
2239: TaoSetTotalIterationNumber - Sets the current total iteration number.
2241: Logically Collective on Tao
2243: Input Parameters:
2244: + tao - Tao context
2245: - iter - iteration number
2247: Level: developer
2249: .seealso: TaoGetLinearSolveIterations()
2250: @*/
2251: PetscErrorCode TaoSetTotalIterationNumber(Tao tao,PetscInt iter)
2252: {
2255: PetscObjectSAWsTakeAccess((PetscObject)tao);
2256: tao->ntotalits = iter;
2257: PetscObjectSAWsGrantAccess((PetscObject)tao);
2258: return 0;
2259: }
2261: /*@
2262: TaoSetConvergedReason - Sets the termination flag on a Tao object
2264: Logically Collective on Tao
2266: Input Parameters:
2267: + tao - the Tao context
2268: - reason - one of
2269: $ TAO_CONVERGED_GATOL (3),
2270: $ TAO_CONVERGED_GRTOL (4),
2271: $ TAO_CONVERGED_GTTOL (5),
2272: $ TAO_CONVERGED_STEPTOL (6),
2273: $ TAO_CONVERGED_MINF (7),
2274: $ TAO_CONVERGED_USER (8),
2275: $ TAO_DIVERGED_MAXITS (-2),
2276: $ TAO_DIVERGED_NAN (-4),
2277: $ TAO_DIVERGED_MAXFCN (-5),
2278: $ TAO_DIVERGED_LS_FAILURE (-6),
2279: $ TAO_DIVERGED_TR_REDUCTION (-7),
2280: $ TAO_DIVERGED_USER (-8),
$ TAO_CONTINUE_ITERATING (0)
2282: Level: intermediate
2284: @*/
2285: PetscErrorCode TaoSetConvergedReason(Tao tao, TaoConvergedReason reason)
2286: {
2289: tao->reason = reason;
2290: return 0;
2291: }
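/*
   Example (illustrative sketch): terminating a solve from user code, here from inside a monitor, by
   setting TAO_CONVERGED_USER. MyMonitor is a hypothetical name and the TaoSetMonitor() calling
   sequence is assumed; error checking is omitted.

.vb
   PetscErrorCode MyMonitor(Tao tao,void *ctx)
   {
     PetscInt its;
     TaoGetIterationNumber(tao,&its);
     if (its >= 25) TaoSetConvergedReason(tao,TAO_CONVERGED_USER);  // stop after 25 iterations
     return 0;
   }

   TaoSetMonitor(tao,MyMonitor,NULL,NULL);
.ve
*/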
2293: /*@
2294: TaoGetConvergedReason - Gets the reason the Tao iteration was stopped.
2296: Not Collective
2298: Input Parameter:
2299: . tao - the Tao solver context
2301: Output Parameter:
2302: . reason - one of
2303: $ TAO_CONVERGED_GATOL (3) ||g(X)|| < gatol
2304: $ TAO_CONVERGED_GRTOL (4) ||g(X)|| / f(X) < grtol
2305: $ TAO_CONVERGED_GTTOL (5) ||g(X)|| / ||g(X0)|| < gttol
2306: $ TAO_CONVERGED_STEPTOL (6) step size small
2307: $ TAO_CONVERGED_MINF (7) F < F_min
2308: $ TAO_CONVERGED_USER (8) User defined
2309: $ TAO_DIVERGED_MAXITS (-2) its > maxits
2310: $ TAO_DIVERGED_NAN (-4) Numerical problems
2311: $ TAO_DIVERGED_MAXFCN (-5) fevals > max_funcs
2312: $ TAO_DIVERGED_LS_FAILURE (-6) line search failure
2313: $ TAO_DIVERGED_TR_REDUCTION (-7) trust region failure
2314: $ TAO_DIVERGED_USER (-8) user defined
2315: $ TAO_CONTINUE_ITERATING (0)
2317: where
2318: + X - current solution
2319: . X0 - initial guess
2320: . f(X) - current function value
2321: . f(X*) - true solution (estimated)
2322: . g(X) - current gradient
2323: . its - current iterate number
2324: . maxits - maximum number of iterates
2325: . fevals - number of function evaluations
2326: - max_funcs - maximum number of function evaluations
2328: Level: intermediate
2330: .seealso: TaoSetConvergenceTest(), TaoSetTolerances()
2332: @*/
2333: PetscErrorCode TaoGetConvergedReason(Tao tao, TaoConvergedReason *reason)
2334: {
2337: *reason = tao->reason;
2338: return 0;
2339: }
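/*
   Example (illustrative sketch): checking the outcome after TaoSolve(), mirroring what
   -tao_converged_reason reports. Error checking is omitted.

.vb
   TaoConvergedReason reason;
   TaoSolve(tao);
   TaoGetConvergedReason(tao,&reason);
   if (reason < 0) PetscPrintf(PETSC_COMM_WORLD,"Tao diverged, reason %d\n",(int)reason);
   else            PetscPrintf(PETSC_COMM_WORLD,"Tao converged, reason %d\n",(int)reason);
.ve
*/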
2341: /*@
2342: TaoGetSolutionStatus - Get the current iterate, objective value,
2343: residual, infeasibility, and termination
2345: Not Collective
2347: Input Parameter:
2348: . tao - the Tao context
2350: Output Parameters:
2351: + its - the current iterate number (>=0)
2352: . f - the current function value
2353: . gnorm - the gradient norm, square root of the duality gap, or other measure indicating distance from optimality.
2354: . cnorm - the infeasibility of the current solution with regard to the constraints.
2355: . xdiff - the step length or trust region radius of the most recent iterate.
2356: - reason - The termination reason, which can equal TAO_CONTINUE_ITERATING
2358: Level: intermediate
2360: Notes:
2361: TAO returns the values set by the solvers in the routine TaoMonitor().
2364: If any of the output arguments are set to NULL, no corresponding value will be returned.
2366: .seealso: TaoMonitor(), TaoGetConvergedReason()
2367: @*/
2368: PetscErrorCode TaoGetSolutionStatus(Tao tao, PetscInt *its, PetscReal *f, PetscReal *gnorm, PetscReal *cnorm, PetscReal *xdiff, TaoConvergedReason *reason)
2369: {
2371: if (its) *its = tao->niter;
2372: if (f) *f = tao->fc;
2373: if (gnorm) *gnorm = tao->residual;
2374: if (cnorm) *cnorm = tao->cnorm;
2375: if (reason) *reason = tao->reason;
2376: if (xdiff) *xdiff = tao->step;
2377: return 0;
2378: }
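/*
   Example (illustrative sketch): a minimal custom monitor built on TaoGetSolutionStatus() and
   attached with TaoSetMonitor() (calling sequence assumed). MyStatusMonitor is a hypothetical name;
   error checking is omitted.

.vb
   PetscErrorCode MyStatusMonitor(Tao tao,void *ctx)
   {
     PetscInt  its;
     PetscReal f,gnorm,cnorm,xdiff;
     TaoGetSolutionStatus(tao,&its,&f,&gnorm,&cnorm,&xdiff,NULL);
     PetscPrintf(PetscObjectComm((PetscObject)tao),"%" PetscInt_FMT ": f=%g  ||g||=%g  step=%g\n",its,(double)f,(double)gnorm,(double)xdiff);
     return 0;
   }

   TaoSetMonitor(tao,MyStatusMonitor,NULL,NULL);
.ve
*/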
2380: /*@C
2381: TaoGetType - Gets the current Tao algorithm.
2383: Not Collective
2385: Input Parameter:
2386: . tao - the Tao solver context
2388: Output Parameter:
2389: . type - Tao method
2391: Level: intermediate
2393: @*/
2394: PetscErrorCode TaoGetType(Tao tao,TaoType *type)
2395: {
2398: *type = ((PetscObject)tao)->type_name;
2399: return 0;
2400: }
2402: /*@C
2403: TaoMonitor - Monitor the solver and the current solution. This
2404: routine will record the iteration number and residual statistics,
2405: and call any monitors specified by the user.
2407: Input Parameters:
2408: + tao - the Tao context
2409: . its - the current iterate number (>=0)
2410: . f - the current objective function value
2411: . res - the gradient norm, square root of the duality gap, or other measure indicating distance from optimality. This measure will be recorded and
2412: used for some termination tests.
2413: . cnorm - the infeasibility of the current solution with regard to the constraints.
2414: - steplength - multiple of the step direction added to the previous iterate.
2416: Notes:
2417: This routine does not itself determine convergence; the termination reason is set by the solver's convergence test and can be retrieved with TaoGetConvergedReason().
2419: Options Database Key:
2420: . -tao_monitor - Use the default monitor, which prints statistics to standard output
2422: .seealso: TaoGetConvergedReason(), TaoMonitorDefault(), TaoSetMonitor()
2424: Level: developer
2426: @*/
2427: PetscErrorCode TaoMonitor(Tao tao, PetscInt its, PetscReal f, PetscReal res, PetscReal cnorm, PetscReal steplength)
2428: {
2429: PetscInt i;
2432: tao->fc = f;
2433: tao->residual = res;
2434: tao->cnorm = cnorm;
2435: tao->step = steplength;
2436: if (!its) {
2437: tao->cnorm0 = cnorm;
2438: tao->gnorm0 = res;
2439: }
2441: for (i=0;i<tao->numbermonitors;i++) {
2442: (*tao->monitor[i])(tao,tao->monitorcontext[i]);
2443: }
2444: return 0;
2445: }
2447: /*@
2448: TaoSetConvergenceHistory - Sets the array used to hold the convergence history.
2450: Logically Collective on Tao
2452: Input Parameters:
2453: + tao - the Tao solver context
2454: . obj - array to hold objective value history
2455: . resid - array to hold residual history
2456: . cnorm - array to hold constraint violation history
2457: . lits - integer array to hold the number of linear iterations for each Tao iteration
2458: . na - size of obj, resid, cnorm, and lits
2459: - reset - PETSC_TRUE indicates each new minimization resets the history counter to zero,
2460: else it continues storing new values for new minimizations after the old ones
2462: Notes:
2463: If set, TAO will fill the given arrays with the indicated
2464: information at each iteration. If 'obj','resid','cnorm','lits' are
2465: *all* NULL then space (using size na, or 1000 if na is PETSC_DECIDE or
2466: PETSC_DEFAULT) is allocated for the history.
2467: If not all are NULL, then only the non-NULL information categories
2468: will be stored, the others will be ignored.
2470: Any convergence information after iteration number 'na' will not be stored.
2472: This routine is useful, e.g., when running a code for purposes
2473: of accurate performance monitoring, when no I/O should be done
2474: during the section of code that is being timed.
2476: Level: intermediate
2478: .seealso: TaoGetConvergenceHistory()
2480: @*/
2481: PetscErrorCode TaoSetConvergenceHistory(Tao tao, PetscReal obj[], PetscReal resid[], PetscReal cnorm[], PetscInt lits[], PetscInt na,PetscBool reset)
2482: {
2489: if (na == PETSC_DECIDE || na == PETSC_DEFAULT) na = 1000;
2490: if (!obj && !resid && !cnorm && !lits) {
2491: PetscCalloc4(na,&obj,na,&resid,na,&cnorm,na,&lits);
2492: tao->hist_malloc = PETSC_TRUE;
2493: }
2495: tao->hist_obj = obj;
2496: tao->hist_resid = resid;
2497: tao->hist_cnorm = cnorm;
2498: tao->hist_lits = lits;
2499: tao->hist_max = na;
2500: tao->hist_reset = reset;
2501: tao->hist_len = 0;
2502: return 0;
2503: }
2505: /*@C
2506: TaoGetConvergenceHistory - Gets the arrays used to hold the convergence history.
2508: Collective on Tao
2510: Input Parameter:
2511: . tao - the Tao context
2513: Output Parameters:
2514: + obj - array used to hold objective value history
2515: . resid - array used to hold residual history
2516: . cnorm - array used to hold constraint violation history
2517: . lits - integer array used to hold linear solver iteration count
2518: - nhist - size of obj, resid, cnorm, and lits
2520: Notes:
2521: This routine must be preceded by calls to TaoSetConvergenceHistory()
2522: and TaoSolve(), otherwise it returns useless information.
2524: The calling sequence for this routine in Fortran is
2525: $ call TaoGetConvergenceHistory(Tao tao, PetscInt nhist, PetscErrorCode ierr)
2527: This routine is useful, e.g., when running a code for purposes
2528: of accurate performance monitoring, when no I/O should be done
2529: during the section of code that is being timed.
2531: Level: advanced
2533: .seealso: TaoSetConvergenceHistory()
2535: @*/
2536: PetscErrorCode TaoGetConvergenceHistory(Tao tao, PetscReal **obj, PetscReal **resid, PetscReal **cnorm, PetscInt **lits, PetscInt *nhist)
2537: {
2539: if (obj) *obj = tao->hist_obj;
2540: if (cnorm) *cnorm = tao->hist_cnorm;
2541: if (resid) *resid = tao->hist_resid;
      if (lits) *lits = tao->hist_lits;
2542: if (nhist) *nhist = tao->hist_len;
2543: return 0;
2544: }
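/*
   Example (illustrative sketch): recording and retrieving convergence history without per-iteration
   I/O. Passing NULL arrays asks Tao to allocate the storage itself, as described above; error
   checking is omitted.

.vb
   PetscReal *obj,*resid,*cnorm;
   PetscInt  *lits,nhist,i;

   TaoSetConvergenceHistory(tao,NULL,NULL,NULL,NULL,PETSC_DECIDE,PETSC_TRUE);
   TaoSolve(tao);
   TaoGetConvergenceHistory(tao,&obj,&resid,&cnorm,&lits,&nhist);
   for (i=0; i<nhist; i++) {
     PetscPrintf(PETSC_COMM_WORLD,"%" PetscInt_FMT ": f=%g ||g||=%g\n",i,(double)obj[i],(double)resid[i]);
   }
.ve
*/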
2546: /*@
2547: TaoSetApplicationContext - Sets the optional user-defined context for
2548: a solver.
2550: Logically Collective on Tao
2552: Input Parameters:
2553: + tao - the Tao context
2554: - usrP - optional user context
2556: Level: intermediate
2558: .seealso: TaoGetApplicationContext()
2559: @*/
2560: PetscErrorCode TaoSetApplicationContext(Tao tao,void *usrP)
2561: {
2563: tao->user = usrP;
2564: return 0;
2565: }
2567: /*@
2568: TaoGetApplicationContext - Gets the user-defined context for a
2569: TAO solver.
2571: Not Collective
2573: Input Parameter:
2574: . tao - Tao context
2576: Output Parameter:
2577: . usrP - address of the location in which to return the user context pointer
2579: Level: intermediate
2581: .seealso: TaoSetApplicationContext()
2582: @*/
2583: PetscErrorCode TaoGetApplicationContext(Tao tao,void *usrP)
2584: {
2587: *(void**)usrP = tao->user;
2588: return 0;
2589: }
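/*
   Example (illustrative sketch): passing problem data to callbacks through the application context.
   AppCtx and FormObjective are hypothetical names and the TaoSetObjective() callback signature is
   assumed from the public Tao API; error checking is omitted.

.vb
   typedef struct { PetscReal alpha; Vec data; } AppCtx;

   PetscErrorCode FormObjective(Tao tao,Vec X,PetscReal *f,void *ptr)
   {
     AppCtx *user;
     TaoGetApplicationContext(tao,&user);   // or simply use ptr, which is the same context here
     ...
     return 0;
   }

   AppCtx user;
   TaoSetApplicationContext(tao,&user);
   TaoSetObjective(tao,FormObjective,&user);
.ve
*/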
2591: /*@
2592: TaoSetGradientNorm - Sets the matrix used to define the inner product that measures the size of the gradient.
2594: Collective on tao
2596: Input Parameters:
2597: + tao - the Tao context
2598: - M - the matrix defining the inner product used to measure the gradient norm
2600: Level: beginner
2602: .seealso: TaoGetGradientNorm(), TaoGradientNorm()
2603: @*/
2604: PetscErrorCode TaoSetGradientNorm(Tao tao, Mat M)
2605: {
2608: PetscObjectReference((PetscObject)M);
2609: MatDestroy(&tao->gradient_norm);
2610: VecDestroy(&tao->gradient_norm_tmp);
2611: tao->gradient_norm = M;
2612: MatCreateVecs(M, NULL, &tao->gradient_norm_tmp);
2613: return 0;
2614: }
2616: /*@
2617: TaoGetGradientNorm - Returns the matrix used to define the inner product for measuring the size of the gradient.
2619: Not Collective
2621: Input Parameter:
2622: . tao - Tao context
2624: Output Parameter:
2625: . M - the matrix defining the gradient norm inner product
2627: Level: beginner
2629: .seealso: TaoSetGradientNorm(), TaoGradientNorm()
2630: @*/
2631: PetscErrorCode TaoGetGradientNorm(Tao tao, Mat *M)
2632: {
2635: *M = tao->gradient_norm;
2636: return 0;
2637: }
2639: /*@C
2640: TaoGradientNorm - Compute the norm with respect to the inner product the user has set.
2642: Collective on tao
2644: Input Parameters:
2645: + tao - the Tao context
2646: . gradient - the gradient vector whose norm is to be computed
2647: - type - the norm type, used only when no gradient-norm matrix has been set with TaoSetGradientNorm()
2649: Output Parameter:
2650: . gnorm - the gradient norm
2652: Level: developer
2654: .seealso: TaoSetGradientNorm(), TaoGetGradientNorm()
2655: @*/
2656: PetscErrorCode TaoGradientNorm(Tao tao, Vec gradient, NormType type, PetscReal *gnorm)
2657: {
2662: if (tao->gradient_norm) {
2663: PetscScalar gnorms;
2666: MatMult(tao->gradient_norm, gradient, tao->gradient_norm_tmp);
2667: VecDot(gradient, tao->gradient_norm_tmp, &gnorms);
2668: *gnorm = PetscRealPart(PetscSqrtScalar(gnorms));
2669: } else {
2670: VecNorm(gradient, type, gnorm);
2671: }
2672: return 0;
2673: }
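/*
   Example (illustrative sketch): measuring the gradient in a problem-specific inner product, here a
   diagonal scaling matrix built for illustration. The variables n (global size) and g (gradient
   vector) are assumed to exist; when no matrix is set, TaoGradientNorm() falls back to VecNorm()
   with the requested type. Error checking is omitted.

.vb
   Mat       M;
   PetscReal gnorm;

   MatCreateAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,n,1,NULL,0,NULL,&M);
   ...                                     // insert diagonal weights with MatSetValue()
   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

   TaoSetGradientNorm(tao,M);
   TaoGradientNorm(tao,g,NORM_2,&gnorm);   // computes sqrt(g^T M g)
   MatDestroy(&M);
.ve
*/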
2675: /*@C
2676: TaoMonitorDrawCtxCreate - Creates the monitor context for TaoMonitorDrawCtx
2678: Collective on Tao
Input Parameters:
+ comm - the communicator for the viewer
. host - the X display on which to open the window (pass NULL for the local machine)
. label - the window title
. x - horizontal screen coordinate of the upper-left corner of the window (or PETSC_DECIDE)
. y - vertical screen coordinate of the upper-left corner of the window (or PETSC_DECIDE)
. m - window width in pixels (or PETSC_DECIDE)
. n - window height in pixels (or PETSC_DECIDE)
- howoften - when positive, plot every howoften iterations; when -1, plot only when the solver finishes
2680: Output Parameter:
2681: . ctx - the monitor context
2683: Options Database:
2684: . -tao_draw_solution_initial - show initial guess as well as current solution
2686: Level: intermediate
2688: .seealso: TaoSetMonitor(), TaoMonitorDefault(), VecView(), TaoMonitorDrawCtxDestroy()
2689: @*/
2690: PetscErrorCode TaoMonitorDrawCtxCreate(MPI_Comm comm,const char host[],const char label[],int x,int y,int m,int n,PetscInt howoften,TaoMonitorDrawCtx *ctx)
2691: {
2692: PetscNew(ctx);
2693: PetscViewerDrawOpen(comm,host,label,x,y,m,n,&(*ctx)->viewer);
2694: PetscViewerSetFromOptions((*ctx)->viewer);
2695: (*ctx)->howoften = howoften;
2696: return 0;
2697: }
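/*
   Example (illustrative sketch): attaching TaoDrawSolutionMonitor() with a context created here so
   that the plot refreshes every 5 iterations. The TaoSetMonitor() calling sequence and the
   destructor cast are assumed; error checking is omitted.

.vb
   TaoMonitorDrawCtx ctx;
   TaoMonitorDrawCtxCreate(PETSC_COMM_WORLD,NULL,"solution",PETSC_DECIDE,PETSC_DECIDE,300,300,5,&ctx);
   TaoSetMonitor(tao,TaoDrawSolutionMonitor,ctx,(PetscErrorCode (*)(void**))TaoMonitorDrawCtxDestroy);
   TaoSolve(tao);
.ve
*/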
2699: /*@C
2700: TaoMonitorDrawCtxDestroy - Destroys the monitor context for TaoDrawSolutionMonitor()
2702: Collective on Tao
2704: Input Parameters:
2705: . ctx - the monitor context
2707: Level: intermediate
2709: .seealso: TaoSetMonitor(), TaoMonitorDefault(), VecView(), TaoDrawSolutionMonitor()
2710: @*/
2711: PetscErrorCode TaoMonitorDrawCtxDestroy(TaoMonitorDrawCtx *ictx)
2712: {
2713: PetscViewerDestroy(&(*ictx)->viewer);
2714: PetscFree(*ictx);
2715: return 0;
2716: }