/* taocg.c - TAOCG: nonlinear conjugate gradient method for unconstrained optimization */
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/unconstrained/impls/cg/taocg.h>
4: /* Supported nonlinear CG update formulas (values index into CG_Table below) */
4: #define CG_FletcherReeves 0
5: #define CG_PolakRibiere 1
6: #define CG_PolakRibierePlus 2
7: #define CG_HestenesStiefel 3
8: #define CG_DaiYuan 4
9: #define CG_Types 5 /* number of formulas above */
10: /* Option strings for -tao_cg_type, indexed by the CG_* values */
11: static const char *CG_Table[64] = {"fr", "pr", "prp", "hs", "dy"};
13: static PetscErrorCode TaoSolve_CG(Tao tao)
14: {
15: TAO_CG *cgP = (TAO_CG *)tao->data;
16: TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
17: PetscReal step = 1.0, f, gnorm, gnorm2, delta, gd, ginner, beta;
18: PetscReal gd_old, gnorm2_old, f_old;
20: PetscFunctionBegin;
21: if (tao->XL || tao->XU || tao->ops->computebounds) PetscCall(PetscInfo(tao, "WARNING: Variable bounds have been set but will be ignored by cg algorithm\n"));
23: /* Check convergence criteria */
24: PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient));
25: PetscCall(VecNorm(tao->gradient, NORM_2, &gnorm));
26: PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
28: tao->reason = TAO_CONTINUE_ITERATING;
29: PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
30: PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step));
31: PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
32: if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);
34: /* Set initial direction to -gradient */
35: PetscCall(VecCopy(tao->gradient, tao->stepdirection));
36: PetscCall(VecScale(tao->stepdirection, -1.0));
37: gnorm2 = gnorm * gnorm;
39: /* Set initial scaling for the function */
40: if (f != 0.0) {
41: delta = 2.0 * PetscAbsScalar(f) / gnorm2;
42: delta = PetscMax(delta, cgP->delta_min);
43: delta = PetscMin(delta, cgP->delta_max);
44: } else {
45: delta = 2.0 / gnorm2;
46: delta = PetscMax(delta, cgP->delta_min);
47: delta = PetscMin(delta, cgP->delta_max);
48: }
49: /* Set counter for gradient and reset steps */
50: cgP->ngradsteps = 0;
51: cgP->nresetsteps = 0;
53: while (1) {
54: /* Call general purpose update function */
55: PetscTryTypeMethod(tao, update, tao->niter, tao->user_update);
57: /* Save the current gradient information */
58: f_old = f;
59: gnorm2_old = gnorm2;
60: PetscCall(VecCopy(tao->solution, cgP->X_old));
61: PetscCall(VecCopy(tao->gradient, cgP->G_old));
62: PetscCall(VecDot(tao->gradient, tao->stepdirection, &gd));
63: if ((gd >= 0) || PetscIsInfOrNanReal(gd)) {
64: ++cgP->ngradsteps;
65: if (f != 0.0) {
66: delta = 2.0 * PetscAbsScalar(f) / gnorm2;
67: delta = PetscMax(delta, cgP->delta_min);
68: delta = PetscMin(delta, cgP->delta_max);
69: } else {
70: delta = 2.0 / gnorm2;
71: delta = PetscMax(delta, cgP->delta_min);
72: delta = PetscMin(delta, cgP->delta_max);
73: }
75: PetscCall(VecCopy(tao->gradient, tao->stepdirection));
76: PetscCall(VecScale(tao->stepdirection, -1.0));
77: }
79: /* Search direction for improving point */
80: PetscCall(TaoLineSearchSetInitialStepLength(tao->linesearch, delta));
81: PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_status));
82: PetscCall(TaoAddLineSearchCounts(tao));
83: if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
84: /* Linesearch failed */
85: /* Reset factors and use scaled gradient step */
86: ++cgP->nresetsteps;
87: f = f_old;
88: gnorm2 = gnorm2_old;
89: PetscCall(VecCopy(cgP->X_old, tao->solution));
90: PetscCall(VecCopy(cgP->G_old, tao->gradient));
92: if (f != 0.0) {
93: delta = 2.0 * PetscAbsScalar(f) / gnorm2;
94: delta = PetscMax(delta, cgP->delta_min);
95: delta = PetscMin(delta, cgP->delta_max);
96: } else {
97: delta = 2.0 / gnorm2;
98: delta = PetscMax(delta, cgP->delta_min);
99: delta = PetscMin(delta, cgP->delta_max);
100: }
102: PetscCall(VecCopy(tao->gradient, tao->stepdirection));
103: PetscCall(VecScale(tao->stepdirection, -1.0));
105: PetscCall(TaoLineSearchSetInitialStepLength(tao->linesearch, delta));
106: PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_status));
107: PetscCall(TaoAddLineSearchCounts(tao));
109: if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
110: /* Linesearch failed again */
111: /* switch to unscaled gradient */
112: f = f_old;
113: PetscCall(VecCopy(cgP->X_old, tao->solution));
114: PetscCall(VecCopy(cgP->G_old, tao->gradient));
115: delta = 1.0;
116: PetscCall(VecCopy(tao->solution, tao->stepdirection));
117: PetscCall(VecScale(tao->stepdirection, -1.0));
119: PetscCall(TaoLineSearchSetInitialStepLength(tao->linesearch, delta));
120: PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_status));
121: PetscCall(TaoAddLineSearchCounts(tao));
122: if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
123: /* Line search failed for last time -- give up */
124: f = f_old;
125: PetscCall(VecCopy(cgP->X_old, tao->solution));
126: PetscCall(VecCopy(cgP->G_old, tao->gradient));
127: step = 0.0;
128: tao->reason = TAO_DIVERGED_LS_FAILURE;
129: }
130: }
131: }
133: /* Check for bad value */
134: PetscCall(VecNorm(tao->gradient, NORM_2, &gnorm));
135: PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User-provided compute function generated Inf or NaN");
137: /* Check for termination */
138: gnorm2 = gnorm * gnorm;
139: tao->niter++;
140: PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
141: PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step));
142: PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
143: if (tao->reason != TAO_CONTINUE_ITERATING) break;
145: /* Check for restart condition */
146: PetscCall(VecDot(tao->gradient, cgP->G_old, &ginner));
147: if (PetscAbsScalar(ginner) >= cgP->eta * gnorm2) {
148: /* Gradients far from orthogonal; use steepest descent direction */
149: beta = 0.0;
150: } else {
151: /* Gradients close to orthogonal; use conjugate gradient formula */
152: switch (cgP->cg_type) {
153: case CG_FletcherReeves:
154: beta = gnorm2 / gnorm2_old;
155: break;
157: case CG_PolakRibiere:
158: beta = (gnorm2 - ginner) / gnorm2_old;
159: break;
161: case CG_PolakRibierePlus:
162: beta = PetscMax((gnorm2 - ginner) / gnorm2_old, 0.0);
163: break;
165: case CG_HestenesStiefel:
166: PetscCall(VecDot(tao->gradient, tao->stepdirection, &gd));
167: PetscCall(VecDot(cgP->G_old, tao->stepdirection, &gd_old));
168: beta = (gnorm2 - ginner) / (gd - gd_old);
169: break;
171: case CG_DaiYuan:
172: PetscCall(VecDot(tao->gradient, tao->stepdirection, &gd));
173: PetscCall(VecDot(cgP->G_old, tao->stepdirection, &gd_old));
174: beta = gnorm2 / (gd - gd_old);
175: break;
177: default:
178: beta = 0.0;
179: break;
180: }
181: }
183: /* Compute the direction d=-g + beta*d */
184: PetscCall(VecAXPBY(tao->stepdirection, -1.0, beta, tao->gradient));
186: /* update initial steplength choice */
187: delta = 1.0;
188: delta = PetscMax(delta, cgP->delta_min);
189: delta = PetscMin(delta, cgP->delta_max);
190: }
191: PetscFunctionReturn(PETSC_SUCCESS);
192: }
194: static PetscErrorCode TaoSetUp_CG(Tao tao)
195: {
196: TAO_CG *cgP = (TAO_CG *)tao->data;
198: PetscFunctionBegin;
199: if (!tao->gradient) PetscCall(VecDuplicate(tao->solution, &tao->gradient));
200: if (!tao->stepdirection) PetscCall(VecDuplicate(tao->solution, &tao->stepdirection));
201: if (!cgP->X_old) PetscCall(VecDuplicate(tao->solution, &cgP->X_old));
202: if (!cgP->G_old) PetscCall(VecDuplicate(tao->gradient, &cgP->G_old));
203: PetscFunctionReturn(PETSC_SUCCESS);
204: }
206: static PetscErrorCode TaoDestroy_CG(Tao tao)
207: {
208: TAO_CG *cgP = (TAO_CG *)tao->data;
210: PetscFunctionBegin;
211: if (tao->setupcalled) {
212: PetscCall(VecDestroy(&cgP->X_old));
213: PetscCall(VecDestroy(&cgP->G_old));
214: }
215: PetscCall(TaoLineSearchDestroy(&tao->linesearch));
216: PetscCall(PetscFree(tao->data));
217: PetscFunctionReturn(PETSC_SUCCESS);
218: }
220: static PetscErrorCode TaoSetFromOptions_CG(Tao tao, PetscOptionItems *PetscOptionsObject)
221: {
222: TAO_CG *cgP = (TAO_CG *)tao->data;
224: PetscFunctionBegin;
225: PetscCall(TaoLineSearchSetFromOptions(tao->linesearch));
226: PetscOptionsHeadBegin(PetscOptionsObject, "Nonlinear Conjugate Gradient method for unconstrained optimization");
227: PetscCall(PetscOptionsReal("-tao_cg_eta", "restart tolerance", "", cgP->eta, &cgP->eta, NULL));
228: PetscCall(PetscOptionsEList("-tao_cg_type", "cg formula", "", CG_Table, CG_Types, CG_Table[cgP->cg_type], &cgP->cg_type, NULL));
229: PetscCall(PetscOptionsReal("-tao_cg_delta_min", "minimum delta value", "", cgP->delta_min, &cgP->delta_min, NULL));
230: PetscCall(PetscOptionsReal("-tao_cg_delta_max", "maximum delta value", "", cgP->delta_max, &cgP->delta_max, NULL));
231: PetscOptionsHeadEnd();
232: PetscFunctionReturn(PETSC_SUCCESS);
233: }
235: static PetscErrorCode TaoView_CG(Tao tao, PetscViewer viewer)
236: {
237: PetscBool isascii;
238: TAO_CG *cgP = (TAO_CG *)tao->data;
240: PetscFunctionBegin;
241: PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
242: if (isascii) {
243: PetscCall(PetscViewerASCIIPushTab(viewer));
244: PetscCall(PetscViewerASCIIPrintf(viewer, "CG Type: %s\n", CG_Table[cgP->cg_type]));
245: PetscCall(PetscViewerASCIIPrintf(viewer, "Gradient steps: %" PetscInt_FMT "\n", cgP->ngradsteps));
246: PetscCall(PetscViewerASCIIPrintf(viewer, "Reset steps: %" PetscInt_FMT "\n", cgP->nresetsteps));
247: PetscCall(PetscViewerASCIIPopTab(viewer));
248: }
249: PetscFunctionReturn(PETSC_SUCCESS);
250: }
252: /*MC
  TAOCG - Nonlinear conjugate gradient method, an extension of the linear
  conjugate gradient method to general unconstrained nonlinear optimization.
256: Options Database Keys:
257: + -tao_cg_eta <r> - restart tolerance
258: . -tao_cg_type <taocg_type> - cg formula
259: . -tao_cg_delta_min <r> - minimum delta value
260: - -tao_cg_delta_max <r> - maximum delta value
262: Notes:
263: CG formulas are:
264: "fr" - Fletcher-Reeves
265: "pr" - Polak-Ribiere
266: "prp" - Polak-Ribiere-Plus
  "hs" - Hestenes-Stiefel
268: "dy" - Dai-Yuan
269: Level: beginner
270: M*/
272: PETSC_EXTERN PetscErrorCode TaoCreate_CG(Tao tao)
273: {
274: TAO_CG *cgP;
275: const char *morethuente_type = TAOLINESEARCHMT;
277: PetscFunctionBegin;
278: tao->ops->setup = TaoSetUp_CG;
279: tao->ops->solve = TaoSolve_CG;
280: tao->ops->view = TaoView_CG;
281: tao->ops->setfromoptions = TaoSetFromOptions_CG;
282: tao->ops->destroy = TaoDestroy_CG;
284: /* Override default settings (unless already changed) */
285: if (!tao->max_it_changed) tao->max_it = 2000;
286: if (!tao->max_funcs_changed) tao->max_funcs = 4000;
288: /* Note: nondefault values should be used for nonlinear conjugate gradient */
289: /* method. In particular, gtol should be less that 0.5; the value used in */
290: /* Nocedal and Wright is 0.10. We use the default values for the */
291: /* linesearch because it seems to work better. */
292: PetscCall(TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch));
293: PetscCall(PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1));
294: PetscCall(TaoLineSearchSetType(tao->linesearch, morethuente_type));
295: PetscCall(TaoLineSearchUseTaoRoutines(tao->linesearch, tao));
296: PetscCall(TaoLineSearchSetOptionsPrefix(tao->linesearch, tao->hdr.prefix));
298: PetscCall(PetscNew(&cgP));
299: tao->data = (void *)cgP;
300: cgP->eta = 0.1;
301: cgP->delta_min = 1e-7;
302: cgP->delta_max = 100;
303: cgP->cg_type = CG_PolakRibierePlus;
304: PetscFunctionReturn(PETSC_SUCCESS);
305: }