Actual source code: lmvm.c
#include <petsctaolinesearch.h>
#include <../src/tao/unconstrained/impls/lmvm/lmvm.h>

#define LMVM_STEP_BFGS 0
#define LMVM_STEP_GRAD 1
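
/* Main solve loop: each iteration updates the limited-memory BFGS
   approximation with the latest solution/gradient pair, solves for a search
   direction, and globalizes it with a line search. If the BFGS direction is
   not a descent direction, or the line search fails on it, the approximation
   is reset and a scaled gradient step is attempted instead. */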
static PetscErrorCode TaoSolve_LMVM(Tao tao)
{
  TAO_LMVM                     *lmP = (TAO_LMVM *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscInt                     stepType = LMVM_STEP_GRAD, nupdates;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    PetscInfo(tao, "WARNING: Variable bounds have been set but will be ignored by the lmvm algorithm\n");
  }
  /* Check convergence criteria */
  TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
  TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_USER, "User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its);
  TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step);
  (*tao->ops->convergencetest)(tao, tao->cnvP);
  if (tao->reason != TAO_CONTINUE_ITERATING) return 0;
  /* Reset the counters for gradient and BFGS steps unless recycling is enabled */
  if (!lmP->recycle) {
    lmP->bfgs = 0;
    lmP->grad = 0;
    MatLMVMReset(lmP->M, PETSC_FALSE);
  }
  /* Have not converged; continue with quasi-Newton iterations */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      (*tao->ops->update)(tao, tao->niter, tao->user_update);
    }

    /* Compute direction */
    if (lmP->H0) {
      MatLMVMSetJ0(lmP->M, lmP->H0);
      stepType = LMVM_STEP_BFGS;
    }
    MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
    MatSolve(lmP->M, tao->gradient, lmP->D);
    MatLMVMGetUpdateCount(lmP->M, &nupdates);
    if (nupdates > 0) stepType = LMVM_STEP_BFGS;
    /* Check for success (descent direction) */
    VecDot(lmP->D, tao->gradient, &gdx);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The direction is not a descent direction or contains Inf/NaN.
         This can only happen after at least one BFGS update, because the
         very first solve produces the scaled gradient direction, which is
         guaranteed to be a descent direction.

         Fall back to the (scaled) steepest descent direction. */
      MatLMVMReset(lmP->M, PETSC_FALSE);
      MatLMVMClearJ0(lmP->M);
      MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
      MatSolve(lmP->M, tao->gradient, lmP->D);

      /* On a reset, the direction is a scaled gradient step and therefore
         cannot contain Inf/NaN; no need to re-check. */
      stepType = LMVM_STEP_GRAD;
    }
    VecScale(lmP->D, -1.0);

    /* Perform the linesearch */
    fold = f;
    VecCopy(tao->solution, lmP->Xold);
    VecCopy(tao->gradient, lmP->Gold);

    TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);
    TaoAddLineSearchCounts(tao);
    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER && (stepType != LMVM_STEP_GRAD)) {
      /* Reset factors and use scaled gradient step */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);

      /* Failed to obtain an acceptable iterate with the BFGS step;
         attempt to use the scaled gradient direction instead */
      MatLMVMReset(lmP->M, PETSC_FALSE);
      MatLMVMClearJ0(lmP->M);
      MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
      MatSolve(lmP->M, tao->gradient, lmP->D);

      /* On a reset, the direction is a scaled gradient step and therefore
         cannot contain Inf/NaN; no need to re-check. */
      stepType = LMVM_STEP_GRAD;
      VecScale(lmP->D, -1.0);

      /* Perform the linesearch */
      TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);
      TaoAddLineSearchCounts(tao);
    }
    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /* Failed to find an improving point */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);
      step = 0.0;
      tao->reason = TAO_DIVERGED_LS_FAILURE;
    } else {
      /* LS found valid step, so tally up step type */
      switch (stepType) {
      case LMVM_STEP_BFGS:
        ++lmP->bfgs;
        break;
      case LMVM_STEP_GRAD:
        ++lmP->grad;
        break;
      default:
        break;
      }
      /* Compute new gradient norm */
      TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);
    }
    /* Check convergence */
    tao->niter++;
    TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its);
    TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step);
    (*tao->ops->convergencetest)(tao, tao->cnvP);
  }
  return 0;
}
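
/* Allocates the work vectors and sizes the limited-memory matrix on first
   use; invoked automatically from TaoSetUp() before the solve begins. */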
static PetscErrorCode TaoSetUp_LMVM(Tao tao)
{
  TAO_LMVM  *lmP = (TAO_LMVM *)tao->data;
  PetscInt  n, N;
  PetscBool is_spd;

  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) VecDuplicate(tao->solution, &tao->gradient);
  if (!tao->stepdirection) VecDuplicate(tao->solution, &tao->stepdirection);
  if (!lmP->D) VecDuplicate(tao->solution, &lmP->D);
  if (!lmP->Xold) VecDuplicate(tao->solution, &lmP->Xold);
  if (!lmP->Gold) VecDuplicate(tao->solution, &lmP->Gold);
  /* Create matrix for the limited memory approximation */
  VecGetLocalSize(tao->solution, &n);
  VecGetSize(tao->solution, &N);
  MatSetSizes(lmP->M, n, n, N, N);
  MatLMVMAllocate(lmP->M, tao->solution, tao->gradient);
  MatGetOption(lmP->M, MAT_SPD, &is_spd);
  if (!is_spd) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix is not symmetric positive-definite");
  /* If the user has provided a matrix as the initial Hessian H0, pass it on to the LMVM matrix */
  if (lmP->H0) {
    MatLMVMSetJ0(lmP->M, lmP->H0);
  }

  return 0;
}
/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_LMVM(Tao tao)
{
  TAO_LMVM *lmP = (TAO_LMVM *)tao->data;

  if (tao->setupcalled) {
    VecDestroy(&lmP->Xold);
    VecDestroy(&lmP->Gold);
    VecDestroy(&lmP->D);
  }
  MatDestroy(&lmP->M);
  if (lmP->H0) {
    PetscObjectDereference((PetscObject)lmP->H0);
  }
  PetscFree(tao->data);

  return 0;
}
/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_LMVM(PetscOptionItems *PetscOptionsObject, Tao tao)
{
  TAO_LMVM *lm = (TAO_LMVM *)tao->data;

  PetscOptionsHead(PetscOptionsObject, "Limited-memory variable-metric method for unconstrained optimization");
  PetscOptionsBool("-tao_lmvm_recycle", "enable recycling of the BFGS matrix between subsequent TaoSolve() calls", "", lm->recycle, &lm->recycle, NULL);
  TaoLineSearchSetFromOptions(tao->linesearch);
  MatSetFromOptions(lm->M);
  PetscOptionsTail();
  return 0;
}
/*------------------------------------------------------------*/
static PetscErrorCode TaoView_LMVM(Tao tao, PetscViewer viewer)
{
  TAO_LMVM *lm = (TAO_LMVM *)tao->data;
  PetscBool isascii;
  PetscInt  recycled_its;

  PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
  if (isascii) {
    PetscViewerASCIIPrintf(viewer, "  Gradient steps: %D\n", lm->grad);
    if (lm->recycle) {
      PetscViewerASCIIPrintf(viewer, "  Recycle: on\n");
      recycled_its = lm->bfgs + lm->grad;
      PetscViewerASCIIPrintf(viewer, "  Total recycled iterations: %D\n", recycled_its);
    }
  }
  return 0;
}
/* ---------------------------------------------------------- */
/*MC
  TAOLMVM - Limited Memory Variable Metric method is a quasi-Newton
  optimization solver for unconstrained minimization. It solves
  the Newton step

          H_k d_k = -g_k

  using an approximation B_k in place of H_k, where B_k is composed using
  the BFGS update formula. A Moré-Thuente line search is then used
  to compute the step length in the d_k direction.
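
  For reference, the standard BFGS update used to build B_k from the
  correction pairs s_k = x_{k+1} - x_k and y_k = g_{k+1} - g_k is

          B_{k+1} = B_k - (B_k s_k s_k^T B_k)/(s_k^T B_k s_k)
                        + (y_k y_k^T)/(y_k^T s_k)

  with the implementation applying the inverse of this approximation in a
  limited-memory, matrix-free fashion.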
  Options Database Keys:
+ -tao_lmvm_recycle - enable recycling LMVM updates between TaoSolve() calls
- -tao_lmvm_no_scale - (developer) disables diagonal Broyden scaling on the LMVM approximation

  Level: beginner
M*/
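
/* Illustrative usage sketch, not part of this file. FormFunctionGradient and
   the context `user` are hypothetical application-provided pieces, and the
   exact TaoSetObjectiveAndGradient() signature varies between PETSc versions:

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD, &tao);
     TaoSetType(tao, TAOLMVM);
     TaoSetSolution(tao, x);                // x: Vec holding the initial guess
     TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user);
     TaoSetFromOptions(tao);                // picks up -tao_lmvm_recycle, etc.
     TaoSolve(tao);
     TaoDestroy(&tao);
*/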
PETSC_EXTERN PetscErrorCode TaoCreate_LMVM(Tao tao)
{
  TAO_LMVM   *lmP;
  const char *morethuente_type = TAOLINESEARCHMT;

  tao->ops->setup          = TaoSetUp_LMVM;
  tao->ops->solve          = TaoSolve_LMVM;
  tao->ops->view           = TaoView_LMVM;
  tao->ops->setfromoptions = TaoSetFromOptions_LMVM;
  tao->ops->destroy        = TaoDestroy_LMVM;

  PetscNewLog(tao, &lmP);
  lmP->D       = NULL;
  lmP->M       = NULL;
  lmP->Xold    = NULL;
  lmP->Gold    = NULL;
  lmP->H0      = NULL;
  lmP->recycle = PETSC_FALSE;

  tao->data = (void *)lmP;
  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
  PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
  TaoLineSearchSetType(tao->linesearch, morethuente_type);
  TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
  TaoLineSearchSetOptionsPrefix(tao->linesearch, tao->hdr.prefix);

  KSPInitializePackage();
  MatCreate(((PetscObject)tao)->comm, &lmP->M);
  PetscObjectIncrementTabLevel((PetscObject)lmP->M, (PetscObject)tao, 1);
  MatSetType(lmP->M, MATLMVMBFGS);
  MatSetOptionsPrefix(lmP->M, "tao_lmvm_");
  return 0;
}
277: }