Actual source code: lmvm.c
petsc-3.10.5 2019-03-28
#include <petsctaolinesearch.h>
#include <../src/tao/unconstrained/impls/lmvm/lmvm.h>

#define LMVM_STEP_BFGS 0
#define LMVM_STEP_GRAD 1
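
/* Tags recording whether an accepted iterate came from the BFGS direction or
   from the (scaled) gradient fallback; the tallies are reported by TaoView_LMVM() */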

static PetscErrorCode TaoSolve_LMVM(Tao tao)
{
  TAO_LMVM                     *lmP = (TAO_LMVM *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscErrorCode               ierr;
  PetscInt                     stepType = LMVM_STEP_GRAD, nupdates;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;

  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by lmvm algorithm\n");CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);
  ierr = TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_USER,"User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
  ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);CHKERRQ(ierr);
  ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set counters for gradient/reset steps */
  if (!lmP->recycle) {
    lmP->bfgs = 0;
    lmP->grad = 0;
    ierr = MatLMVMReset(lmP->M, PETSC_FALSE);CHKERRQ(ierr);
  }

  /* Have not converged; continue with Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    if (lmP->H0) {
      ierr = MatLMVMSetJ0(lmP->M, lmP->H0);CHKERRQ(ierr);
      stepType = LMVM_STEP_BFGS;
    }
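
    /* Accumulate the current iterate and gradient into the quasi-Netwon
       approximation B, then apply its inverse to the gradient so that
       D = B^{-1} g; the sign is flipped below to form the search direction */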
    ierr = MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);CHKERRQ(ierr);
    ierr = MatSolve(lmP->M, tao->gradient, lmP->D);CHKERRQ(ierr);
    ierr = MatLMVMGetUpdateCount(lmP->M, &nupdates);CHKERRQ(ierr);
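    /* At least one accepted update means the solve above used curvature
       information, so the direction is a quasi-Newton (BFGS) step */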
    if (nupdates > 0) stepType = LMVM_STEP_BFGS;

    /* Check for success (descent direction) */
    ierr = VecDot(lmP->D, tao->gradient, &gdx);CHKERRQ(ierr);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The step is not a descent direction, or the direction produced NaN.
         We can assert nupdates > 1 in this case because the first solve
         after a reset produces the scaled gradient direction, which is
         guaranteed to be a descent direction.

         Use the steepest descent direction (scaled). */
      ierr = MatLMVMReset(lmP->M, PETSC_FALSE);CHKERRQ(ierr);
      ierr = MatLMVMClearJ0(lmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = MatSolve(lmP->M, tao->gradient, lmP->D);CHKERRQ(ierr);

      /* On a reset the direction cannot be NaN; it is a scaled gradient step,
         so there is no need to check for this condition again. */
      stepType = LMVM_STEP_GRAD;
    }
    ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, lmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, lmP->Gold);CHKERRQ(ierr);

    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER && (stepType != LMVM_STEP_GRAD)) {
      /* The line search failed to obtain an acceptable iterate with the BFGS
         step. Reset the factors and attempt the scaled gradient direction. */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);

      ierr = MatLMVMReset(lmP->M, PETSC_FALSE);CHKERRQ(ierr);
      ierr = MatLMVMClearJ0(lmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = MatSolve(lmP->M, tao->gradient, lmP->D);CHKERRQ(ierr);

      /* On a reset the direction cannot be NaN; it is a scaled gradient step,
         so there is no need to check for this condition again. */
      stepType = LMVM_STEP_GRAD;
      ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

      /* Perform the linesearch again along the gradient direction */
      ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    }

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /* Failed to find an improving point */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      step = 0.0;
      tao->reason = TAO_DIVERGED_LS_FAILURE;
    } else {
      /* The line search found a valid step, so tally up the step type */
      switch (stepType) {
      case LMVM_STEP_BFGS:
        ++lmP->bfgs;
        break;
      case LMVM_STEP_GRAD:
        ++lmP->grad;
        break;
      default:
        break;
      }
      /* Compute the new gradient norm */
      ierr = TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);
    }

    /* Check convergence */
    tao->niter++;
    ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
    ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);CHKERRQ(ierr);
    ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode TaoSetUp_LMVM(Tao tao)
{
  TAO_LMVM       *lmP = (TAO_LMVM *)tao->data;
  PetscInt       n,N;
  PetscErrorCode ierr;
  PetscBool      is_spd;

  PetscFunctionBegin;
  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) {ierr = VecDuplicate(tao->solution,&tao->gradient);CHKERRQ(ierr);}
  if (!tao->stepdirection) {ierr = VecDuplicate(tao->solution,&tao->stepdirection);CHKERRQ(ierr);}
  if (!lmP->D) {ierr = VecDuplicate(tao->solution,&lmP->D);CHKERRQ(ierr);}
  if (!lmP->Xold) {ierr = VecDuplicate(tao->solution,&lmP->Xold);CHKERRQ(ierr);}
  if (!lmP->Gold) {ierr = VecDuplicate(tao->solution,&lmP->Gold);CHKERRQ(ierr);}

  /* Set the sizes of the limited-memory approximation matrix and allocate its storage */
  ierr = VecGetLocalSize(tao->solution,&n);CHKERRQ(ierr);
  ierr = VecGetSize(tao->solution,&N);CHKERRQ(ierr);
  ierr = MatSetSizes(lmP->M, n, n, N, N);CHKERRQ(ierr);
  ierr = MatLMVMAllocate(lmP->M,tao->solution,tao->gradient);CHKERRQ(ierr);
  ierr = MatGetOption(lmP->M, MAT_SPD, &is_spd);CHKERRQ(ierr);
  if (!is_spd) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix is not symmetric positive-definite.");

  /* If the user has provided a matrix to use as the initial Hessian H0, pass it to the LMVM matrix */
  if (lmP->H0) {
    ierr = MatLMVMSetJ0(lmP->M, lmP->H0);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_LMVM(Tao tao)
{
  TAO_LMVM       *lmP = (TAO_LMVM *)tao->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (tao->setupcalled) {
    ierr = VecDestroy(&lmP->Xold);CHKERRQ(ierr);
    ierr = VecDestroy(&lmP->Gold);CHKERRQ(ierr);
    ierr = VecDestroy(&lmP->D);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&lmP->M);CHKERRQ(ierr);
  if (lmP->H0) {
    ierr = PetscObjectDereference((PetscObject)lmP->H0);CHKERRQ(ierr);
  }
  ierr = PetscFree(tao->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_LMVM(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_LMVM       *lm = (TAO_LMVM *)tao->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"Limited-memory variable-metric method for unconstrained optimization");CHKERRQ(ierr);
  ierr = PetscOptionsBool("-tao_lmvm_recycle","enable recycling of the BFGS matrix between subsequent TaoSolve() calls","",lm->recycle,&lm->recycle,NULL);CHKERRQ(ierr);
  ierr = TaoLineSearchSetFromOptions(tao->linesearch);CHKERRQ(ierr);
  ierr = MatSetFromOptions(lm->M);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoView_LMVM(Tao tao, PetscViewer viewer)
{
  TAO_LMVM       *lm = (TAO_LMVM *)tao->data;
  PetscBool      isascii;
  PetscInt       recycled_its;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);CHKERRQ(ierr);
  if (isascii) {
    ierr = PetscViewerASCIIPrintf(viewer, "  Gradient steps: %D\n", lm->grad);CHKERRQ(ierr);
    if (lm->recycle) {
      ierr = PetscViewerASCIIPrintf(viewer, "  Recycle: on\n");CHKERRQ(ierr);
      recycled_its = lm->bfgs + lm->grad;
      ierr = PetscViewerASCIIPrintf(viewer, "  Total recycled iterations: %D\n", recycled_its);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------- */

/*MC
  TAOLMVM - The Limited-Memory Variable Metric method is a quasi-Newton
  optimization solver for unconstrained minimization. It solves the Newton step

          H_k d_k = -g_k

  using an approximation B_k in place of H_k, where B_k is composed using
  the BFGS update formula. A Moré-Thuente line search is then used
  to compute the step length in the d_k direction.
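
  For reference, with s_k = x_{k+1} - x_k and y_k = g_{k+1} - g_k, the
  standard BFGS update that composes B_k can be written as

      B_{k+1} = B_k - (B_k s_k s_k^T B_k) / (s_k^T B_k s_k)
                    + (y_k y_k^T) / (y_k^T s_k)

  and the solver applies B_k^{-1} to the gradient via MatSolve() to obtain d_k.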

  Options Database Keys:
+ -tao_lmvm_recycle - enable recycling LMVM updates between TaoSolve() calls
- -tao_lmvm_no_scale - (developer) disables diagonal Broyden scaling on the LMVM approximation

  Level: beginner
M*/
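
/* A minimal usage sketch (illustrative, not part of this file); the solver is
   selected by passing TAOLMVM to TaoSetType(). FormFunctionGradient, user, and
   x are hypothetical application-provided callback, context, and solution
   vector; error checking is elided for brevity:

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD,&tao);
     TaoSetType(tao,TAOLMVM);
     TaoSetInitialVector(tao,x);
     TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);
     TaoSetFromOptions(tao);    (picks up -tao_lmvm_recycle and line search options)
     TaoSolve(tao);
     TaoDestroy(&tao);
*/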

PETSC_EXTERN PetscErrorCode TaoCreate_LMVM(Tao tao)
{
  TAO_LMVM       *lmP;
  const char     *morethuente_type = TAOLINESEARCHMT;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  tao->ops->setup          = TaoSetUp_LMVM;
  tao->ops->solve          = TaoSolve_LMVM;
  tao->ops->view           = TaoView_LMVM;
  tao->ops->setfromoptions = TaoSetFromOptions_LMVM;
  tao->ops->destroy        = TaoDestroy_LMVM;

  ierr = PetscNewLog(tao,&lmP);CHKERRQ(ierr);
  lmP->D       = NULL;
  lmP->M       = NULL;
  lmP->Xold    = NULL;
  lmP->Gold    = NULL;
  lmP->H0      = NULL;
  lmP->recycle = PETSC_FALSE;

  tao->data = (void*)lmP;

  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  ierr = TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);CHKERRQ(ierr);
  ierr = TaoLineSearchSetType(tao->linesearch,morethuente_type);CHKERRQ(ierr);
  ierr = TaoLineSearchUseTaoRoutines(tao->linesearch,tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);CHKERRQ(ierr);

  ierr = KSPInitializePackage();CHKERRQ(ierr);
  ierr = MatCreate(((PetscObject)tao)->comm, &lmP->M);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)lmP->M, (PetscObject)tao, 1);CHKERRQ(ierr);
  ierr = MatSetType(lmP->M, MATLMVMBFGS);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(lmP->M, "tao_lmvm_");CHKERRQ(ierr);
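  /* The prefix exposes the LMVM matrix options under -tao_lmvm_, e.g.
     -tao_lmvm_mat_type to select a different quasi-Newton update */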
  PetscFunctionReturn(0);
}