Actual source code: bnls.c
#include <../src/tao/bound/impls/bnk/bnk.h>
#include <petscksp.h>

/*
 Implements Newton's Method with a line search approach for
 solving bound constrained minimization problems.

 ------------------------------------------------------------

 x_0 = VecMedian(x_0)
 f_0, g_0 = TaoComputeObjectiveAndGradient(x_0)
 pg_0 = project(g_0)
 check convergence at pg_0
 needH = TaoBNKInitialize(default:BNK_INIT_DIRECTION)
 niter = 0
 step_accepted = true

 while niter < max_it
    if needH
      if max_cg_steps > 0
        x_k, g_k, pg_k = TaoSolve(BNCG)
      end

      H_k = TaoComputeHessian(x_k)
      if pc_type == BNK_PC_BFGS
        add correction to BFGS approx
        if scale_type == BNK_SCALE_AHESS
          D = VecMedian(1e-6, abs(diag(H_k)), 1e6)
          scale BFGS with VecReciprocal(D)
        end
      end
      needH = False
    end

    if pc_type == BNK_PC_BFGS
      B_k = BFGS
    else
      B_k = VecMedian(1e-6, abs(diag(H_k)), 1e6)
      B_k = VecReciprocal(B_k)
    end
    w = x_k - VecMedian(x_k - 0.001*B_k*g_k)
    eps = min(eps, norm2(w))
    determine the active and inactive index sets such that
      L = {i : (x_k)_i <= l_i + eps && (g_k)_i > 0}
      U = {i : (x_k)_i >= u_i - eps && (g_k)_i < 0}
      F = {i : l_i = (x_k)_i = u_i}
      A = {L + U + F}
      IA = {i : i not in A}

    generate the reduced system Hr_k dr_k = -gr_k for variables in IA
    if p > 0
      Hr_k += p*I
    end
    if pc_type == BNK_PC_BFGS && scale_type == BNK_SCALE_PHESS
      D = VecMedian(1e-6, abs(diag(Hr_k)), 1e6)
      scale BFGS with VecReciprocal(D)
    end
    solve Hr_k dr_k = -gr_k
    set d_k to (l - x) for variables in L, (u - x) for variables in U, and 0 for variables in F

    if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
      dr_k = -BFGS*gr_k for variables in IA
      if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
        reset the BFGS preconditioner
        calculate scale delta and apply it to BFGS
        dr_k = -BFGS*gr_k for variables in IA
        if dot(d_k, pg_k) >= 0 || norm(d_k) == NaN || norm(d_k) == Inf
          dr_k = -gr_k for variables in IA
        end
      end
    end

    x_{k+1}, f_{k+1}, g_{k+1}, ls_failed = TaoBNKPerformLineSearch()
    if ls_failed
      f_{k+1} = f_k
      x_{k+1} = x_k
      g_{k+1} = g_k
      pg_{k+1} = pg_k
      terminate
    else
      pg_{k+1} = project(g_{k+1})
      count the accepted step type (Newton, BFGS, scaled grad or grad)
    end

    niter += 1
    check convergence at pg_{k+1}
 end
*/
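
/*
  A minimal sketch, not called anywhere in this solver: the Bertsekas-style
  active-set classification from the pseudocode above, written over raw arrays
  for clarity. The helper name and signature are hypothetical illustrations;
  the production code path is TaoBNKEstimateActiveSet(), which builds index
  sets on Vec and IS objects.
*/
static inline void BNLSClassifyActiveSketch(PetscInt n, const PetscReal x[], const PetscReal g[], const PetscReal xl[], const PetscReal xu[], PetscReal eps, PetscBool active[])
{
  for (PetscInt i = 0; i < n; ++i) {
    const PetscBool lower = (PetscBool)(x[i] <= xl[i] + eps && g[i] > 0.0); /* lower set L */
    const PetscBool upper = (PetscBool)(x[i] >= xu[i] - eps && g[i] < 0.0); /* upper set U */
    const PetscBool fixed = (PetscBool)(xl[i] == x[i] && x[i] == xu[i]);    /* fixed set F */

    active[i] = (PetscBool)(lower || upper || fixed); /* A = L + U + F; all other indices are inactive (IA) */
  }
}
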
PetscErrorCode TaoSolve_BNLS(Tao tao)
{
  TAO_BNK                     *bnk = (TAO_BNK *)tao->data;
  KSPConvergedReason           ksp_reason;
  TaoLineSearchConvergedReason ls_reason;
  PetscReal                    steplen = 1.0, resnorm;
  PetscBool                    cgTerminate, needH = PETSC_TRUE, stepAccepted, shift = PETSC_TRUE;
  PetscInt                     stepType;

  PetscFunctionBegin;
  /* Initialize the preconditioner, KSP solver and trust radius/line search */
  tao->reason = TAO_CONTINUE_ITERATING;
  PetscCall(TaoBNKInitialize(tao, bnk->init_type, &needH));
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);

  /* Have not converged; continue with Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      PetscUseTypeMethod(tao, update, tao->niter, tao->user_update);
      PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &bnk->f, bnk->unprojected_gradient));
    }
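
    /* Skip the Hessian work when no update is needed or when every variable is
       active at a bound (bnk->inactive_idx is NULL), since there would be no
       reduced Newton system to build */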
    if (needH && bnk->inactive_idx) {
      /* Take BNCG steps (if enabled) to trade off Hessian evaluations for more gradient evaluations */
      PetscCall(TaoBNKTakeCGSteps(tao, &cgTerminate));
      if (cgTerminate) {
        tao->reason = bnk->bncg->reason;
        PetscFunctionReturn(PETSC_SUCCESS);
      }

      /* Compute the Hessian and update the BFGS preconditioner at the new iterate */
      PetscCall((*bnk->computehessian)(tao));
      needH = PETSC_FALSE;
    }

    /* Use the common BNK kernel to compute the safeguarded Newton step (for inactive variables only) */
    PetscCall((*bnk->computestep)(tao, shift, &ksp_reason, &stepType));
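    /* Safeguarding follows the cascade in the pseudocode above: a Newton step
       that is not a descent direction (or has a NaN/Inf norm) is replaced by a
       BFGS step, then a scaled gradient step, then the plain gradient */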
    PetscCall(TaoBNKSafeguardStep(tao, ksp_reason, &stepType));

    /* Store current solution before it changes */
    bnk->fold = bnk->f;
    PetscCall(VecCopy(tao->solution, bnk->Xold));
    PetscCall(VecCopy(tao->gradient, bnk->Gold));
    PetscCall(VecCopy(bnk->unprojected_gradient, bnk->unprojected_gradient_old));

    /* Trigger the line search */
    PetscCall(TaoBNKPerformLineSearch(tao, &stepType, &steplen, &ls_reason));

    if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
      /* Failed to find an improving point */
      needH  = PETSC_FALSE;
      bnk->f = bnk->fold;
      PetscCall(VecCopy(bnk->Xold, tao->solution));
      PetscCall(VecCopy(bnk->Gold, tao->gradient));
      PetscCall(VecCopy(bnk->unprojected_gradient_old, bnk->unprojected_gradient));
      steplen     = 0.0;
      tao->reason = TAO_DIVERGED_LS_FAILURE;
    } else {
      /* New iterate, so we need to recompute the Hessian */
      needH = PETSC_TRUE;
      /* Compute the projected gradient */
      PetscCall(TaoBNKEstimateActiveSet(tao, bnk->as_type));
      PetscCall(VecCopy(bnk->unprojected_gradient, tao->gradient));
      if (bnk->active_idx) PetscCall(VecISSet(tao->gradient, bnk->active_idx, 0.0));
      PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm));
      /* Update the trust radius based on the step length */
      PetscCall(TaoBNKUpdateTrustRadius(tao, 0.0, 0.0, BNK_UPDATE_STEP, stepType, &stepAccepted));
      /* Count the accepted step type */
      PetscCall(TaoBNKAddStepCounts(tao, stepType));
      /* Activate BNCG recycling for the next iteration */
      PetscCall(TaoSetRecycleHistory(bnk->bncg, PETSC_TRUE));
    }

    /* Check for termination */
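    /* VecFischer evaluates the Fischer-Burmeister function
       phi(a, b) = sqrt(a^2 + b^2) - a - b componentwise for the bound
       constraints; its 2-norm vanishes exactly when the first-order optimality
       (complementarity) conditions hold, so it serves as the residual */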
    PetscCall(VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->W));
    PetscCall(VecNorm(bnk->W, NORM_2, &resnorm));
    PetscCheck(!PetscIsInfOrNanReal(resnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
    ++tao->niter;
    PetscCall(TaoLogConvergenceHistory(tao, bnk->f, resnorm, 0.0, tao->ksp_its));
    PetscCall(TaoMonitor(tao, tao->niter, bnk->f, resnorm, 0.0, steplen));
    PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*------------------------------------------------------------*/

/*MC
  TAOBNLS - Bounded Newton Line Search for nonlinear minimization with bound constraints.

  Options Database Keys:
+ -tao_bnk_max_cg_its - maximum number of bounded conjugate-gradient iterations taken in each Newton loop
. -tao_bnk_init_type - trust radius initialization method ("constant", "direction", "interpolation")
. -tao_bnk_update_type - trust radius update method ("step", "direction", "interpolation")
- -tao_bnk_as_type - active-set estimation method ("none", "bertsekas")

  Level: beginner
M*/
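
/*
  Usage sketch, assuming the standard Tao driver pattern (FormFunctionGradient,
  FormHessian, and the user context are hypothetical application routines; x,
  xl, xu, and H are a solution vector, bound vectors, and a Hessian matrix
  created by the caller):

    Tao tao;
    PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
    PetscCall(TaoSetType(tao, TAOBNLS));
    PetscCall(TaoSetSolution(tao, x));
    PetscCall(TaoSetVariableBounds(tao, xl, xu));
    PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user));
    PetscCall(TaoSetHessian(tao, H, H, FormHessian, &user));
    PetscCall(TaoSetFromOptions(tao));
    PetscCall(TaoSolve(tao));
    PetscCall(TaoDestroy(&tao));
*/
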
PETSC_EXTERN PetscErrorCode TaoCreate_BNLS(Tao tao)
{
  TAO_BNK *bnk;

  PetscFunctionBegin;
  PetscCall(TaoCreate_BNK(tao));
  tao->ops->solve = TaoSolve_BNLS;

  bnk              = (TAO_BNK *)tao->data;
  bnk->init_type   = BNK_INIT_DIRECTION;
  bnk->update_type = BNK_UPDATE_STEP; /* trust region updates based on line search step length */
  PetscFunctionReturn(PETSC_SUCCESS);
}