Actual source code: ntr.c
1: #include <../src/tao/unconstrained/impls/ntr/ntrimpl.h>
3: #include <petscksp.h>
5: #define NTR_INIT_CONSTANT 0
6: #define NTR_INIT_DIRECTION 1
7: #define NTR_INIT_INTERPOLATION 2
8: #define NTR_INIT_TYPES 3
10: #define NTR_UPDATE_REDUCTION 0
11: #define NTR_UPDATE_INTERPOLATION 1
12: #define NTR_UPDATE_TYPES 2
14: static const char *NTR_INIT[64] = {"constant", "direction", "interpolation"};
16: static const char *NTR_UPDATE[64] = {"reduction", "interpolation"};
18: /*
19: TaoSolve_NTR - Implements Newton's Method with a trust region approach
20: for solving unconstrained minimization problems.
22: The basic algorithm is taken from MINPACK-2 (dstrn).
24: TaoSolve_NTR computes a local minimizer of a twice differentiable function
25: f by applying a trust region variant of Newton's method. At each stage
26: of the algorithm, we use the preconditioned conjugate gradient method to
27: determine an approximate minimizer of the quadratic model
29: q(s) = <g, s> + 0.5 <s, Hs>
31: subject to the trust region constraint
33: || s ||_M <= radius,
35: where radius is the trust region radius and M is a symmetric positive
36: definite matrix (the preconditioner). Here g is the gradient and H
37: is the Hessian matrix.
39: Note: TaoSolve_NTR MUST use one of the iterative solvers KSPNASH, KSPSTCG,
40: or KSPGLTR. This routine checks that the KSP is of one of these types
41: and raises an error otherwise.
42: */
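/*
   For illustration only (not part of the implementation below): a minimal sketch of how an
   application might select this solver. The names x, H, user, FormFunctionGradient, and
   FormHessian are placeholders assumed for the example; the TAO calls are the standard
   solver-setup routines. The KSP inside the solver defaults to KSPSTCG (set in
   TaoCreate_NTR below) and can be switched to KSPNASH or KSPGLTR at runtime, e.g. with
   -tao_ntr_ksp_type gltr, which follows from the "tao_ntr_" options prefix given to the KSP.

     Tao tao;
     PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
     PetscCall(TaoSetType(tao, TAONTR));
     PetscCall(TaoSetSolution(tao, x));
     PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user));
     PetscCall(TaoSetHessian(tao, H, H, FormHessian, &user));
     PetscCall(TaoSetFromOptions(tao));
     PetscCall(TaoSolve(tao));
     PetscCall(TaoDestroy(&tao));
*/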
43: static PetscErrorCode TaoSolve_NTR(Tao tao)
44: {
45: TAO_NTR *tr = (TAO_NTR *)tao->data;
46: KSPType ksp_type;
47: PetscBool is_nash, is_stcg, is_gltr, is_bfgs, is_jacobi, is_symmetric, sym_set;
48: KSPConvergedReason ksp_reason;
49: PC pc;
50: PetscReal fmin, ftrial, prered, actred, kappa, sigma, beta;
51: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
52: PetscReal f, gnorm;
54: PetscReal norm_d;
55: PetscInt needH;
57: PetscInt i_max = 5;
58: PetscInt j_max = 1;
59: PetscInt i, j, N, n, its;
61: PetscFunctionBegin;
62: if (tao->XL || tao->XU || tao->ops->computebounds) PetscCall(PetscInfo(tao, "WARNING: Variable bounds have been set but will be ignored by ntr algorithm\n"));
64: PetscCall(KSPGetType(tao->ksp, &ksp_type));
65: PetscCall(PetscStrcmp(ksp_type, KSPNASH, &is_nash));
66: PetscCall(PetscStrcmp(ksp_type, KSPSTCG, &is_stcg));
67: PetscCall(PetscStrcmp(ksp_type, KSPGLTR, &is_gltr));
68: PetscCheck(is_nash || is_stcg || is_gltr, PetscObjectComm((PetscObject)tao), PETSC_ERR_SUP, "TAO_NTR requires nash, stcg, or gltr for the KSP");
70: /* Initialize the radius and modify if it is too large or small */
71: tao->trust = tao->trust0;
72: tao->trust = PetscMax(tao->trust, tr->min_radius);
73: tao->trust = PetscMin(tao->trust, tr->max_radius);
75: /* Allocate the vectors needed for the BFGS approximation */
76: PetscCall(KSPGetPC(tao->ksp, &pc));
77: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs));
78: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi));
79: if (is_bfgs) {
80: tr->bfgs_pre = pc;
81: PetscCall(PCLMVMGetMatLMVM(tr->bfgs_pre, &tr->M));
82: PetscCall(VecGetLocalSize(tao->solution, &n));
83: PetscCall(VecGetSize(tao->solution, &N));
84: PetscCall(MatSetSizes(tr->M, n, n, N, N));
85: PetscCall(MatLMVMAllocate(tr->M, tao->solution, tao->gradient));
86: PetscCall(MatIsSymmetricKnown(tr->M, &sym_set, &is_symmetric));
87: PetscCheck(sym_set && is_symmetric, PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
88: } else if (is_jacobi) PetscCall(PCJacobiSetUseAbs(pc, PETSC_TRUE));
90: /* Check convergence criteria */
91: PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient));
92: PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm));
93: PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
94: needH = 1;
96: tao->reason = TAO_CONTINUE_ITERATING;
97: PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
98: PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0));
99: PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
100: if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);
102: /* Initialize trust-region radius */
103: switch (tr->init_type) {
104: case NTR_INIT_CONSTANT:
105: /* Use the initial radius specified */
106: break;
108: case NTR_INIT_INTERPOLATION:
109: /* Refine the specified initial radius using interpolation */
110: max_radius = 0.0;
112: for (j = 0; j < j_max; ++j) {
113: fmin = f;
114: sigma = 0.0;
116: if (needH) {
117: PetscCall(TaoComputeHessian(tao, tao->solution, tao->hessian, tao->hessian_pre));
118: needH = 0;
119: }
121: for (i = 0; i < i_max; ++i) {
122: PetscCall(VecCopy(tao->solution, tr->W));
123: PetscCall(VecAXPY(tr->W, -tao->trust / gnorm, tao->gradient));
124: PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));
126: if (PetscIsInfOrNanReal(ftrial)) {
127: tau = tr->gamma1_i;
128: } else {
129: if (ftrial < fmin) {
130: fmin = ftrial;
131: sigma = -tao->trust / gnorm;
132: }
134: PetscCall(MatMult(tao->hessian, tao->gradient, tao->stepdirection));
135: PetscCall(VecDot(tao->gradient, tao->stepdirection, &prered));
137: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
138: actred = f - ftrial;
139: if ((PetscAbsScalar(actred) <= tr->epsilon) && (PetscAbsScalar(prered) <= tr->epsilon)) {
140: kappa = 1.0;
141: } else {
142: kappa = actred / prered;
143: }
145: tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
146: tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
147: tau_min = PetscMin(tau_1, tau_2);
148: tau_max = PetscMax(tau_1, tau_2);
150: if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu1_i) {
151: /* Great agreement */
152: max_radius = PetscMax(max_radius, tao->trust);
154: if (tau_max < 1.0) {
155: tau = tr->gamma3_i;
156: } else if (tau_max > tr->gamma4_i) {
157: tau = tr->gamma4_i;
158: } else {
159: tau = tau_max;
160: }
161: } else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu2_i) {
162: /* Good agreement */
163: max_radius = PetscMax(max_radius, tao->trust);
165: if (tau_max < tr->gamma2_i) {
166: tau = tr->gamma2_i;
167: } else if (tau_max > tr->gamma3_i) {
168: tau = tr->gamma3_i;
169: } else {
170: tau = tau_max;
171: }
172: } else {
173: /* Not good agreement */
174: if (tau_min > 1.0) {
175: tau = tr->gamma2_i;
176: } else if (tau_max < tr->gamma1_i) {
177: tau = tr->gamma1_i;
178: } else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
179: tau = tr->gamma1_i;
180: } else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
181: tau = tau_1;
182: } else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < tr->gamma1_i) || (tau_1 >= 1.0))) {
183: tau = tau_2;
184: } else {
185: tau = tau_max;
186: }
187: }
188: }
189: tao->trust = tau * tao->trust;
190: }
192: if (fmin < f) {
193: f = fmin;
194: PetscCall(VecAXPY(tao->solution, sigma, tao->gradient));
195: PetscCall(TaoComputeGradient(tao, tao->solution, tao->gradient));
197: PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm));
198: PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
199: needH = 1;
201: PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
202: PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0));
203: PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
204: if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);
205: }
206: }
207: tao->trust = PetscMax(tao->trust, max_radius);
209: /* Modify the radius if it is too large or small */
210: tao->trust = PetscMax(tao->trust, tr->min_radius);
211: tao->trust = PetscMin(tao->trust, tr->max_radius);
212: break;
214: default:
215: /* Norm of the first direction will initialize radius */
216: tao->trust = 0.0;
217: break;
218: }
220: /* Have not converged; continue with Newton method */
221: while (tao->reason == TAO_CONTINUE_ITERATING) {
222: /* Call general purpose update function */
223: PetscTryTypeMethod(tao, update, tao->niter, tao->user_update);
224: ++tao->niter;
225: tao->ksp_its = 0;
226: /* Compute the Hessian */
227: if (needH) {
228: PetscCall(TaoComputeHessian(tao, tao->solution, tao->hessian, tao->hessian_pre));
229: needH = 0;
230: }
232: if (tr->bfgs_pre) {
233: /* Update the limited memory preconditioner */
234: PetscCall(MatLMVMUpdate(tr->M, tao->solution, tao->gradient));
235: }
237: while (tao->reason == TAO_CONTINUE_ITERATING) {
238: PetscCall(KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre));
240: /* Solve the trust region subproblem */
241: PetscCall(KSPCGSetRadius(tao->ksp, tao->trust));
242: PetscCall(KSPSolve(tao->ksp, tao->gradient, tao->stepdirection));
243: PetscCall(KSPGetIterationNumber(tao->ksp, &its));
244: tao->ksp_its += its;
245: tao->ksp_tot_its += its;
246: PetscCall(KSPCGGetNormD(tao->ksp, &norm_d));
248: if (0.0 == tao->trust) {
249: /* Radius was uninitialized; use the norm of the direction */
250: if (norm_d > 0.0) {
251: tao->trust = norm_d;
253: /* Modify the radius if it is too large or small */
254: tao->trust = PetscMax(tao->trust, tr->min_radius);
255: tao->trust = PetscMin(tao->trust, tr->max_radius);
256: } else {
257: /* The direction was bad; set radius to default value and re-solve
258: the trust-region subproblem to get a direction */
259: tao->trust = tao->trust0;
261: /* Modify the radius if it is too large or small */
262: tao->trust = PetscMax(tao->trust, tr->min_radius);
263: tao->trust = PetscMin(tao->trust, tr->max_radius);
265: PetscCall(KSPCGSetRadius(tao->ksp, tao->trust));
266: PetscCall(KSPSolve(tao->ksp, tao->gradient, tao->stepdirection));
267: PetscCall(KSPGetIterationNumber(tao->ksp, &its));
268: tao->ksp_its += its;
269: tao->ksp_tot_its += its;
270: PetscCall(KSPCGGetNormD(tao->ksp, &norm_d));
272: PetscCheck(norm_d != 0.0, PetscObjectComm((PetscObject)tao), PETSC_ERR_PLIB, "Initial direction zero");
273: }
274: }
275: PetscCall(VecScale(tao->stepdirection, -1.0));
276: PetscCall(KSPGetConvergedReason(tao->ksp, &ksp_reason));
277: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (tr->bfgs_pre)) {
278: /* Preconditioner is numerically indefinite; reset the
279: approximation if using BFGS preconditioning. */
280: PetscCall(MatLMVMReset(tr->M, PETSC_FALSE));
281: PetscCall(MatLMVMUpdate(tr->M, tao->solution, tao->gradient));
282: }
284: if (NTR_UPDATE_REDUCTION == tr->update_type) {
285: /* Get predicted reduction */
286: PetscCall(KSPCGGetObjFcn(tao->ksp, &prered));
287: if (prered >= 0.0) {
288: /* The predicted reduction has the wrong sign. This cannot
289: happen in infinite precision arithmetic. Step should
290: be rejected! */
291: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
292: } else {
293: /* Compute trial step and function value */
294: PetscCall(VecCopy(tao->solution, tr->W));
295: PetscCall(VecAXPY(tr->W, 1.0, tao->stepdirection));
296: PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));
298: if (PetscIsInfOrNanReal(ftrial)) {
299: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
300: } else {
301: /* Compute the actual reduction */
302: actred = f - ftrial;
303: prered = -prered;
304: if ((PetscAbsScalar(actred) <= tr->epsilon) && (PetscAbsScalar(prered) <= tr->epsilon)) {
305: kappa = 1.0;
306: } else {
307: kappa = actred / prered;
308: }
310: /* Accept or reject the step and update radius */
311: if (kappa < tr->eta1) {
312: /* Reject the step */
313: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
314: } else {
315: /* Accept the step */
316: if (kappa < tr->eta2) {
317: /* Marginal bad step */
318: tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
319: } else if (kappa < tr->eta3) {
320: /* Reasonable step */
321: tao->trust = tr->alpha3 * tao->trust;
322: } else if (kappa < tr->eta4) {
323: /* Good step */
324: tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
325: } else {
326: /* Very good step */
327: tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
328: }
329: break;
330: }
331: }
332: }
333: } else {
334: /* Get predicted reduction */
335: PetscCall(KSPCGGetObjFcn(tao->ksp, &prered));
336: if (prered >= 0.0) {
337: /* The predicted reduction has the wrong sign. This cannot
338: happen in infinite precision arithmetic. Step should
339: be rejected! */
340: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
341: } else {
342: PetscCall(VecCopy(tao->solution, tr->W));
343: PetscCall(VecAXPY(tr->W, 1.0, tao->stepdirection));
344: PetscCall(TaoComputeObjective(tao, tr->W, &ftrial));
345: if (PetscIsInfOrNanReal(ftrial)) {
346: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
347: } else {
348: PetscCall(VecDot(tao->gradient, tao->stepdirection, &beta));
349: actred = f - ftrial;
350: prered = -prered;
351: if ((PetscAbsScalar(actred) <= tr->epsilon) && (PetscAbsScalar(prered) <= tr->epsilon)) {
352: kappa = 1.0;
353: } else {
354: kappa = actred / prered;
355: }
357: tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
358: tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
359: tau_min = PetscMin(tau_1, tau_2);
360: tau_max = PetscMax(tau_1, tau_2);
362: if (kappa >= 1.0 - tr->mu1) {
363: /* Great agreement; accept step and update radius */
364: if (tau_max < 1.0) {
365: tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
366: } else if (tau_max > tr->gamma4) {
367: tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
368: } else {
369: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
370: }
371: break;
372: } else if (kappa >= 1.0 - tr->mu2) {
373: /* Good agreement */
375: if (tau_max < tr->gamma2) {
376: tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
377: } else if (tau_max > tr->gamma3) {
378: tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
379: } else if (tau_max < 1.0) {
380: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
381: } else {
382: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
383: }
384: break;
385: } else {
386: /* Not good agreement */
387: if (tau_min > 1.0) {
388: tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
389: } else if (tau_max < tr->gamma1) {
390: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
391: } else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
392: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
393: } else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) && ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
394: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
395: } else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) && ((tau_1 < tr->gamma1) || (tau_1 >= 1.0))) {
396: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
397: } else {
398: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
399: }
400: }
401: }
402: }
403: }
405: /* The computed step was not good and the radius was decreased.
406: Log progress and run the convergence test, which monitors the radius and may terminate. */
407: PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
408: PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust));
409: PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
410: }
412: /* The radius may have been increased; modify if it is too large */
413: tao->trust = PetscMin(tao->trust, tr->max_radius);
415: if (tao->reason == TAO_CONTINUE_ITERATING) {
416: PetscCall(VecCopy(tr->W, tao->solution));
417: f = ftrial;
418: PetscCall(TaoComputeGradient(tao, tao->solution, tao->gradient));
419: PetscCall(TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm));
420: PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
421: needH = 1;
422: PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
423: PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust));
424: PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
425: }
426: }
427: PetscFunctionReturn(PETSC_SUCCESS);
428: }
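/*
   A worked illustration (not from the source) of the NTR_UPDATE_REDUCTION branch above,
   using the default parameters assigned in TaoCreate_NTR below (eta1 = 1.0e-4, eta2 = 0.25,
   eta3 = 0.50, eta4 = 0.90, alpha1 = 0.25, alpha2 = 0.50, alpha3 = 1.0, alpha4 = 2.0,
   alpha5 = 4.0). Suppose the returned step has norm_d = 1.0 and the current radius is
   tao->trust = 2.0, with kappa = actred / prered:

     kappa = 0.30   ->  eta2 <= kappa < eta3 (reasonable step):
                        trust = alpha3 * trust = 2.0 (unchanged)
     kappa = 0.95   ->  kappa >= eta4 (very good step):
                        trust = max(alpha5 * norm_d, trust) = max(4.0, 2.0) = 4.0
     kappa = 1.0e-5 ->  kappa < eta1 (step rejected):
                        trust = alpha1 * min(trust, norm_d) = 0.25 * 1.0 = 0.25
*/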
430: /*------------------------------------------------------------*/
431: static PetscErrorCode TaoSetUp_NTR(Tao tao)
432: {
433: TAO_NTR *tr = (TAO_NTR *)tao->data;
435: PetscFunctionBegin;
436: if (!tao->gradient) PetscCall(VecDuplicate(tao->solution, &tao->gradient));
437: if (!tao->stepdirection) PetscCall(VecDuplicate(tao->solution, &tao->stepdirection));
438: if (!tr->W) PetscCall(VecDuplicate(tao->solution, &tr->W));
440: tr->bfgs_pre = NULL;
441: tr->M = NULL;
442: PetscFunctionReturn(PETSC_SUCCESS);
443: }
445: /*------------------------------------------------------------*/
446: static PetscErrorCode TaoDestroy_NTR(Tao tao)
447: {
448: TAO_NTR *tr = (TAO_NTR *)tao->data;
450: PetscFunctionBegin;
451: if (tao->setupcalled) PetscCall(VecDestroy(&tr->W));
452: PetscCall(KSPDestroy(&tao->ksp));
453: PetscCall(PetscFree(tao->data));
454: PetscFunctionReturn(PETSC_SUCCESS);
455: }
457: /*------------------------------------------------------------*/
458: static PetscErrorCode TaoSetFromOptions_NTR(Tao tao, PetscOptionItems *PetscOptionsObject)
459: {
460: TAO_NTR *tr = (TAO_NTR *)tao->data;
462: PetscFunctionBegin;
463: PetscOptionsHeadBegin(PetscOptionsObject, "Newton trust region method for unconstrained optimization");
464: PetscCall(PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type, NULL));
465: PetscCall(PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type, NULL));
466: PetscCall(PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1, NULL));
467: PetscCall(PetscOptionsReal("-tao_ntr_eta2", "", "", tr->eta2, &tr->eta2, NULL));
468: PetscCall(PetscOptionsReal("-tao_ntr_eta3", "", "", tr->eta3, &tr->eta3, NULL));
469: PetscCall(PetscOptionsReal("-tao_ntr_eta4", "", "", tr->eta4, &tr->eta4, NULL));
470: PetscCall(PetscOptionsReal("-tao_ntr_alpha1", "", "", tr->alpha1, &tr->alpha1, NULL));
471: PetscCall(PetscOptionsReal("-tao_ntr_alpha2", "", "", tr->alpha2, &tr->alpha2, NULL));
472: PetscCall(PetscOptionsReal("-tao_ntr_alpha3", "", "", tr->alpha3, &tr->alpha3, NULL));
473: PetscCall(PetscOptionsReal("-tao_ntr_alpha4", "", "", tr->alpha4, &tr->alpha4, NULL));
474: PetscCall(PetscOptionsReal("-tao_ntr_alpha5", "", "", tr->alpha5, &tr->alpha5, NULL));
475: PetscCall(PetscOptionsReal("-tao_ntr_mu1", "", "", tr->mu1, &tr->mu1, NULL));
476: PetscCall(PetscOptionsReal("-tao_ntr_mu2", "", "", tr->mu2, &tr->mu2, NULL));
477: PetscCall(PetscOptionsReal("-tao_ntr_gamma1", "", "", tr->gamma1, &tr->gamma1, NULL));
478: PetscCall(PetscOptionsReal("-tao_ntr_gamma2", "", "", tr->gamma2, &tr->gamma2, NULL));
479: PetscCall(PetscOptionsReal("-tao_ntr_gamma3", "", "", tr->gamma3, &tr->gamma3, NULL));
480: PetscCall(PetscOptionsReal("-tao_ntr_gamma4", "", "", tr->gamma4, &tr->gamma4, NULL));
481: PetscCall(PetscOptionsReal("-tao_ntr_theta", "", "", tr->theta, &tr->theta, NULL));
482: PetscCall(PetscOptionsReal("-tao_ntr_mu1_i", "", "", tr->mu1_i, &tr->mu1_i, NULL));
483: PetscCall(PetscOptionsReal("-tao_ntr_mu2_i", "", "", tr->mu2_i, &tr->mu2_i, NULL));
484: PetscCall(PetscOptionsReal("-tao_ntr_gamma1_i", "", "", tr->gamma1_i, &tr->gamma1_i, NULL));
485: PetscCall(PetscOptionsReal("-tao_ntr_gamma2_i", "", "", tr->gamma2_i, &tr->gamma2_i, NULL));
486: PetscCall(PetscOptionsReal("-tao_ntr_gamma3_i", "", "", tr->gamma3_i, &tr->gamma3_i, NULL));
487: PetscCall(PetscOptionsReal("-tao_ntr_gamma4_i", "", "", tr->gamma4_i, &tr->gamma4_i, NULL));
488: PetscCall(PetscOptionsReal("-tao_ntr_theta_i", "", "", tr->theta_i, &tr->theta_i, NULL));
489: PetscCall(PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius, NULL));
490: PetscCall(PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius, NULL));
491: PetscCall(PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon, NULL));
492: PetscOptionsHeadEnd();
493: PetscCall(KSPSetFromOptions(tao->ksp));
494: PetscFunctionReturn(PETSC_SUCCESS);
495: }
497: /*------------------------------------------------------------*/
498: /*MC
499: TAONTR - Newton's method with trust region for unconstrained minimization.
500: At each iteration, the Newton trust region method solves the trust-region subproblem
501: min_d 0.5 d^T H_k d + g_k^T d, subject to ||d|| <= Delta_k.
502: NTR requires a KSP solver that supports a trust region radius (KSPNASH, KSPSTCG, or KSPGLTR).
504: Options Database Keys:
505: + -tao_ntr_init_type - "constant","direction","interpolation"
506: . -tao_ntr_update_type - "reduction","interpolation"
507: . -tao_ntr_min_radius - lower bound on trust region radius
508: . -tao_ntr_max_radius - upper bound on trust region radius
509: . -tao_ntr_epsilon - tolerance for accepting actual / predicted reduction
510: . -tao_ntr_mu1_i - mu1 interpolation init factor
511: . -tao_ntr_mu2_i - mu2 interpolation init factor
512: . -tao_ntr_gamma1_i - gamma1 interpolation init factor
513: . -tao_ntr_gamma2_i - gamma2 interpolation init factor
514: . -tao_ntr_gamma3_i - gamma3 interpolation init factor
515: . -tao_ntr_gamma4_i - gamma4 interpolation init factor
516: . -tao_ntr_theta_i - theta interpolation init factor
517: . -tao_ntr_eta1 - eta1 reduction update factor
518: . -tao_ntr_eta2 - eta2 reduction update factor
519: . -tao_ntr_eta3 - eta3 reduction update factor
520: . -tao_ntr_eta4 - eta4 reduction update factor
521: . -tao_ntr_alpha1 - alpha1 reduction update factor
522: . -tao_ntr_alpha2 - alpha2 reduction update factor
523: . -tao_ntr_alpha3 - alpha3 reduction update factor
524: . -tao_ntr_alpha4 - alpha4 reduction update factor
525: . -tao_ntr_alpha5 - alpha5 reduction update factor
526: . -tao_ntr_mu1 - mu1 interpolation update
527: . -tao_ntr_mu2 - mu2 interpolation update
528: . -tao_ntr_gamma1 - gamma1 interpolation update
529: . -tao_ntr_gamma2 - gamma2 interpolation update
530: . -tao_ntr_gamma3 - gamma3 interpolation update
531: . -tao_ntr_gamma4 - gamma4 interpolation update
532: - -tao_ntr_theta - theta interpolation update
534: Level: beginner
535: M*/
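/*
   For illustration: a plausible command line exercising the keys above (the executable
   name ./myapp is a placeholder; -tao_ntr_ksp_type is assumed from the "tao_ntr_" prefix
   attached to the solver's KSP in TaoCreate_NTR below):

     ./myapp -tao_type ntr -tao_ntr_init_type interpolation -tao_ntr_update_type reduction \
             -tao_ntr_ksp_type gltr -tao_ntr_max_radius 1.0e3 -tao_monitor
*/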
537: PETSC_EXTERN PetscErrorCode TaoCreate_NTR(Tao tao)
538: {
539: TAO_NTR *tr;
541: PetscFunctionBegin;
543: PetscCall(PetscNew(&tr));
545: tao->ops->setup = TaoSetUp_NTR;
546: tao->ops->solve = TaoSolve_NTR;
547: tao->ops->setfromoptions = TaoSetFromOptions_NTR;
548: tao->ops->destroy = TaoDestroy_NTR;
550: /* Override default settings (unless already changed) */
551: if (!tao->max_it_changed) tao->max_it = 50;
552: if (!tao->trust0_changed) tao->trust0 = 100.0;
553: tao->data = (void *)tr;
555: /* Standard trust region update parameters */
556: tr->eta1 = 1.0e-4;
557: tr->eta2 = 0.25;
558: tr->eta3 = 0.50;
559: tr->eta4 = 0.90;
561: tr->alpha1 = 0.25;
562: tr->alpha2 = 0.50;
563: tr->alpha3 = 1.00;
564: tr->alpha4 = 2.00;
565: tr->alpha5 = 4.00;
567: /* Interpolation trust region update parameters */
568: tr->mu1 = 0.10;
569: tr->mu2 = 0.50;
571: tr->gamma1 = 0.25;
572: tr->gamma2 = 0.50;
573: tr->gamma3 = 2.00;
574: tr->gamma4 = 4.00;
576: tr->theta = 0.05;
578: /* Interpolation parameters for initialization */
579: tr->mu1_i = 0.35;
580: tr->mu2_i = 0.50;
582: tr->gamma1_i = 0.0625;
583: tr->gamma2_i = 0.50;
584: tr->gamma3_i = 2.00;
585: tr->gamma4_i = 5.00;
587: tr->theta_i = 0.25;
589: tr->min_radius = 1.0e-10;
590: tr->max_radius = 1.0e10;
591: tr->epsilon = 1.0e-6;
593: tr->init_type = NTR_INIT_INTERPOLATION;
594: tr->update_type = NTR_UPDATE_REDUCTION;
596: /* Set linear solver to default for trust region */
597: PetscCall(KSPCreate(((PetscObject)tao)->comm, &tao->ksp));
598: PetscCall(PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1));
599: PetscCall(KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix));
600: PetscCall(KSPAppendOptionsPrefix(tao->ksp, "tao_ntr_"));
601: PetscCall(KSPSetType(tao->ksp, KSPSTCG));
602: PetscFunctionReturn(PETSC_SUCCESS);
603: }