Actual source code: ntr.c
petsc-3.13.6 2020-09-29
1: #include <../src/tao/unconstrained/impls/ntr/ntrimpl.h>
3: #include <petscksp.h>
5: #define NTR_INIT_CONSTANT 0
6: #define NTR_INIT_DIRECTION 1
7: #define NTR_INIT_INTERPOLATION 2
8: #define NTR_INIT_TYPES 3
10: #define NTR_UPDATE_REDUCTION 0
11: #define NTR_UPDATE_INTERPOLATION 1
12: #define NTR_UPDATE_TYPES 2
14: static const char *NTR_INIT[64] = {"constant","direction","interpolation"};
16: static const char *NTR_UPDATE[64] = {"reduction","interpolation"};
18: /*
19: TaoSolve_NTR - Implements Newton's Method with a trust region approach
20: for solving unconstrained minimization problems.
22: The basic algorithm is taken from MINPACK-2 (dstrn).
24: TaoSolve_NTR computes a local minimizer of a twice differentiable function
25: f by applying a trust region variant of Newton's method. At each stage
26: of the algorithm, we use the preconditioned conjugate gradient method to
27: determine an approximate minimizer of the quadratic model
29: q(s) = 0.5 <s, Hs> + <g, s>
31: subject to the trust region constraint
33: || s ||_M <= radius,
35: where radius is the trust region radius and M is a symmetric positive
36: definite matrix (the preconditioner). Here g is the gradient and H
37: is the Hessian matrix.
39: Note: TaoSolve_NTR MUST use the iterative solver KSPNASH, KSPSTCG,
40: or KSPGLTR. KSPSTCG is set as the default in TaoCreate_NTR, and this
41: routine raises an error if the KSP is not one of these three types.
42: */
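/*
   In the update logic below, the computed step d is judged by the ratio

       kappa = actred / prered = (f(x) - f(x+d)) / (q(0) - q(d)),

   where q is the quadratic model above.  A kappa close to 1 indicates good
   agreement between the model and the objective, so the step is accepted and
   the radius may be enlarged; a small or negative kappa leads to rejection of
   the step and a smaller radius.
*/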
43: static PetscErrorCode TaoSolve_NTR(Tao tao)
44: {
45: TAO_NTR *tr = (TAO_NTR *)tao->data;
46: KSPType ksp_type;
47: PetscBool is_nash,is_stcg,is_gltr,is_bfgs,is_jacobi,is_symmetric,sym_set;
48: KSPConvergedReason ksp_reason;
49: PC pc;
50: PetscReal fmin, ftrial, prered, actred, kappa, sigma, beta;
51: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
52: PetscReal f, gnorm;
54: PetscReal norm_d;
55: PetscErrorCode ierr;
56: PetscInt bfgsUpdates = 0;
57: PetscInt needH;
59: PetscInt i_max = 5;
60: PetscInt j_max = 1;
61: PetscInt i, j, N, n, its;
64: if (tao->XL || tao->XU || tao->ops->computebounds) {
65: PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by the NTR algorithm\n");
66: }
68: KSPGetType(tao->ksp,&ksp_type);
69: PetscStrcmp(ksp_type,KSPNASH,&is_nash);
70: PetscStrcmp(ksp_type,KSPSTCG,&is_stcg);
71: PetscStrcmp(ksp_type,KSPGLTR,&is_gltr);
72: if (!is_nash && !is_stcg && !is_gltr) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_SUP,"TAO_NTR requires nash, stcg, or gltr for the KSP");
74: /* Initialize the radius and modify if it is too large or small */
75: tao->trust = tao->trust0;
76: tao->trust = PetscMax(tao->trust, tr->min_radius);
77: tao->trust = PetscMin(tao->trust, tr->max_radius);
79: /* Set up the BFGS (LMVM) preconditioner, if selected, and allocate its matrix */
80: KSPGetPC(tao->ksp, &pc);
81: PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
82: PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
83: if (is_bfgs) {
84: tr->bfgs_pre = pc;
85: PCLMVMGetMatLMVM(tr->bfgs_pre, &tr->M);
86: VecGetLocalSize(tao->solution, &n);
87: VecGetSize(tao->solution, &N);
88: MatSetSizes(tr->M, n, n, N, N);
89: MatLMVMAllocate(tr->M, tao->solution, tao->gradient);
90: MatIsSymmetricKnown(tr->M, &sym_set, &is_symmetric);
91: if (!sym_set || !is_symmetric) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
92: } else if (is_jacobi) {
93: PCJacobiSetUseAbs(pc,PETSC_TRUE);
94: }
96: /* Check convergence criteria */
97: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
98: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
99: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER,"User provided compute function generated Inf or NaN");
100: needH = 1;
102: tao->reason = TAO_CONTINUE_ITERATING;
103: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
104: TaoMonitor(tao,tao->niter,f,gnorm,0.0,1.0);
105: (*tao->ops->convergencetest)(tao,tao->cnvP);
106: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
108: /* Initialize trust-region radius */
109: switch (tr->init_type) {
110: case NTR_INIT_CONSTANT:
111: /* Use the initial radius specified */
112: break;
114: case NTR_INIT_INTERPOLATION:
115: /* Use interpolation to determine the initial radius */
116: max_radius = 0.0;
118: for (j = 0; j < j_max; ++j) {
119: fmin = f;
120: sigma = 0.0;
122: if (needH) {
123: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
124: needH = 0;
125: }
127: for (i = 0; i < i_max; ++i) {
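/* Trial point W = x - (trust/||g||) g: a steepest-descent step whose length equals the current radius */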
129: VecCopy(tao->solution, tr->W);
130: VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient);
131: TaoComputeObjective(tao, tr->W, &ftrial);
133: if (PetscIsInfOrNanReal(ftrial)) {
134: tau = tr->gamma1_i;
135: }
136: else {
137: if (ftrial < fmin) {
138: fmin = ftrial;
139: sigma = -tao->trust / gnorm;
140: }
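/* Predicted reduction of the quadratic model for this steepest-descent step:
   prered = trust*||g|| - 0.5*trust^2*(g^T H g)/||g||^2 */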
142: MatMult(tao->hessian, tao->gradient, tao->stepdirection);
143: VecDot(tao->gradient, tao->stepdirection, &prered);
145: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
146: actred = f - ftrial;
147: if ((PetscAbsScalar(actred) <= tr->epsilon) &&
148: (PetscAbsScalar(prered) <= tr->epsilon)) {
149: kappa = 1.0;
150: }
151: else {
152: kappa = actred / prered;
153: }
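/* Interpolation candidates for rescaling the radius, based on the actual and
   predicted reductions; theta_i is the interpolation parameter */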
155: tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
156: tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
157: tau_min = PetscMin(tau_1, tau_2);
158: tau_max = PetscMax(tau_1, tau_2);
160: if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu1_i) {
161: /* Great agreement */
162: max_radius = PetscMax(max_radius, tao->trust);
164: if (tau_max < 1.0) {
165: tau = tr->gamma3_i;
166: }
167: else if (tau_max > tr->gamma4_i) {
168: tau = tr->gamma4_i;
169: }
170: else {
171: tau = tau_max;
172: }
173: }
174: else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tr->mu2_i) {
175: /* Good agreement */
176: max_radius = PetscMax(max_radius, tao->trust);
178: if (tau_max < tr->gamma2_i) {
179: tau = tr->gamma2_i;
180: }
181: else if (tau_max > tr->gamma3_i) {
182: tau = tr->gamma3_i;
183: }
184: else {
185: tau = tau_max;
186: }
187: }
188: else {
189: /* Not good agreement */
190: if (tau_min > 1.0) {
191: tau = tr->gamma2_i;
192: }
193: else if (tau_max < tr->gamma1_i) {
194: tau = tr->gamma1_i;
195: }
196: else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
197: tau = tr->gamma1_i;
198: }
199: else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) &&
200: ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
201: tau = tau_1;
202: }
203: else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) &&
204: ((tau_1 < tr->gamma1_i) || (tau_1 >= 1.0))) {
205: tau = tau_2;
206: }
207: else {
208: tau = tau_max;
209: }
210: }
211: }
212: tao->trust = tau * tao->trust;
213: }
215: if (fmin < f) {
216: f = fmin;
217: VecAXPY(tao->solution, sigma, tao->gradient);
218: TaoComputeGradient(tao,tao->solution, tao->gradient);
220: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
221: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
222: needH = 1;
224: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
225: TaoMonitor(tao,tao->niter,f,gnorm,0.0,1.0);
226: (*tao->ops->convergencetest)(tao,tao->cnvP);
227: if (tao->reason != TAO_CONTINUE_ITERATING) {
228: return(0);
229: }
230: }
231: }
232: tao->trust = PetscMax(tao->trust, max_radius);
234: /* Modify the radius if it is too large or small */
235: tao->trust = PetscMax(tao->trust, tr->min_radius);
236: tao->trust = PetscMin(tao->trust, tr->max_radius);
237: break;
239: default:
240: /* Norm of the first direction will initialize radius */
241: tao->trust = 0.0;
242: break;
243: }
245: /* Have not converged; continue with Newton method */
246: while (tao->reason == TAO_CONTINUE_ITERATING) {
247: /* Call general purpose update function */
248: if (tao->ops->update) {
249: (*tao->ops->update)(tao, tao->niter, tao->user_update);
250: }
251: ++tao->niter;
252: tao->ksp_its=0;
253: /* Compute the Hessian */
254: if (needH) {
255: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
256: needH = 0;
257: }
259: if (tr->bfgs_pre) {
260: /* Update the limited memory preconditioner */
261: MatLMVMUpdate(tr->M, tao->solution, tao->gradient);
262: ++bfgsUpdates;
263: }
265: while (tao->reason == TAO_CONTINUE_ITERATING) {
266: KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
268: /* Solve the trust region subproblem */
269: KSPCGSetRadius(tao->ksp,tao->trust);
270: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
271: KSPGetIterationNumber(tao->ksp,&its);
272: tao->ksp_its+=its;
273: tao->ksp_tot_its+=its;
274: KSPCGGetNormD(tao->ksp, &norm_d);
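/* norm_d is the length of the computed step, measured in the norm the trust-region KSP uses for the radius constraint */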
276: if (0.0 == tao->trust) {
277: /* Radius was uninitialized; use the norm of the direction */
278: if (norm_d > 0.0) {
279: tao->trust = norm_d;
281: /* Modify the radius if it is too large or small */
282: tao->trust = PetscMax(tao->trust, tr->min_radius);
283: tao->trust = PetscMin(tao->trust, tr->max_radius);
284: }
285: else {
286: /* The direction was bad; set radius to default value and re-solve
287: the trust-region subproblem to get a direction */
288: tao->trust = tao->trust0;
290: /* Modify the radius if it is too large or small */
291: tao->trust = PetscMax(tao->trust, tr->min_radius);
292: tao->trust = PetscMin(tao->trust, tr->max_radius);
294: KSPCGSetRadius(tao->ksp,tao->trust);
295: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
296: KSPGetIterationNumber(tao->ksp,&its);
297: tao->ksp_its+=its;
298: tao->ksp_tot_its+=its;
299: KSPCGGetNormD(tao->ksp, &norm_d);
301: if (norm_d == 0.0) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_PLIB, "Initial direction zero");
302: }
303: }
304: VecScale(tao->stepdirection, -1.0);
305: KSPGetConvergedReason(tao->ksp, &ksp_reason);
306: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (tr->bfgs_pre)) {
307: /* Preconditioner is numerically indefinite; reset the
308: approximation if using BFGS preconditioning. */
309: MatLMVMReset(tr->M, PETSC_FALSE);
310: MatLMVMUpdate(tr->M, tao->solution, tao->gradient);
311: bfgsUpdates = 1;
312: }
314: if (NTR_UPDATE_REDUCTION == tr->update_type) {
315: /* Get predicted reduction */
316: KSPCGGetObjFcn(tao->ksp,&prered);
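/* The KSP objective value at the computed step should be negative; its negation is the predicted reduction of f */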
317: if (prered >= 0.0) {
318: /* The predicted reduction has the wrong sign. This cannot
319: happen in infinite precision arithmetic. Step should
320: be rejected! */
321: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
322: }
323: else {
324: /* Compute trial step and function value */
325: VecCopy(tao->solution,tr->W);
326: VecAXPY(tr->W, 1.0, tao->stepdirection);
327: TaoComputeObjective(tao, tr->W, &ftrial);
329: if (PetscIsInfOrNanReal(ftrial)) {
330: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
331: } else {
332: /* Compute the actual reduction */
333: actred = f - ftrial;
334: prered = -prered;
335: if ((PetscAbsScalar(actred) <= tr->epsilon) &&
336: (PetscAbsScalar(prered) <= tr->epsilon)) {
337: kappa = 1.0;
338: }
339: else {
340: kappa = actred / prered;
341: }
343: /* Accept or reject the step and update radius */
344: if (kappa < tr->eta1) {
345: /* Reject the step */
346: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
347: }
348: else {
349: /* Accept the step */
350: if (kappa < tr->eta2) {
351: /* Marginal bad step */
352: tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
353: }
354: else if (kappa < tr->eta3) {
355: /* Reasonable step */
356: tao->trust = tr->alpha3 * tao->trust;
357: }
358: else if (kappa < tr->eta4) {
359: /* Good step */
360: tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
361: }
362: else {
363: /* Very good step */
364: tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
365: }
366: break;
367: }
368: }
369: }
370: }
371: else {
372: /* Get predicted reduction */
373: KSPCGGetObjFcn(tao->ksp,&prered);
374: if (prered >= 0.0) {
375: /* The predicted reduction has the wrong sign. This cannot
376: happen in infinite precision arithmetic. Step should
377: be rejected! */
378: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
379: }
380: else {
381: VecCopy(tao->solution, tr->W);
382: VecAXPY(tr->W, 1.0, tao->stepdirection);
383: TaoComputeObjective(tao, tr->W, &ftrial);
384: if (PetscIsInfOrNanReal(ftrial)) {
385: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
386: }
387: else {
388: VecDot(tao->gradient, tao->stepdirection, &beta);
389: actred = f - ftrial;
390: prered = -prered;
391: if ((PetscAbsScalar(actred) <= tr->epsilon) &&
392: (PetscAbsScalar(prered) <= tr->epsilon)) {
393: kappa = 1.0;
394: }
395: else {
396: kappa = actred / prered;
397: }
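/* Interpolation candidates for rescaling the radius, based on beta = g^T d and
   the actual and predicted reductions; theta is the interpolation parameter */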
399: tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
400: tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
401: tau_min = PetscMin(tau_1, tau_2);
402: tau_max = PetscMax(tau_1, tau_2);
404: if (kappa >= 1.0 - tr->mu1) {
405: /* Great agreement; accept step and update radius */
406: if (tau_max < 1.0) {
407: tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
408: }
409: else if (tau_max > tr->gamma4) {
410: tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
411: }
412: else {
413: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
414: }
415: break;
416: }
417: else if (kappa >= 1.0 - tr->mu2) {
418: /* Good agreement */
420: if (tau_max < tr->gamma2) {
421: tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
422: }
423: else if (tau_max > tr->gamma3) {
424: tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
425: }
426: else if (tau_max < 1.0) {
427: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
428: }
429: else {
430: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
431: }
432: break;
433: }
434: else {
435: /* Not good agreement */
436: if (tau_min > 1.0) {
437: tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
438: }
439: else if (tau_max < tr->gamma1) {
440: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
441: }
442: else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
443: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
444: }
445: else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) &&
446: ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
447: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
448: }
449: else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) &&
450: ((tau_1 < tr->gamma1) || (tau_1 >= 1.0))) {
451: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
452: }
453: else {
454: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
455: }
456: }
457: }
458: }
459: }
461: /* The step computed was not good and the radius was decreased.
462: Monitor the radius to terminate. */
463: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
464: TaoMonitor(tao,tao->niter,f,gnorm,0.0,tao->trust);
465: (*tao->ops->convergencetest)(tao,tao->cnvP);
466: }
468: /* The radius may have been increased; modify if it is too large */
469: tao->trust = PetscMin(tao->trust, tr->max_radius);
471: if (tao->reason == TAO_CONTINUE_ITERATING) {
472: VecCopy(tr->W, tao->solution);
473: f = ftrial;
474: TaoComputeGradient(tao, tao->solution, tao->gradient);
475: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
476: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
477: needH = 1;
478: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
479: TaoMonitor(tao,tao->niter,f,gnorm,0.0,tao->trust);
480: (*tao->ops->convergencetest)(tao,tao->cnvP);
481: }
482: }
483: return(0);
484: }
486: /*------------------------------------------------------------*/
487: static PetscErrorCode TaoSetUp_NTR(Tao tao)
488: {
489: TAO_NTR *tr = (TAO_NTR *)tao->data;
493: if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient);}
494: if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
495: if (!tr->W) {VecDuplicate(tao->solution, &tr->W);}
497: tr->bfgs_pre = 0;
498: tr->M = 0;
499: return(0);
500: }
502: /*------------------------------------------------------------*/
503: static PetscErrorCode TaoDestroy_NTR(Tao tao)
504: {
505: TAO_NTR *tr = (TAO_NTR *)tao->data;
509: if (tao->setupcalled) {
510: VecDestroy(&tr->W);
511: }
512: PetscFree(tao->data);
513: return(0);
514: }
516: /*------------------------------------------------------------*/
517: static PetscErrorCode TaoSetFromOptions_NTR(PetscOptionItems *PetscOptionsObject,Tao tao)
518: {
519: TAO_NTR *tr = (TAO_NTR *)tao->data;
523: PetscOptionsHead(PetscOptionsObject,"Newton trust region method for unconstrained optimization");
524: PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type,NULL);
525: PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type,NULL);
526: PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1,NULL);
527: PetscOptionsReal("-tao_ntr_eta2", "", "", tr->eta2, &tr->eta2,NULL);
528: PetscOptionsReal("-tao_ntr_eta3", "", "", tr->eta3, &tr->eta3,NULL);
529: PetscOptionsReal("-tao_ntr_eta4", "", "", tr->eta4, &tr->eta4,NULL);
530: PetscOptionsReal("-tao_ntr_alpha1", "", "", tr->alpha1, &tr->alpha1,NULL);
531: PetscOptionsReal("-tao_ntr_alpha2", "", "", tr->alpha2, &tr->alpha2,NULL);
532: PetscOptionsReal("-tao_ntr_alpha3", "", "", tr->alpha3, &tr->alpha3,NULL);
533: PetscOptionsReal("-tao_ntr_alpha4", "", "", tr->alpha4, &tr->alpha4,NULL);
534: PetscOptionsReal("-tao_ntr_alpha5", "", "", tr->alpha5, &tr->alpha5,NULL);
535: PetscOptionsReal("-tao_ntr_mu1", "", "", tr->mu1, &tr->mu1,NULL);
536: PetscOptionsReal("-tao_ntr_mu2", "", "", tr->mu2, &tr->mu2,NULL);
537: PetscOptionsReal("-tao_ntr_gamma1", "", "", tr->gamma1, &tr->gamma1,NULL);
538: PetscOptionsReal("-tao_ntr_gamma2", "", "", tr->gamma2, &tr->gamma2,NULL);
539: PetscOptionsReal("-tao_ntr_gamma3", "", "", tr->gamma3, &tr->gamma3,NULL);
540: PetscOptionsReal("-tao_ntr_gamma4", "", "", tr->gamma4, &tr->gamma4,NULL);
541: PetscOptionsReal("-tao_ntr_theta", "", "", tr->theta, &tr->theta,NULL);
542: PetscOptionsReal("-tao_ntr_mu1_i", "", "", tr->mu1_i, &tr->mu1_i,NULL);
543: PetscOptionsReal("-tao_ntr_mu2_i", "", "", tr->mu2_i, &tr->mu2_i,NULL);
544: PetscOptionsReal("-tao_ntr_gamma1_i", "", "", tr->gamma1_i, &tr->gamma1_i,NULL);
545: PetscOptionsReal("-tao_ntr_gamma2_i", "", "", tr->gamma2_i, &tr->gamma2_i,NULL);
546: PetscOptionsReal("-tao_ntr_gamma3_i", "", "", tr->gamma3_i, &tr->gamma3_i,NULL);
547: PetscOptionsReal("-tao_ntr_gamma4_i", "", "", tr->gamma4_i, &tr->gamma4_i,NULL);
548: PetscOptionsReal("-tao_ntr_theta_i", "", "", tr->theta_i, &tr->theta_i,NULL);
549: PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius,NULL);
550: PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius,NULL);
551: PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon,NULL);
552: PetscOptionsTail();
553: KSPSetFromOptions(tao->ksp);
554: return(0);
555: }
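/*
   Illustrative command-line usage (assuming no additional options prefix has been
   set on the Tao): the internal KSP created in TaoCreate_NTR carries the
   "tao_ntr_" prefix, so the subproblem solver and the radius heuristics can be
   chosen at run time, e.g.

       -tao_type ntr -tao_ntr_ksp_type gltr
       -tao_ntr_init_type interpolation -tao_ntr_update_type reduction
       -tao_ntr_max_radius 1.0e3
*/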
557: /*------------------------------------------------------------*/
558: /*MC
559: TAONTR - Newton's method with trust region for unconstrained minimization.
560: At each iteration, the Newton trust region method approximately solves the subproblem
561:      min_d  .5 dT Hk d + gkT d,  s.t.  ||d|| <= Delta_k
562: using a KSP solver that supports a trust region radius (KSPNASH, KSPSTCG, or KSPGLTR).
564: Options Database Keys:
565: + -tao_ntr_init_type - "constant","direction","interpolation"
566: . -tao_ntr_update_type - "reduction","interpolation"
567: . -tao_ntr_min_radius - lower bound on trust region radius
568: . -tao_ntr_max_radius - upper bound on trust region radius
569: . -tao_ntr_epsilon - tolerance for accepting actual / predicted reduction
570: . -tao_ntr_mu1_i - mu1 interpolation init factor
571: . -tao_ntr_mu2_i - mu2 interpolation init factor
572: . -tao_ntr_gamma1_i - gamma1 interpolation init factor
573: . -tao_ntr_gamma2_i - gamma2 interpolation init factor
574: . -tao_ntr_gamma3_i - gamma3 interpolation init factor
575: . -tao_ntr_gamma4_i - gamma4 interpolation init factor
576: . -tao_ntr_theta_i - theta interpolation init factor
577: . -tao_ntr_eta1 - eta1 reduction update factor
578: . -tao_ntr_eta2 - eta2 reduction update factor
579: . -tao_ntr_eta3 - eta3 reduction update factor
580: . -tao_ntr_eta4 - eta4 reduction update factor
581: . -tao_ntr_alpha1 - alpha1 reduction update factor
582: . -tao_ntr_alpha2 - alpha2 reduction update factor
583: . -tao_ntr_alpha3 - alpha3 reduction update factor
584: . -tao_ntr_alpha4 - alpha4 reduction update factor
585: . -tao_ntr_alpha5 - alpha5 reduction update factor
586: . -tao_ntr_mu1 - mu1 interpolation update
587: . -tao_ntr_mu2 - mu2 interpolation update
588: . -tao_ntr_gamma1 - gamma1 interpolation update
589: . -tao_ntr_gamma2 - gamma2 interpolation update
590: . -tao_ntr_gamma3 - gamma3 interpolation update
591: . -tao_ntr_gamma4 - gamma4 interpolation update
592: - -tao_ntr_theta - theta interpolation update
594: Level: beginner
595: M*/
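/*
   Illustrative usage sketch (not part of this file and not a complete program):
   FormFunctionGradient, FormHessian, and the user context "user" are hypothetical
   application-provided callbacks, and x and H are assumed to be an already created
   solution vector and Hessian matrix.  TaoSetFromOptions() picks up the
   -tao_ntr_* options registered above.  A typical application would select this
   solver as follows:

       #include <petsctao.h>

       Tao            tao;
       PetscErrorCode ierr;

       ierr = TaoCreate(PETSC_COMM_WORLD,&tao);CHKERRQ(ierr);
       ierr = TaoSetType(tao,TAONTR);CHKERRQ(ierr);
       ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
       ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);CHKERRQ(ierr);
       ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,&user);CHKERRQ(ierr);
       ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);
       ierr = TaoSolve(tao);CHKERRQ(ierr);
       ierr = TaoDestroy(&tao);CHKERRQ(ierr);
*/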
597: PETSC_EXTERN PetscErrorCode TaoCreate_NTR(Tao tao)
598: {
599: TAO_NTR *tr;
604: PetscNewLog(tao,&tr);
606: tao->ops->setup = TaoSetUp_NTR;
607: tao->ops->solve = TaoSolve_NTR;
608: tao->ops->setfromoptions = TaoSetFromOptions_NTR;
609: tao->ops->destroy = TaoDestroy_NTR;
611: /* Override default settings (unless already changed) */
612: if (!tao->max_it_changed) tao->max_it = 50;
613: if (!tao->trust0_changed) tao->trust0 = 100.0;
614: tao->data = (void*)tr;
616: /* Standard trust region update parameters */
617: tr->eta1 = 1.0e-4;
618: tr->eta2 = 0.25;
619: tr->eta3 = 0.50;
620: tr->eta4 = 0.90;
622: tr->alpha1 = 0.25;
623: tr->alpha2 = 0.50;
624: tr->alpha3 = 1.00;
625: tr->alpha4 = 2.00;
626: tr->alpha5 = 4.00;
628: /* Interpolation trust region update parameters */
629: tr->mu1 = 0.10;
630: tr->mu2 = 0.50;
632: tr->gamma1 = 0.25;
633: tr->gamma2 = 0.50;
634: tr->gamma3 = 2.00;
635: tr->gamma4 = 4.00;
637: tr->theta = 0.05;
639: /* Interpolation parameters for initialization */
640: tr->mu1_i = 0.35;
641: tr->mu2_i = 0.50;
643: tr->gamma1_i = 0.0625;
644: tr->gamma2_i = 0.50;
645: tr->gamma3_i = 2.00;
646: tr->gamma4_i = 5.00;
648: tr->theta_i = 0.25;
650: tr->min_radius = 1.0e-10;
651: tr->max_radius = 1.0e10;
652: tr->epsilon = 1.0e-6;
654: tr->init_type = NTR_INIT_INTERPOLATION;
655: tr->update_type = NTR_UPDATE_REDUCTION;
657: /* Set linear solver to default for trust region */
658: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
659: PetscObjectIncrementTabLevel((PetscObject)tao->ksp,(PetscObject)tao,1);
660: KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
661: KSPAppendOptionsPrefix(tao->ksp,"tao_ntr_");
662: KSPSetType(tao->ksp,KSPSTCG);
663: return(0);
664: }