Actual source code: ntr.c
petsc-3.9.4 2018-09-11
1: #include <../src/tao/matrix/lmvmmat.h>
2: #include <../src/tao/unconstrained/impls/ntr/ntrimpl.h>
4: #include <petscksp.h>
6: #define NTR_PC_NONE 0
7: #define NTR_PC_AHESS 1
8: #define NTR_PC_BFGS 2
9: #define NTR_PC_PETSC 3
10: #define NTR_PC_TYPES 4
12: #define BFGS_SCALE_AHESS 0
13: #define BFGS_SCALE_BFGS 1
14: #define BFGS_SCALE_TYPES 2
16: #define NTR_INIT_CONSTANT 0
17: #define NTR_INIT_DIRECTION 1
18: #define NTR_INIT_INTERPOLATION 2
19: #define NTR_INIT_TYPES 3
21: #define NTR_UPDATE_REDUCTION 0
22: #define NTR_UPDATE_INTERPOLATION 1
23: #define NTR_UPDATE_TYPES 2
25: static const char *NTR_PC[64] = {"none","ahess","bfgs","petsc"};
27: static const char *BFGS_SCALE[64] = {"ahess","bfgs"};
29: static const char *NTR_INIT[64] = {"constant","direction","interpolation"};
31: static const char *NTR_UPDATE[64] = {"reduction","interpolation"};
33: /* Routine for BFGS preconditioner */
34: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
35: {
37: Mat M;
42: PCShellGetContext(pc,(void**)&M);
43: MatLMVMSolve(M, b, x);
44: return(0);
45: }
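/* Illustrative sketch (not part of the original ntr.c): how the shell apply routine
   above is attached to a PCSHELL preconditioner.  TaoSolve_NTR() below performs
   exactly this wiring when tr->pc_type == NTR_PC_BFGS; the function name here is
   hypothetical and exists only for illustration. */
static PetscErrorCode NTRAttachLMVMShell_Sketch(KSP ksp, Mat M)
{
  PC             pc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCSHELL);CHKERRQ(ierr);
  ierr = PCShellSetName(pc,"bfgs");CHKERRQ(ierr);
  ierr = PCShellSetContext(pc,M);CHKERRQ(ierr);               /* LMVM matrix, retrieved later by PCShellGetContext() */
  ierr = PCShellSetApply(pc,MatLMVMSolveShell);CHKERRQ(ierr); /* apply the LMVM (BFGS) approximate inverse */
  PetscFunctionReturn(0);
}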
47: /*
48: TaoSolve_NTR - Implements Newton's Method with a trust region approach
49: for solving unconstrained minimization problems.
51: The basic algorithm is taken from MINPACK-2 (dstrn).
53: TaoSolve_NTR computes a local minimizer of a twice differentiable function
54: f by applying a trust region variant of Newton's method. At each stage
55: of the algorithm, we use the preconditioned conjugate gradient method to
56: determine an approximate minimizer of the quadratic model
58: q(s) = <g, s> + 0.5 <s, Hs>
60: subject to the trust region constraint
62: || s ||_M <= radius,
64: where radius is the trust region radius and M is a symmetric positive
65: definite matrix (the preconditioner). Here g is the gradient and H
66: is the Hessian matrix.
68: Note: TaoSolve_NTR MUST use the iterative solver KSPCGNASH, KSPCGSTCG,
69: or KSPCGGLTR. The routine checks the KSP type at the start of the solve
70: and generates an error if the user has specified any other solver.
71: */
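/* Worked form of the subproblem (sketch, consistent with q(s) above): at the current
   iterate with gradient g and Hessian H, the KSP (nash, stcg, or gltr) approximately
   minimizes

       q(s) = <g, s> + 0.5 <s, H s>    subject to    ||s||_M <= radius.

   KSPCGGetObjFcn() used below returns q at the computed step, so the predicted
   reduction is prered = -q(s); the ratio kappa = actred/prered then drives the
   trust-region radius updates in the loop that follows. */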
72: static PetscErrorCode TaoSolve_NTR(Tao tao)
73: {
74: TAO_NTR *tr = (TAO_NTR *)tao->data;
75: KSPType ksp_type;
76: PetscBool is_nash,is_stcg,is_gltr;
77: KSPConvergedReason ksp_reason;
78: PC pc;
79: PetscReal fmin, ftrial, prered, actred, kappa, sigma, beta;
80: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
81: PetscReal f, gnorm;
83: PetscReal delta;
84: PetscReal norm_d;
85: PetscErrorCode ierr;
86: PetscInt bfgsUpdates = 0;
87: PetscInt needH;
89: PetscInt i_max = 5;
90: PetscInt j_max = 1;
91: PetscInt i, j, N, n, its;
94: if (tao->XL || tao->XU || tao->ops->computebounds) {
95: PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by ntr algorithm\n");
96: }
98: KSPGetType(tao->ksp,&ksp_type);
99: PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);
100: PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);
101: PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);
102: if (!is_nash && !is_stcg && !is_gltr) {
103: SETERRQ(PETSC_COMM_SELF,1,"TAO_NTR requires nash, stcg, or gltr for the KSP");
104: }
106: /* Initialize the radius and modify if it is too large or small */
107: tao->trust = tao->trust0;
108: tao->trust = PetscMax(tao->trust, tr->min_radius);
109: tao->trust = PetscMin(tao->trust, tr->max_radius);
111: if (NTR_PC_BFGS == tr->pc_type && !tr->M) {
112: VecGetLocalSize(tao->solution,&n);
113: VecGetSize(tao->solution,&N);
114: MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tr->M);
115: MatLMVMAllocateVectors(tr->M,tao->solution);
116: }
118: /* Check convergence criteria */
119: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
120: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
121: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
122: needH = 1;
124: tao->reason = TAO_CONTINUE_ITERATING;
125: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
126: TaoMonitor(tao,tao->niter,f,gnorm,0.0,1.0);
127: (*tao->ops->convergencetest)(tao,tao->cnvP);
128: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
130: /* Create vectors for the limited memory preconditioner */
131: if ((NTR_PC_BFGS == tr->pc_type) && (BFGS_SCALE_BFGS != tr->bfgs_scale_type)) {
132: if (!tr->Diag) {
133: VecDuplicate(tao->solution, &tr->Diag);
134: }
135: }
137: /* Modify the preconditioner to use the bfgs approximation */
138: KSPGetPC(tao->ksp, &pc);
139: switch(tr->pc_type) {
140: case NTR_PC_NONE:
141: PCSetType(pc, PCNONE);
142: PCSetFromOptions(pc);
143: break;
145: case NTR_PC_AHESS:
146: PCSetType(pc, PCJACOBI);
147: PCSetFromOptions(pc);
148: PCJacobiSetUseAbs(pc,PETSC_TRUE);
149: break;
151: case NTR_PC_BFGS:
152: PCSetType(pc, PCSHELL);
153: PCSetFromOptions(pc);
154: PCShellSetName(pc, "bfgs");
155: PCShellSetContext(pc, tr->M);
156: PCShellSetApply(pc, MatLMVMSolveShell);
157: break;
159: default:
160: /* Use the pc method set by pc_type */
161: break;
162: }
164: /* Initialize trust-region radius */
165: switch(tr->init_type) {
166: case NTR_INIT_CONSTANT:
167: /* Use the initial radius specified */
168: break;
170: case NTR_INIT_INTERPOLATION:
171: /* Compute an initial radius by interpolating along the gradient direction */
172: max_radius = 0.0;
174: for (j = 0; j < j_max; ++j) {
175: fmin = f;
176: sigma = 0.0;
178: if (needH) {
179: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
180: needH = 0;
181: }
183: for (i = 0; i < i_max; ++i) {
185: VecCopy(tao->solution, tr->W);
186: VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient);
187: TaoComputeObjective(tao, tr->W, &ftrial);
189: if (PetscIsInfOrNanReal(ftrial)) {
190: tau = tr->gamma1_i;
191: }
192: else {
193: if (ftrial < fmin) {
194: fmin = ftrial;
195: sigma = -tao->trust / gnorm;
196: }
198: MatMult(tao->hessian, tao->gradient, tao->stepdirection);
199: VecDot(tao->gradient, tao->stepdirection, &prered);
201: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
202: actred = f - ftrial;
203: if ((PetscAbsScalar(actred) <= tr->epsilon) &&
204: (PetscAbsScalar(prered) <= tr->epsilon)) {
205: kappa = 1.0;
206: }
207: else {
208: kappa = actred / prered;
209: }
211: tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
212: tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
213: tau_min = PetscMin(tau_1, tau_2);
214: tau_max = PetscMax(tau_1, tau_2);
216: if (PetscAbsScalar(kappa - 1.0) <= tr->mu1_i) {
217: /* Great agreement */
218: max_radius = PetscMax(max_radius, tao->trust);
220: if (tau_max < 1.0) {
221: tau = tr->gamma3_i;
222: }
223: else if (tau_max > tr->gamma4_i) {
224: tau = tr->gamma4_i;
225: }
226: else {
227: tau = tau_max;
228: }
229: }
230: else if (PetscAbsScalar(kappa - 1.0) <= tr->mu2_i) {
231: /* Good agreement */
232: max_radius = PetscMax(max_radius, tao->trust);
234: if (tau_max < tr->gamma2_i) {
235: tau = tr->gamma2_i;
236: }
237: else if (tau_max > tr->gamma3_i) {
238: tau = tr->gamma3_i;
239: }
240: else {
241: tau = tau_max;
242: }
243: }
244: else {
245: /* Not good agreement */
246: if (tau_min > 1.0) {
247: tau = tr->gamma2_i;
248: }
249: else if (tau_max < tr->gamma1_i) {
250: tau = tr->gamma1_i;
251: }
252: else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
253: tau = tr->gamma1_i;
254: }
255: else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) &&
256: ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
257: tau = tau_1;
258: }
259: else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) &&
260: ((tau_1 < tr->gamma1_i) || (tau_1 >= 1.0))) {
261: tau = tau_2;
262: }
263: else {
264: tau = tau_max;
265: }
266: }
267: }
268: tao->trust = tau * tao->trust;
269: }
271: if (fmin < f) {
272: f = fmin;
273: VecAXPY(tao->solution, sigma, tao->gradient);
274: TaoComputeGradient(tao,tao->solution, tao->gradient);
276: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
278: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
279: needH = 1;
281: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
282: TaoMonitor(tao,tao->niter,f,gnorm,0.0,1.0);
283: (*tao->ops->convergencetest)(tao,tao->cnvP);
284: if (tao->reason != TAO_CONTINUE_ITERATING) {
285: return(0);
286: }
287: }
288: }
289: tao->trust = PetscMax(tao->trust, max_radius);
291: /* Modify the radius if it is too large or small */
292: tao->trust = PetscMax(tao->trust, tr->min_radius);
293: tao->trust = PetscMin(tao->trust, tr->max_radius);
294: break;
296: default:
297: /* Norm of the first direction will initialize radius */
298: tao->trust = 0.0;
299: break;
300: }
302: /* Set initial scaling for the BFGS preconditioner
303: This step is done after computing the initial trust-region radius
304: since the function value may have decreased */
305: if (NTR_PC_BFGS == tr->pc_type) {
306: if (f != 0.0) {
307: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
308: }
309: else {
310: delta = 2.0 / (gnorm*gnorm);
311: }
312: MatLMVMSetDelta(tr->M,delta);
313: }
315: /* Have not converged; continue with Newton method */
316: while (tao->reason == TAO_CONTINUE_ITERATING) {
317: ++tao->niter;
318: tao->ksp_its=0;
319: /* Compute the Hessian */
320: if (needH) {
321: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
322: needH = 0;
323: }
325: if (NTR_PC_BFGS == tr->pc_type) {
326: if (BFGS_SCALE_AHESS == tr->bfgs_scale_type) {
327: /* Obtain diagonal for the bfgs preconditioner */
328: MatGetDiagonal(tao->hessian, tr->Diag);
329: VecAbs(tr->Diag);
330: VecReciprocal(tr->Diag);
331: MatLMVMSetScale(tr->M,tr->Diag);
332: }
334: /* Update the limited memory preconditioner */
335: MatLMVMUpdate(tr->M, tao->solution, tao->gradient);
336: ++bfgsUpdates;
337: }
339: while (tao->reason == TAO_CONTINUE_ITERATING) {
340: KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
342: /* Solve the trust region subproblem */
343: KSPCGSetRadius(tao->ksp,tao->trust);
344: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
345: KSPGetIterationNumber(tao->ksp,&its);
346: tao->ksp_its+=its;
347: tao->ksp_tot_its+=its;
348: KSPCGGetNormD(tao->ksp, &norm_d);
350: if (0.0 == tao->trust) {
351: /* Radius was uninitialized; use the norm of the direction */
352: if (norm_d > 0.0) {
353: tao->trust = norm_d;
355: /* Modify the radius if it is too large or small */
356: tao->trust = PetscMax(tao->trust, tr->min_radius);
357: tao->trust = PetscMin(tao->trust, tr->max_radius);
358: }
359: else {
360: /* The direction was bad; set radius to default value and re-solve
361: the trust-region subproblem to get a direction */
362: tao->trust = tao->trust0;
364: /* Modify the radius if it is too large or small */
365: tao->trust = PetscMax(tao->trust, tr->min_radius);
366: tao->trust = PetscMin(tao->trust, tr->max_radius);
368: KSPCGSetRadius(tao->ksp,tao->trust);
369: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
370: KSPGetIterationNumber(tao->ksp,&its);
371: tao->ksp_its+=its;
372: tao->ksp_tot_its+=its;
373: KSPCGGetNormD(tao->ksp, &norm_d);
375: if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
376: }
377: }
378: VecScale(tao->stepdirection, -1.0);
379: KSPGetConvergedReason(tao->ksp, &ksp_reason);
380: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&
381: (NTR_PC_BFGS == tr->pc_type) && (bfgsUpdates > 1)) {
382: /* Preconditioner is numerically indefinite; reset the
383: approximation if using BFGS preconditioning. */
385: if (f != 0.0) {
386: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
387: }
388: else {
389: delta = 2.0 / (gnorm*gnorm);
390: }
391: MatLMVMSetDelta(tr->M, delta);
392: MatLMVMReset(tr->M);
393: MatLMVMUpdate(tr->M, tao->solution, tao->gradient);
394: bfgsUpdates = 1;
395: }
397: if (NTR_UPDATE_REDUCTION == tr->update_type) {
398: /* Get predicted reduction */
399: KSPCGGetObjFcn(tao->ksp,&prered);
400: if (prered >= 0.0) {
401: /* The predicted reduction has the wrong sign. This cannot
402: happen in infinite precision arithmetic. Step should
403: be rejected! */
404: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
405: }
406: else {
407: /* Compute trial step and function value */
408: VecCopy(tao->solution,tr->W);
409: VecAXPY(tr->W, 1.0, tao->stepdirection);
410: TaoComputeObjective(tao, tr->W, &ftrial);
412: if (PetscIsInfOrNanReal(ftrial)) {
413: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
414: } else {
415: /* Compute the actual reduction */
416: actred = f - ftrial;
417: prered = -prered;
418: if ((PetscAbsScalar(actred) <= tr->epsilon) &&
419: (PetscAbsScalar(prered) <= tr->epsilon)) {
420: kappa = 1.0;
421: }
422: else {
423: kappa = actred / prered;
424: }
426: /* Accept or reject the step and update radius */
427: if (kappa < tr->eta1) {
428: /* Reject the step */
429: tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
430: }
431: else {
432: /* Accept the step */
433: if (kappa < tr->eta2) {
434: /* Marginally bad step */
435: tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
436: }
437: else if (kappa < tr->eta3) {
438: /* Reasonable step */
439: tao->trust = tr->alpha3 * tao->trust;
440: }
441: else if (kappa < tr->eta4) {
442: /* Good step */
443: tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
444: }
445: else {
446: /* Very good step */
447: tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
448: }
449: break;
450: }
451: }
452: }
453: }
454: else {
455: /* Get predicted reduction */
456: KSPCGGetObjFcn(tao->ksp,&prered);
457: if (prered >= 0.0) {
458: /* The predicted reduction has the wrong sign. This cannot
459: happen in infinite precision arithmetic. Step should
460: be rejected! */
461: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
462: }
463: else {
464: VecCopy(tao->solution, tr->W);
465: VecAXPY(tr->W, 1.0, tao->stepdirection);
466: TaoComputeObjective(tao, tr->W, &ftrial);
467: if (PetscIsInfOrNanReal(ftrial)) {
468: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
469: }
470: else {
471: VecDot(tao->gradient, tao->stepdirection, &beta);
472: actred = f - ftrial;
473: prered = -prered;
474: if ((PetscAbsScalar(actred) <= tr->epsilon) &&
475: (PetscAbsScalar(prered) <= tr->epsilon)) {
476: kappa = 1.0;
477: }
478: else {
479: kappa = actred / prered;
480: }
482: tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
483: tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
484: tau_min = PetscMin(tau_1, tau_2);
485: tau_max = PetscMax(tau_1, tau_2);
487: if (kappa >= 1.0 - tr->mu1) {
488: /* Great agreement; accept step and update radius */
489: if (tau_max < 1.0) {
490: tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
491: }
492: else if (tau_max > tr->gamma4) {
493: tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
494: }
495: else {
496: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
497: }
498: break;
499: }
500: else if (kappa >= 1.0 - tr->mu2) {
501: /* Good agreement */
503: if (tau_max < tr->gamma2) {
504: tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
505: }
506: else if (tau_max > tr->gamma3) {
507: tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
508: }
509: else if (tau_max < 1.0) {
510: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
511: }
512: else {
513: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
514: }
515: break;
516: }
517: else {
518: /* Not good agreement */
519: if (tau_min > 1.0) {
520: tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
521: }
522: else if (tau_max < tr->gamma1) {
523: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
524: }
525: else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
526: tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
527: }
528: else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) &&
529: ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
530: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
531: }
532: else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) &&
533: ((tau_1 < tr->gamma1) || (tau_1 >= 1.0))) {
534: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
535: }
536: else {
537: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
538: }
539: }
540: }
541: }
542: }
544: /* The step computed was not good and the radius was decreased.
545: Monitor the radius so the convergence test can terminate when it becomes too small. */
546: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
547: TaoMonitor(tao,tao->niter,f,gnorm,0.0,tao->trust);
548: (*tao->ops->convergencetest)(tao,tao->cnvP);
549: }
551: /* The radius may have been increased; modify if it is too large */
552: tao->trust = PetscMin(tao->trust, tr->max_radius);
554: if (tao->reason == TAO_CONTINUE_ITERATING) {
555: VecCopy(tr->W, tao->solution);
556: f = ftrial;
557: TaoComputeGradient(tao, tao->solution, tao->gradient);
558: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
559: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
560: needH = 1;
561: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
562: TaoMonitor(tao,tao->niter,f,gnorm,0.0,tao->trust);
563: (*tao->ops->convergencetest)(tao,tao->cnvP);
564: }
565: }
566: return(0);
567: }
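/* Worked example of the reduction-based radius update above (sketch, using the
   default parameters set in TaoCreate_NTR() below: eta1=1e-4, eta2=0.25, eta3=0.5,
   eta4=0.9, alpha1=0.25, alpha4=2.0): if a step of length norm_d achieves
   kappa = actred/prered = 0.8, then eta3 <= kappa < eta4, so the step is accepted
   as a "good step" and the radius becomes max(alpha4*norm_d, trust) = max(2*norm_d, trust).
   A step with kappa = 1e-5 (< eta1) would instead be rejected and the radius shrunk
   to alpha1*min(trust, norm_d) = 0.25*min(trust, norm_d). */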
569: /*------------------------------------------------------------*/
570: static PetscErrorCode TaoSetUp_NTR(Tao tao)
571: {
572: TAO_NTR *tr = (TAO_NTR *)tao->data;
576: if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient);}
577: if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
578: if (!tr->W) {VecDuplicate(tao->solution, &tr->W);}
580: tr->Diag = 0;
581: tr->M = 0;
582: return(0);
583: }
585: /*------------------------------------------------------------*/
586: static PetscErrorCode TaoDestroy_NTR(Tao tao)
587: {
588: TAO_NTR *tr = (TAO_NTR *)tao->data;
592: if (tao->setupcalled) {
593: VecDestroy(&tr->W);
594: }
595: MatDestroy(&tr->M);
596: VecDestroy(&tr->Diag);
597: PetscFree(tao->data);
598: return(0);
599: }
601: /*------------------------------------------------------------*/
602: static PetscErrorCode TaoSetFromOptions_NTR(PetscOptionItems *PetscOptionsObject,Tao tao)
603: {
604: TAO_NTR *tr = (TAO_NTR *)tao->data;
608: PetscOptionsHead(PetscOptionsObject,"Newton trust region method for unconstrained optimization");
609: PetscOptionsEList("-tao_ntr_pc_type", "pc type", "", NTR_PC, NTR_PC_TYPES, NTR_PC[tr->pc_type], &tr->pc_type,NULL);
610: PetscOptionsEList("-tao_ntr_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tr->bfgs_scale_type], &tr->bfgs_scale_type,NULL);
611: PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type,NULL);
612: PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type,NULL);
613: PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1,NULL);
614: PetscOptionsReal("-tao_ntr_eta2", "", "", tr->eta2, &tr->eta2,NULL);
615: PetscOptionsReal("-tao_ntr_eta3", "", "", tr->eta3, &tr->eta3,NULL);
616: PetscOptionsReal("-tao_ntr_eta4", "", "", tr->eta4, &tr->eta4,NULL);
617: PetscOptionsReal("-tao_ntr_alpha1", "", "", tr->alpha1, &tr->alpha1,NULL);
618: PetscOptionsReal("-tao_ntr_alpha2", "", "", tr->alpha2, &tr->alpha2,NULL);
619: PetscOptionsReal("-tao_ntr_alpha3", "", "", tr->alpha3, &tr->alpha3,NULL);
620: PetscOptionsReal("-tao_ntr_alpha4", "", "", tr->alpha4, &tr->alpha4,NULL);
621: PetscOptionsReal("-tao_ntr_alpha5", "", "", tr->alpha5, &tr->alpha5,NULL);
622: PetscOptionsReal("-tao_ntr_mu1", "", "", tr->mu1, &tr->mu1,NULL);
623: PetscOptionsReal("-tao_ntr_mu2", "", "", tr->mu2, &tr->mu2,NULL);
624: PetscOptionsReal("-tao_ntr_gamma1", "", "", tr->gamma1, &tr->gamma1,NULL);
625: PetscOptionsReal("-tao_ntr_gamma2", "", "", tr->gamma2, &tr->gamma2,NULL);
626: PetscOptionsReal("-tao_ntr_gamma3", "", "", tr->gamma3, &tr->gamma3,NULL);
627: PetscOptionsReal("-tao_ntr_gamma4", "", "", tr->gamma4, &tr->gamma4,NULL);
628: PetscOptionsReal("-tao_ntr_theta", "", "", tr->theta, &tr->theta,NULL);
629: PetscOptionsReal("-tao_ntr_mu1_i", "", "", tr->mu1_i, &tr->mu1_i,NULL);
630: PetscOptionsReal("-tao_ntr_mu2_i", "", "", tr->mu2_i, &tr->mu2_i,NULL);
631: PetscOptionsReal("-tao_ntr_gamma1_i", "", "", tr->gamma1_i, &tr->gamma1_i,NULL);
632: PetscOptionsReal("-tao_ntr_gamma2_i", "", "", tr->gamma2_i, &tr->gamma2_i,NULL);
633: PetscOptionsReal("-tao_ntr_gamma3_i", "", "", tr->gamma3_i, &tr->gamma3_i,NULL);
634: PetscOptionsReal("-tao_ntr_gamma4_i", "", "", tr->gamma4_i, &tr->gamma4_i,NULL);
635: PetscOptionsReal("-tao_ntr_theta_i", "", "", tr->theta_i, &tr->theta_i,NULL);
636: PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius,NULL);
637: PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius,NULL);
638: PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon,NULL);
639: PetscOptionsTail();
640: KSPSetFromOptions(tao->ksp);
641: return(0);
642: }
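/* Example invocation (sketch; the application binary name is hypothetical, while the
   option names are exactly those registered above and in the Tao core):

     ./app -tao_type ntr -tao_ntr_pc_type bfgs -tao_ntr_init_type interpolation
           -tao_ntr_update_type reduction -tao_ntr_max_radius 1.0e3 -tao_monitor
*/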
644: /*------------------------------------------------------------*/
645: static PetscErrorCode TaoView_NTR(Tao tao, PetscViewer viewer)
646: {
647: TAO_NTR *tr = (TAO_NTR *)tao->data;
649: PetscInt nrejects;
650: PetscBool isascii;
653: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
654: if (isascii) {
655: PetscViewerASCIIPushTab(viewer);
656: if (NTR_PC_BFGS == tr->pc_type && tr->M) {
657: MatLMVMGetRejects(tr->M, &nrejects);
658: PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects);
659: }
660: PetscViewerASCIIPopTab(viewer);
661: }
662: return(0);
663: }
665: /*------------------------------------------------------------*/
666: /*MC
667: TAONTR - Newton's method with trust region for unconstrained minimization.
668: At each iteration, the Newton trust region method approximately solves the subproblem
669: min_d .5 d^T H_k d + g_k^T d,  s.t.  ||d|| <= Delta_k
670: and therefore requires a KSP solver that supports a trust region radius (nash, stcg, or gltr).
672: Options Database Keys:
673: + -tao_ntr_pc_type - "none","ahess","bfgs","petsc"
674: . -tao_ntr_bfgs_scale_type - type of scaling with bfgs pc, "ahess" or "bfgs"
675: . -tao_ntr_init_type - "constant","direction","interpolation"
676: . -tao_ntr_update_type - "reduction","interpolation"
677: . -tao_ntr_min_radius - lower bound on trust region radius
678: . -tao_ntr_max_radius - upper bound on trust region radius
679: . -tao_ntr_epsilon - tolerance for accepting actual / predicted reduction
680: . -tao_ntr_mu1_i - mu1 interpolation init factor
681: . -tao_ntr_mu2_i - mu2 interpolation init factor
682: . -tao_ntr_gamma1_i - gamma1 interpolation init factor
683: . -tao_ntr_gamma2_i - gamma2 interpolation init factor
684: . -tao_ntr_gamma3_i - gamma3 interpolation init factor
685: . -tao_ntr_gamma4_i - gamma4 interpolation init factor
686: . -tao_ntr_theta_i - theta interpolation init factor
687: . -tao_ntr_eta1 - eta1 reduction update factor
688: . -tao_ntr_eta2 - eta2 reduction update factor
689: . -tao_ntr_eta3 - eta3 reduction update factor
690: . -tao_ntr_eta4 - eta4 reduction update factor
691: . -tao_ntr_alpha1 - alpha1 reduction update factor
692: . -tao_ntr_alpha2 - alpha2 reduction update factor
693: . -tao_ntr_alpha3 - alpha3 reduction update factor
694: . -tao_ntr_alpha4 - alpha4 reduction update factor
695: . -tao_ntr_alpha5 - alpha5 reduction update factor
696: . -tao_ntr_mu1 - mu1 interpolation update
697: . -tao_ntr_mu2 - mu2 interpolation update
698: . -tao_ntr_gamma1 - gamma1 interpolation update
699: . -tao_ntr_gamma2 - gamma2 interpolation update
700: . -tao_ntr_gamma3 - gamma3 interpolation update
701: . -tao_ntr_gamma4 - gamma4 interpolation update
702: - -tao_ntr_theta - theta interpolation update
704: Level: beginner
705: M*/
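/* Minimal usage sketch (not part of the original ntr.c): assuming an application
   with user-provided callbacks FormFunctionGradient() and FormHessian()
   (hypothetical names), a solution vector x, a Hessian matrix H, and a user
   context `user`, the solver documented above would typically be driven as:

     Tao tao;
     ierr = TaoCreate(PETSC_COMM_WORLD,&tao);CHKERRQ(ierr);
     ierr = TaoSetType(tao,TAONTR);CHKERRQ(ierr);
     ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
     ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);CHKERRQ(ierr);
     ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,&user);CHKERRQ(ierr);
     ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);
     ierr = TaoSolve(tao);CHKERRQ(ierr);
     ierr = TaoDestroy(&tao);CHKERRQ(ierr);

   TaoSetFromOptions() picks up the -tao_ntr_* options listed above. */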
707: PETSC_EXTERN PetscErrorCode TaoCreate_NTR(Tao tao)
708: {
709: TAO_NTR *tr;
714: PetscNewLog(tao,&tr);
716: tao->ops->setup = TaoSetUp_NTR;
717: tao->ops->solve = TaoSolve_NTR;
718: tao->ops->view = TaoView_NTR;
719: tao->ops->setfromoptions = TaoSetFromOptions_NTR;
720: tao->ops->destroy = TaoDestroy_NTR;
722: /* Override default settings (unless already changed) */
723: if (!tao->max_it_changed) tao->max_it = 50;
724: if (!tao->trust0_changed) tao->trust0 = 100.0;
725: tao->data = (void*)tr;
727: /* Standard trust region update parameters */
728: tr->eta1 = 1.0e-4;
729: tr->eta2 = 0.25;
730: tr->eta3 = 0.50;
731: tr->eta4 = 0.90;
733: tr->alpha1 = 0.25;
734: tr->alpha2 = 0.50;
735: tr->alpha3 = 1.00;
736: tr->alpha4 = 2.00;
737: tr->alpha5 = 4.00;
739: /* Interpolation trust region update parameters */
740: tr->mu1 = 0.10;
741: tr->mu2 = 0.50;
743: tr->gamma1 = 0.25;
744: tr->gamma2 = 0.50;
745: tr->gamma3 = 2.00;
746: tr->gamma4 = 4.00;
748: tr->theta = 0.05;
750: /* Interpolation parameters for initialization */
751: tr->mu1_i = 0.35;
752: tr->mu2_i = 0.50;
754: tr->gamma1_i = 0.0625;
755: tr->gamma2_i = 0.50;
756: tr->gamma3_i = 2.00;
757: tr->gamma4_i = 5.00;
759: tr->theta_i = 0.25;
761: tr->min_radius = 1.0e-10;
762: tr->max_radius = 1.0e10;
763: tr->epsilon = 1.0e-6;
765: tr->pc_type = NTR_PC_BFGS;
766: tr->bfgs_scale_type = BFGS_SCALE_AHESS;
767: tr->init_type = NTR_INIT_INTERPOLATION;
768: tr->update_type = NTR_UPDATE_REDUCTION;
770: /* Set linear solver to default for trust region */
771: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
772: PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1);
773: KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
774: KSPSetType(tao->ksp,KSPCGSTCG);
775: return(0);
776: }