Actual source code: ntl.c
petsc-3.11.4 2019-09-28
1: #include <../src/tao/unconstrained/impls/ntl/ntlimpl.h>
3: #include <petscksp.h>
5: #define NTL_INIT_CONSTANT 0
6: #define NTL_INIT_DIRECTION 1
7: #define NTL_INIT_INTERPOLATION 2
8: #define NTL_INIT_TYPES 3
10: #define NTL_UPDATE_REDUCTION 0
11: #define NTL_UPDATE_INTERPOLATION 1
12: #define NTL_UPDATE_TYPES 2
14: static const char *NTL_INIT[64] = {"constant","direction","interpolation"};
16: static const char *NTL_UPDATE[64] = {"reduction","interpolation"};
18: /* Implements Newton's method with a trust-region, line-search approach for
19: solving unconstrained minimization problems. A More'-Thuente line search
20: is used to guarantee that the BFGS preconditioner remains positive
21: definite. */
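/* Editor's note: a minimal usage sketch, not part of ntl.c. It shows how a TAO
   application might select this solver; the callbacks FormFunctionGradient and
   FormHessian, the matrix H, the vector x, and the user context are
   hypothetical placeholders supplied by the application.

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD, &tao);
     TaoSetType(tao, TAONTL);                                 // select this algorithm
     TaoSetInitialVector(tao, x);                             // starting point
     TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
     TaoSetHessianRoutine(tao, H, H, FormHessian, &user);     // Hessian used in the KSP solve
     TaoSetFromOptions(tao);                                  // pick up -tao_ntl_ options
     TaoSolve(tao);
     TaoDestroy(&tao);
*/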
23: #define NTL_NEWTON 0
24: #define NTL_BFGS 1
25: #define NTL_SCALED_GRADIENT 2
26: #define NTL_GRADIENT 3
28: static PetscErrorCode TaoSolve_NTL(Tao tao)
29: {
30: TAO_NTL *tl = (TAO_NTL *)tao->data;
31: KSPType ksp_type;
32: PetscBool is_nash,is_stcg,is_gltr,is_bfgs,is_jacobi,is_symmetric,sym_set;
33: KSPConvergedReason ksp_reason;
34: PC pc;
35: TaoLineSearchConvergedReason ls_reason;
37: PetscReal fmin, ftrial, prered, actred, kappa, sigma;
38: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
39: PetscReal f, fold, gdx, gnorm;
40: PetscReal step = 1.0;
42: PetscReal norm_d = 0.0;
43: PetscErrorCode ierr;
44: PetscInt stepType;
45: PetscInt its;
47: PetscInt bfgsUpdates = 0;
48: PetscInt needH;
50: PetscInt i_max = 5;
51: PetscInt j_max = 1;
52: PetscInt i, j, n, N;
54: PetscInt tr_reject;
57: if (tao->XL || tao->XU || tao->ops->computebounds) {
58: PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by ntl algorithm\n");
59: }
61: KSPGetType(tao->ksp,&ksp_type);
62: PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);
63: PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);
64: PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);
65: if (!is_nash && !is_stcg && !is_gltr) {
66: SETERRQ(PETSC_COMM_SELF,1,"TAO_NTL requires nash, stcg, or gltr for the KSP");
67: }
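/* Editor's note: the trust-region KSP can be selected at runtime. Since
   TaoCreate_NTL() below appends the "tao_ntl_" prefix to the KSP options,
   something like -tao_ntl_ksp_type gltr (or nash, stcg) should choose one of
   the accepted solvers. */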
69: /* Initialize the radius and modify if it is too large or small */
70: tao->trust = tao->trust0;
71: tao->trust = PetscMax(tao->trust, tl->min_radius);
72: tao->trust = PetscMin(tao->trust, tl->max_radius);
74: /* Allocate the vectors needed for the BFGS approximation */
75: KSPGetPC(tao->ksp, &pc);
76: PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
77: PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
78: if (is_bfgs) {
79: tl->bfgs_pre = pc;
80: PCLMVMGetMatLMVM(tl->bfgs_pre, &tl->M);
81: VecGetLocalSize(tao->solution, &n);
82: VecGetSize(tao->solution, &N);
83: MatSetSizes(tl->M, n, n, N, N);
84: MatLMVMAllocate(tl->M, tao->solution, tao->gradient);
85: MatIsSymmetricKnown(tl->M, &sym_set, &is_symmetric);
86: if (!sym_set || !is_symmetric) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
87: } else if (is_jacobi) {
88: PCJacobiSetUseAbs(pc,PETSC_TRUE);
89: }
91: /* Check convergence criteria */
92: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
93: VecNorm(tao->gradient, NORM_2, &gnorm);
94: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
95: needH = 1;
97: tao->reason = TAO_CONTINUE_ITERATING;
98: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
99: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
100: (*tao->ops->convergencetest)(tao,tao->cnvP);
101: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
103: /* Initialize trust-region radius */
104: switch(tl->init_type) {
105: case NTL_INIT_CONSTANT:
106: /* Use the initial radius specified */
107: break;
109: case NTL_INIT_INTERPOLATION:
110: /* Use interpolation to determine the initial radius */
111: max_radius = 0.0;
113: for (j = 0; j < j_max; ++j) {
114: fmin = f;
115: sigma = 0.0;
117: if (needH) {
118: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
119: needH = 0;
120: }
122: for (i = 0; i < i_max; ++i) {
123: VecCopy(tao->solution, tl->W);
124: VecAXPY(tl->W, -tao->trust/gnorm, tao->gradient);
126: TaoComputeObjective(tao, tl->W, &ftrial);
127: if (PetscIsInfOrNanReal(ftrial)) {
128: tau = tl->gamma1_i;
129: } else {
130: if (ftrial < fmin) {
131: fmin = ftrial;
132: sigma = -tao->trust / gnorm;
133: }
135: MatMult(tao->hessian, tao->gradient, tao->stepdirection);
136: VecDot(tao->gradient, tao->stepdirection, &prered);
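/* Editor's note: for the steepest-descent trial point W = X - (trust/||g||) g,
   the quadratic model predicts a reduction of
   trust*||g|| - 0.5*(trust^2/||g||^2)*(g^T H g); the next line evaluates this
   expression, with prered currently holding g^T H g from the VecDot above. */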
138: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
139: actred = f - ftrial;
140: if ((PetscAbsScalar(actred) <= tl->epsilon) && (PetscAbsScalar(prered) <= tl->epsilon)) {
141: kappa = 1.0;
142: } else {
143: kappa = actred / prered;
144: }
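/* Editor's note: tau_1 and tau_2 below appear to be candidate scalings of the
   radius obtained by interpolating the actual and predicted reductions, with
   theta_i damping the adjustment; the selected tau is then limited by the
   gamma1_i..gamma4_i factors according to how close kappa is to 1. */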
146: tau_1 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust + (1.0 - tl->theta_i) * prered - actred);
147: tau_2 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust - (1.0 + tl->theta_i) * prered + actred);
148: tau_min = PetscMin(tau_1, tau_2);
149: tau_max = PetscMax(tau_1, tau_2);
151: if (PetscAbsScalar(kappa - 1.0) <= tl->mu1_i) {
152: /* Great agreement */
153: max_radius = PetscMax(max_radius, tao->trust);
155: if (tau_max < 1.0) {
156: tau = tl->gamma3_i;
157: } else if (tau_max > tl->gamma4_i) {
158: tau = tl->gamma4_i;
159: } else if (tau_1 >= 1.0 && tau_1 <= tl->gamma4_i && tau_2 < 1.0) {
160: tau = tau_1;
161: } else if (tau_2 >= 1.0 && tau_2 <= tl->gamma4_i && tau_1 < 1.0) {
162: tau = tau_2;
163: } else {
164: tau = tau_max;
165: }
166: } else if (PetscAbsScalar(kappa - 1.0) <= tl->mu2_i) {
167: /* Good agreement */
168: max_radius = PetscMax(max_radius, tao->trust);
170: if (tau_max < tl->gamma2_i) {
171: tau = tl->gamma2_i;
172: } else if (tau_max > tl->gamma3_i) {
173: tau = tl->gamma3_i;
174: } else {
175: tau = tau_max;
176: }
177: } else {
178: /* Not good agreement */
179: if (tau_min > 1.0) {
180: tau = tl->gamma2_i;
181: } else if (tau_max < tl->gamma1_i) {
182: tau = tl->gamma1_i;
183: } else if ((tau_min < tl->gamma1_i) && (tau_max >= 1.0)) {
184: tau = tl->gamma1_i;
185: } else if ((tau_1 >= tl->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1_i) || (tau_2 >= 1.0))) {
186: tau = tau_1;
187: } else if ((tau_2 >= tl->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1_i) || (tau_1 >= 1.0))) {
188: tau = tau_2;
189: } else {
190: tau = tau_max;
191: }
192: }
193: }
194: tao->trust = tau * tao->trust;
195: }
197: if (fmin < f) {
198: f = fmin;
199: VecAXPY(tao->solution, sigma, tao->gradient);
200: TaoComputeGradient(tao, tao->solution, tao->gradient);
202: VecNorm(tao->gradient, NORM_2, &gnorm);
203: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
204: needH = 1;
206: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
207: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
208: (*tao->ops->convergencetest)(tao,tao->cnvP);
209: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
210: }
211: }
212: tao->trust = PetscMax(tao->trust, max_radius);
214: /* Modify the radius if it is too large or small */
215: tao->trust = PetscMax(tao->trust, tl->min_radius);
216: tao->trust = PetscMin(tao->trust, tl->max_radius);
217: break;
219: default:
220: /* Norm of the first direction will initialize radius */
221: tao->trust = 0.0;
222: break;
223: }
225: /* Set counter for gradient/reset steps */
226: tl->ntrust = 0;
227: tl->newt = 0;
228: tl->bfgs = 0;
229: tl->grad = 0;
231: /* Have not converged; continue with Newton method */
232: while (tao->reason == TAO_CONTINUE_ITERATING) {
233: /* Call general purpose update function */
234: if (tao->ops->update) {
235: (*tao->ops->update)(tao, tao->niter, tao->user_update);
236: }
237: ++tao->niter;
238: tao->ksp_its=0;
239: /* Compute the Hessian */
240: if (needH) {
241: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
242: }
244: if (tl->bfgs_pre) {
245: /* Update the limited memory preconditioner */
246: MatLMVMUpdate(tl->M,tao->solution, tao->gradient);
247: ++bfgsUpdates;
248: }
249: KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
250: /* Solve the Newton system of equations */
251: KSPCGSetRadius(tao->ksp,tl->max_radius);
252: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
253: KSPGetIterationNumber(tao->ksp,&its);
254: tao->ksp_its+=its;
255: tao->ksp_tot_its+=its;
256: KSPCGGetNormD(tao->ksp, &norm_d);
258: if (0.0 == tao->trust) {
259: /* Radius was uninitialized; use the norm of the direction */
260: if (norm_d > 0.0) {
261: tao->trust = norm_d;
263: /* Modify the radius if it is too large or small */
264: tao->trust = PetscMax(tao->trust, tl->min_radius);
265: tao->trust = PetscMin(tao->trust, tl->max_radius);
266: } else {
267: /* The direction was bad; set radius to default value and re-solve
268: the trust-region subproblem to get a direction */
269: tao->trust = tao->trust0;
271: /* Modify the radius if it is too large or small */
272: tao->trust = PetscMax(tao->trust, tl->min_radius);
273: tao->trust = PetscMin(tao->trust, tl->max_radius);
275: KSPCGSetRadius(tao->ksp,tl->max_radius);
276: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
277: KSPGetIterationNumber(tao->ksp,&its);
278: tao->ksp_its+=its;
279: tao->ksp_tot_its+=its;
280: KSPCGGetNormD(tao->ksp, &norm_d);
282: if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
283: }
284: }
286: VecScale(tao->stepdirection, -1.0);
287: KSPGetConvergedReason(tao->ksp, &ksp_reason);
288: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (tl->bfgs_pre)) {
289: /* Preconditioner is numerically indefinite; reset the
290: approximation if using BFGS preconditioning. */
291: MatLMVMReset(tl->M, PETSC_FALSE);
292: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
293: bfgsUpdates = 1;
294: }
296: /* Check trust-region reduction conditions */
297: tr_reject = 0;
298: if (NTL_UPDATE_REDUCTION == tl->update_type) {
299: /* Get predicted reduction */
300: KSPCGGetObjFcn(tao->ksp,&prered);
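/* Editor's note: KSPCGGetObjFcn() reports the value of the quadratic model used
   by the nash/stcg/gltr solvers at the computed direction; this value is
   negative whenever the model predicts a decrease, so prered >= 0 signals a bad
   step and prered is negated below to obtain the predicted reduction. */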
301: if (prered >= 0.0) {
302: /* The predicted reduction has the wrong sign. This cannot
303: happen in infinite precision arithmetic. Step should
304: be rejected! */
305: tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
306: tr_reject = 1;
307: } else {
308: /* Compute trial step and function value */
309: VecCopy(tao->solution, tl->W);
310: VecAXPY(tl->W, 1.0, tao->stepdirection);
311: TaoComputeObjective(tao, tl->W, &ftrial);
313: if (PetscIsInfOrNanReal(ftrial)) {
314: tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
315: tr_reject = 1;
316: } else {
317: /* Compute the actual reduction */
318: actred = f - ftrial;
319: prered = -prered;
320: if ((PetscAbsScalar(actred) <= tl->epsilon) &&
321: (PetscAbsScalar(prered) <= tl->epsilon)) {
322: kappa = 1.0;
323: } else {
324: kappa = actred / prered;
325: }
327: /* Accept or reject the step and update radius */
328: if (kappa < tl->eta1) {
329: /* Reject the step */
330: tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
331: tr_reject = 1;
332: } else {
333: /* Accept the step */
334: if (kappa < tl->eta2) {
335: /* Marginally bad step */
336: tao->trust = tl->alpha2 * PetscMin(tao->trust, norm_d);
337: } else if (kappa < tl->eta3) {
338: /* Reasonable step */
339: tao->trust = tl->alpha3 * tao->trust;
340: } else if (kappa < tl->eta4) {
341: /* Good step */
342: tao->trust = PetscMax(tl->alpha4 * norm_d, tao->trust);
343: } else {
344: /* Very good step */
345: tao->trust = PetscMax(tl->alpha5 * norm_d, tao->trust);
346: }
347: }
348: }
349: }
350: } else {
351: /* Get predicted reduction */
352: KSPCGGetObjFcn(tao->ksp,&prered);
353: if (prered >= 0.0) {
354: /* The predicted reduction has the wrong sign. This cannot
355: happen in infinite precision arithmetic. Step should
356: be rejected! */
357: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
358: tr_reject = 1;
359: } else {
360: VecCopy(tao->solution, tl->W);
361: VecAXPY(tl->W, 1.0, tao->stepdirection);
362: TaoComputeObjective(tao, tl->W, &ftrial);
363: if (PetscIsInfOrNanReal(ftrial)) {
364: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
365: tr_reject = 1;
366: } else {
367: VecDot(tao->gradient, tao->stepdirection, &gdx);
369: actred = f - ftrial;
370: prered = -prered;
371: if ((PetscAbsScalar(actred) <= tl->epsilon) &&
372: (PetscAbsScalar(prered) <= tl->epsilon)) {
373: kappa = 1.0;
374: } else {
375: kappa = actred / prered;
376: }
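/* Editor's note: tau_1 and tau_2 below appear to interpolate the actual and
   predicted reductions using the directional derivative gdx, giving candidate
   factors for shrinking or enlarging the radius; they are then limited by the
   gamma1..gamma4 parameters according to how close kappa is to 1. */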
378: tau_1 = tl->theta * gdx / (tl->theta * gdx - (1.0 - tl->theta) * prered + actred);
379: tau_2 = tl->theta * gdx / (tl->theta * gdx + (1.0 + tl->theta) * prered - actred);
380: tau_min = PetscMin(tau_1, tau_2);
381: tau_max = PetscMax(tau_1, tau_2);
383: if (kappa >= 1.0 - tl->mu1) {
384: /* Great agreement; accept step and update radius */
385: if (tau_max < 1.0) {
386: tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
387: } else if (tau_max > tl->gamma4) {
388: tao->trust = PetscMax(tao->trust, tl->gamma4 * norm_d);
389: } else {
390: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
391: }
392: } else if (kappa >= 1.0 - tl->mu2) {
393: /* Good agreement */
395: if (tau_max < tl->gamma2) {
396: tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
397: } else if (tau_max > tl->gamma3) {
398: tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
399: } else if (tau_max < 1.0) {
400: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
401: } else {
402: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
403: }
404: } else {
405: /* Not good agreement */
406: if (tau_min > 1.0) {
407: tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
408: } else if (tau_max < tl->gamma1) {
409: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
410: } else if ((tau_min < tl->gamma1) && (tau_max >= 1.0)) {
411: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
412: } else if ((tau_1 >= tl->gamma1) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1) || (tau_2 >= 1.0))) {
413: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
414: } else if ((tau_2 >= tl->gamma1) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1) || (tau_1 >= 1.0))) {
415: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
416: } else {
417: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
418: }
419: tr_reject = 1;
420: }
421: }
422: }
423: }
425: if (tr_reject) {
426: /* The trust-region constraints rejected the step. Apply a linesearch.
427: Check for descent direction. */
428: VecDot(tao->stepdirection, tao->gradient, &gdx);
429: if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
430: /* Newton step is not descent or direction produced Inf or NaN */
432: if (!tl->bfgs_pre) {
433: /* We don't have the BFGS matrix around and updated;
434: must use the gradient direction in this case */
435: VecCopy(tao->gradient, tao->stepdirection);
436: VecScale(tao->stepdirection, -1.0);
437: ++tl->grad;
438: stepType = NTL_GRADIENT;
439: } else {
440: /* Attempt to use the BFGS direction */
441: MatSolve(tl->M, tao->gradient, tao->stepdirection);
442: VecScale(tao->stepdirection, -1.0);
444: /* Check for success (descent direction) */
445: VecDot(tao->stepdirection, tao->gradient, &gdx);
446: if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
447: /* BFGS direction is not a descent direction or produced Inf or NaN.
448: We can assert bfgsUpdates > 1 in this case because
449: the first solve produces the scaled gradient direction,
450: which is guaranteed to be descent */
452: /* Use steepest descent direction (scaled) */
453: MatLMVMReset(tl->M, PETSC_FALSE);
454: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
455: MatSolve(tl->M, tao->gradient, tao->stepdirection);
456: VecScale(tao->stepdirection, -1.0);
458: bfgsUpdates = 1;
459: ++tl->grad;
460: stepType = NTL_GRADIENT;
461: } else {
462: MatLMVMGetUpdateCount(tl->M, &bfgsUpdates);
463: if (1 == bfgsUpdates) {
464: /* The first BFGS direction is always the scaled gradient */
465: ++tl->grad;
466: stepType = NTL_GRADIENT;
467: } else {
468: ++tl->bfgs;
469: stepType = NTL_BFGS;
470: }
471: }
472: }
473: } else {
474: /* Computed Newton step is descent */
475: ++tl->newt;
476: stepType = NTL_NEWTON;
477: }
479: /* Perform the linesearch */
480: fold = f;
481: VecCopy(tao->solution, tl->Xold);
482: VecCopy(tao->gradient, tl->Gold);
484: step = 1.0;
485: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
486: TaoAddLineSearchCounts(tao);
488: while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NTL_GRADIENT) { /* Linesearch failed */
489: /* Linesearch failed */
490: f = fold;
491: VecCopy(tl->Xold, tao->solution);
492: VecCopy(tl->Gold, tao->gradient);
494: switch(stepType) {
495: case NTL_NEWTON:
496: /* Failed to obtain acceptable iterate with Newton step */
498: if (!tl->bfgs_pre) {
499: /* We don't have the BFGS matrix around and updated;
500: must use the gradient direction in this case */
501: VecCopy(tao->gradient, tao->stepdirection);
502: ++tl->grad;
503: stepType = NTL_GRADIENT;
504: } else {
505: /* Attempt to use the BFGS direction */
506: MatSolve(tl->M, tao->gradient, tao->stepdirection);
509: /* Check for success (descent direction) */
510: VecDot(tao->stepdirection, tao->gradient, &gdx);
511: if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
512: /* BFGS direction is not a descent direction or produced Inf or NaN.
513: We can assert bfgsUpdates > 1 in this case;
514: use the steepest descent direction (scaled) */
515: MatLMVMReset(tl->M, PETSC_FALSE);
516: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
517: MatSolve(tl->M, tao->gradient, tao->stepdirection);
519: bfgsUpdates = 1;
520: ++tl->grad;
521: stepType = NTL_GRADIENT;
522: } else {
523: MatLMVMGetUpdateCount(tl->M, &bfgsUpdates);
524: if (1 == bfgsUpdates) {
525: /* The first BFGS direction is always the scaled gradient */
526: ++tl->grad;
527: stepType = NTL_GRADIENT;
528: } else {
529: ++tl->bfgs;
530: stepType = NTL_BFGS;
531: }
532: }
533: }
534: break;
536: case NTL_BFGS:
537: /* Can only enter if the BFGS (LMVM) preconditioner is in use.
538: Failed to obtain acceptable iterate with BFGS step;
539: attempt to use the scaled gradient direction */
540: MatLMVMReset(tl->M, PETSC_FALSE);
541: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
542: MatSolve(tl->M, tao->gradient, tao->stepdirection);
544: bfgsUpdates = 1;
545: ++tl->grad;
546: stepType = NTL_GRADIENT;
547: break;
548: }
549: VecScale(tao->stepdirection, -1.0);
551: /* This may be incorrect; linesearch has values for stepmax and stepmin
552: that should be reset. */
553: step = 1.0;
554: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
555: TaoAddLineSearchCounts(tao);
556: }
558: if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
559: /* Failed to find an improving point */
560: f = fold;
561: VecCopy(tl->Xold, tao->solution);
562: VecCopy(tl->Gold, tao->gradient);
563: tao->trust = 0.0;
564: step = 0.0;
565: tao->reason = TAO_DIVERGED_LS_FAILURE;
566: break;
567: } else if (stepType == NTL_NEWTON) {
568: if (step < tl->nu1) {
569: /* Very bad step taken; reduce radius */
570: tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
571: } else if (step < tl->nu2) {
572: /* Reasonably bad step taken; reduce radius */
573: tao->trust = tl->omega2 * PetscMin(norm_d, tao->trust);
574: } else if (step < tl->nu3) {
575: /* Reasonable step was taken; leave radius alone */
576: if (tl->omega3 < 1.0) {
577: tao->trust = tl->omega3 * PetscMin(norm_d, tao->trust);
578: } else if (tl->omega3 > 1.0) {
579: tao->trust = PetscMax(tl->omega3 * norm_d, tao->trust);
580: }
581: } else if (step < tl->nu4) {
582: /* Full step taken; increase the radius */
583: tao->trust = PetscMax(tl->omega4 * norm_d, tao->trust);
584: } else {
585: /* More than full step taken; increase the radius */
586: tao->trust = PetscMax(tl->omega5 * norm_d, tao->trust);
587: }
588: } else {
589: /* Newton step was not good; reduce the radius */
590: tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
591: }
592: } else {
593: /* Trust-region step is accepted */
594: VecCopy(tl->W, tao->solution);
595: f = ftrial;
596: TaoComputeGradient(tao, tao->solution, tao->gradient);
597: ++tl->ntrust;
598: }
600: /* The radius may have been increased; modify if it is too large */
601: tao->trust = PetscMin(tao->trust, tl->max_radius);
603: /* Check for converged */
604: VecNorm(tao->gradient, NORM_2, &gnorm);
605: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
606: needH = 1;
608: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
609: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
610: (*tao->ops->convergencetest)(tao,tao->cnvP);
611: }
612: return(0);
613: }
615: /* ---------------------------------------------------------- */
616: static PetscErrorCode TaoSetUp_NTL(Tao tao)
617: {
618: TAO_NTL *tl = (TAO_NTL *)tao->data;
622: if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient); }
623: if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
624: if (!tl->W) { VecDuplicate(tao->solution, &tl->W);}
625: if (!tl->Xold) { VecDuplicate(tao->solution, &tl->Xold);}
626: if (!tl->Gold) { VecDuplicate(tao->solution, &tl->Gold);}
627: tl->bfgs_pre = 0;
628: tl->M = 0;
629: return(0);
630: }
632: /*------------------------------------------------------------*/
633: static PetscErrorCode TaoDestroy_NTL(Tao tao)
634: {
635: TAO_NTL *tl = (TAO_NTL *)tao->data;
639: if (tao->setupcalled) {
640: VecDestroy(&tl->W);
641: VecDestroy(&tl->Xold);
642: VecDestroy(&tl->Gold);
643: }
644: PetscFree(tao->data);
645: return(0);
646: }
648: /*------------------------------------------------------------*/
649: static PetscErrorCode TaoSetFromOptions_NTL(PetscOptionItems *PetscOptionsObject,Tao tao)
650: {
651: TAO_NTL *tl = (TAO_NTL *)tao->data;
655: PetscOptionsHead(PetscOptionsObject,"Newton's method with trust-region globalization and line-search fallback for unconstrained optimization");
656: PetscOptionsEList("-tao_ntl_init_type", "radius initialization type", "", NTL_INIT, NTL_INIT_TYPES, NTL_INIT[tl->init_type], &tl->init_type,NULL);
657: PetscOptionsEList("-tao_ntl_update_type", "radius update type", "", NTL_UPDATE, NTL_UPDATE_TYPES, NTL_UPDATE[tl->update_type], &tl->update_type,NULL);
658: PetscOptionsReal("-tao_ntl_eta1", "poor steplength; reduce radius", "", tl->eta1, &tl->eta1,NULL);
659: PetscOptionsReal("-tao_ntl_eta2", "reasonable steplength; leave radius alone", "", tl->eta2, &tl->eta2,NULL);
660: PetscOptionsReal("-tao_ntl_eta3", "good steplength; increase radius", "", tl->eta3, &tl->eta3,NULL);
661: PetscOptionsReal("-tao_ntl_eta4", "excellent steplength; greatly increase radius", "", tl->eta4, &tl->eta4,NULL);
662: PetscOptionsReal("-tao_ntl_alpha1", "", "", tl->alpha1, &tl->alpha1,NULL);
663: PetscOptionsReal("-tao_ntl_alpha2", "", "", tl->alpha2, &tl->alpha2,NULL);
664: PetscOptionsReal("-tao_ntl_alpha3", "", "", tl->alpha3, &tl->alpha3,NULL);
665: PetscOptionsReal("-tao_ntl_alpha4", "", "", tl->alpha4, &tl->alpha4,NULL);
666: PetscOptionsReal("-tao_ntl_alpha5", "", "", tl->alpha5, &tl->alpha5,NULL);
667: PetscOptionsReal("-tao_ntl_nu1", "poor steplength; reduce radius", "", tl->nu1, &tl->nu1,NULL);
668: PetscOptionsReal("-tao_ntl_nu2", "reasonable steplength; leave radius alone", "", tl->nu2, &tl->nu2,NULL);
669: PetscOptionsReal("-tao_ntl_nu3", "good steplength; increase radius", "", tl->nu3, &tl->nu3,NULL);
670: PetscOptionsReal("-tao_ntl_nu4", "excellent steplength; greatly increase radius", "", tl->nu4, &tl->nu4,NULL);
671: PetscOptionsReal("-tao_ntl_omega1", "", "", tl->omega1, &tl->omega1,NULL);
672: PetscOptionsReal("-tao_ntl_omega2", "", "", tl->omega2, &tl->omega2,NULL);
673: PetscOptionsReal("-tao_ntl_omega3", "", "", tl->omega3, &tl->omega3,NULL);
674: PetscOptionsReal("-tao_ntl_omega4", "", "", tl->omega4, &tl->omega4,NULL);
675: PetscOptionsReal("-tao_ntl_omega5", "", "", tl->omega5, &tl->omega5,NULL);
676: PetscOptionsReal("-tao_ntl_mu1_i", "", "", tl->mu1_i, &tl->mu1_i,NULL);
677: PetscOptionsReal("-tao_ntl_mu2_i", "", "", tl->mu2_i, &tl->mu2_i,NULL);
678: PetscOptionsReal("-tao_ntl_gamma1_i", "", "", tl->gamma1_i, &tl->gamma1_i,NULL);
679: PetscOptionsReal("-tao_ntl_gamma2_i", "", "", tl->gamma2_i, &tl->gamma2_i,NULL);
680: PetscOptionsReal("-tao_ntl_gamma3_i", "", "", tl->gamma3_i, &tl->gamma3_i,NULL);
681: PetscOptionsReal("-tao_ntl_gamma4_i", "", "", tl->gamma4_i, &tl->gamma4_i,NULL);
682: PetscOptionsReal("-tao_ntl_theta_i", "", "", tl->theta_i, &tl->theta_i,NULL);
683: PetscOptionsReal("-tao_ntl_mu1", "", "", tl->mu1, &tl->mu1,NULL);
684: PetscOptionsReal("-tao_ntl_mu2", "", "", tl->mu2, &tl->mu2,NULL);
685: PetscOptionsReal("-tao_ntl_gamma1", "", "", tl->gamma1, &tl->gamma1,NULL);
686: PetscOptionsReal("-tao_ntl_gamma2", "", "", tl->gamma2, &tl->gamma2,NULL);
687: PetscOptionsReal("-tao_ntl_gamma3", "", "", tl->gamma3, &tl->gamma3,NULL);
688: PetscOptionsReal("-tao_ntl_gamma4", "", "", tl->gamma4, &tl->gamma4,NULL);
689: PetscOptionsReal("-tao_ntl_theta", "", "", tl->theta, &tl->theta,NULL);
690: PetscOptionsReal("-tao_ntl_min_radius", "lower bound on initial radius", "", tl->min_radius, &tl->min_radius,NULL);
691: PetscOptionsReal("-tao_ntl_max_radius", "upper bound on radius", "", tl->max_radius, &tl->max_radius,NULL);
692: PetscOptionsReal("-tao_ntl_epsilon", "tolerance used when computing actual and predicted reduction", "", tl->epsilon, &tl->epsilon,NULL);
693: PetscOptionsTail();
694: TaoLineSearchSetFromOptions(tao->linesearch);
695: KSPSetFromOptions(tao->ksp);
696: return(0);
697: }
699: /*------------------------------------------------------------*/
700: static PetscErrorCode TaoView_NTL(Tao tao, PetscViewer viewer)
701: {
702: TAO_NTL *tl = (TAO_NTL *)tao->data;
703: PetscBool isascii;
707: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
708: if (isascii) {
709: PetscViewerASCIIPushTab(viewer);
710: PetscViewerASCIIPrintf(viewer, "Trust-region steps: %D\n", tl->ntrust);
711: PetscViewerASCIIPrintf(viewer, "Newton search steps: %D\n", tl->newt);
712: PetscViewerASCIIPrintf(viewer, "BFGS search steps: %D\n", tl->bfgs);
713: PetscViewerASCIIPrintf(viewer, "Gradient search steps: %D\n", tl->grad);
714: PetscViewerASCIIPopTab(viewer);
715: }
716: return(0);
717: }
719: /* ---------------------------------------------------------- */
720: /*MC
721: TAONTL - Newton's method with trust region globalization and line search fallback.
722: At each iteration, the method approximately solves the trust-region subproblem
723: for the step d and, if that step is rejected, falls back to a line search along d:
725: min_d 0.5 d^T H_k d + g_k^T d, s.t. ||d|| <= Delta_k
727: Options Database Keys:
728: + -tao_ntl_init_type - "constant","direction","interpolation"
729: . -tao_ntl_update_type - "reduction","interpolation"
730: . -tao_ntl_min_radius - lower bound on trust region radius
731: . -tao_ntl_max_radius - upper bound on trust region radius
732: . -tao_ntl_epsilon - tolerance for accepting actual / predicted reduction
733: . -tao_ntl_mu1_i - mu1 interpolation init factor
734: . -tao_ntl_mu2_i - mu2 interpolation init factor
735: . -tao_ntl_gamma1_i - gamma1 interpolation init factor
736: . -tao_ntl_gamma2_i - gamma2 interpolation init factor
737: . -tao_ntl_gamma3_i - gamma3 interpolation init factor
738: . -tao_ntl_gamma4_i - gamma4 interpolation init factor
739: . -tao_ntl_theta_i - theta interpolation init factor
740: . -tao_ntl_eta1 - eta1 reduction update factor
741: . -tao_ntl_eta2 - eta2 reduction update factor
742: . -tao_ntl_eta3 - eta3 reduction update factor
743: . -tao_ntl_eta4 - eta4 reduction update factor
744: . -tao_ntl_alpha1 - alpha1 reduction update factor
745: . -tao_ntl_alpha2 - alpha2 reduction update factor
746: . -tao_ntl_alpha3 - alpha3 reduction update factor
747: . -tao_ntl_alpha4 - alpha4 reduction update factor
748: . -tao_ntl_alpha5 - alpha5 reduction update factor
749: . -tao_ntl_mu1 - mu1 interpolation update
750: . -tao_ntl_mu2 - mu2 interpolation update
751: . -tao_ntl_gamma1 - gamma1 interpolation update
752: . -tao_ntl_gamma2 - gamma2 interpolation update
753: . -tao_ntl_gamma3 - gamma3 interpolation update
754: . -tao_ntl_gamma4 - gamma4 interpolation update
755: - -tao_ntl_theta - theta interpolation update
757: Level: beginner
758: M*/
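/* Editor's note: a hedged example of driving this solver purely from the options
   database; ./myprog stands for any TAO application and the values shown are
   illustrative, not recommendations:

     ./myprog -tao_type ntl -tao_ntl_init_type interpolation \
              -tao_ntl_update_type reduction -tao_ntl_max_radius 1e3 \
              -tao_monitor -tao_view
*/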
759: PETSC_EXTERN PetscErrorCode TaoCreate_NTL(Tao tao)
760: {
761: TAO_NTL *tl;
763: const char *morethuente_type = TAOLINESEARCHMT;
766: PetscNewLog(tao,&tl);
767: tao->ops->setup = TaoSetUp_NTL;
768: tao->ops->solve = TaoSolve_NTL;
769: tao->ops->view = TaoView_NTL;
770: tao->ops->setfromoptions = TaoSetFromOptions_NTL;
771: tao->ops->destroy = TaoDestroy_NTL;
773: /* Override default settings (unless already changed) */
774: if (!tao->max_it_changed) tao->max_it = 50;
775: if (!tao->trust0_changed) tao->trust0 = 100.0;
777: tao->data = (void*)tl;
779: /* Default values for trust-region radius update based on steplength */
780: tl->nu1 = 0.25;
781: tl->nu2 = 0.50;
782: tl->nu3 = 1.00;
783: tl->nu4 = 1.25;
785: tl->omega1 = 0.25;
786: tl->omega2 = 0.50;
787: tl->omega3 = 1.00;
788: tl->omega4 = 2.00;
789: tl->omega5 = 4.00;
791: /* Default values for trust-region radius update based on reduction */
792: tl->eta1 = 1.0e-4;
793: tl->eta2 = 0.25;
794: tl->eta3 = 0.50;
795: tl->eta4 = 0.90;
797: tl->alpha1 = 0.25;
798: tl->alpha2 = 0.50;
799: tl->alpha3 = 1.00;
800: tl->alpha4 = 2.00;
801: tl->alpha5 = 4.00;
803: /* Default values for trust-region radius update based on interpolation */
804: tl->mu1 = 0.10;
805: tl->mu2 = 0.50;
807: tl->gamma1 = 0.25;
808: tl->gamma2 = 0.50;
809: tl->gamma3 = 2.00;
810: tl->gamma4 = 4.00;
812: tl->theta = 0.05;
814: /* Default values for trust region initialization based on interpolation */
815: tl->mu1_i = 0.35;
816: tl->mu2_i = 0.50;
818: tl->gamma1_i = 0.0625;
819: tl->gamma2_i = 0.5;
820: tl->gamma3_i = 2.0;
821: tl->gamma4_i = 5.0;
823: tl->theta_i = 0.25;
825: /* Remaining parameters */
826: tl->min_radius = 1.0e-10;
827: tl->max_radius = 1.0e10;
828: tl->epsilon = 1.0e-6;
830: tl->init_type = NTL_INIT_INTERPOLATION;
831: tl->update_type = NTL_UPDATE_REDUCTION;
833: TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
834: PetscObjectIncrementTabLevel((PetscObject)tao->linesearch,(PetscObject)tao,1);
835: TaoLineSearchSetType(tao->linesearch, morethuente_type);
836: TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
837: TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
838: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
839: PetscObjectIncrementTabLevel((PetscObject)tao->ksp,(PetscObject)tao,1);
840: KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
841: KSPAppendOptionsPrefix(tao->ksp,"tao_ntl_");
842: KSPSetType(tao->ksp,KSPCGSTCG);
843: return(0);
844: }