Actual source code: ntl.c
1: #include <../src/tao/unconstrained/impls/ntl/ntlimpl.h>
3: #include <petscksp.h>
5: #define NTL_INIT_CONSTANT 0
6: #define NTL_INIT_DIRECTION 1
7: #define NTL_INIT_INTERPOLATION 2
8: #define NTL_INIT_TYPES 3
10: #define NTL_UPDATE_REDUCTION 0
11: #define NTL_UPDATE_INTERPOLATION 1
12: #define NTL_UPDATE_TYPES 2
14: static const char *NTL_INIT[64] = {"constant","direction","interpolation"};
16: static const char *NTL_UPDATE[64] = {"reduction","interpolation"};
18: /* Implements Newton's method with a trust-region, line-search approach for
19: solving unconstrained minimization problems. A Moré-Thuente line search
20: is used to guarantee that the BFGS preconditioner remains positive
21: definite. */
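/* Editorial outline of TaoSolve_NTL() below:
     1. Initialize the trust-region radius (constant, interpolation, or from the norm
        of the first direction).
     2. Each iteration then
        a. computes the Hessian and updates the LMVM preconditioner (if one is used),
        b. solves the trust-region subproblem with a CG-type KSP (NASH, STCG, or GLTR),
        c. forms kappa = actual reduction / predicted reduction and accepts or rejects
           the step, updating the radius, and
        d. on rejection falls back to a line search along the Newton, BFGS, or
           scaled-gradient direction. */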
23: #define NTL_NEWTON 0
24: #define NTL_BFGS 1
25: #define NTL_SCALED_GRADIENT 2
26: #define NTL_GRADIENT 3
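/* These labels record which direction produced the current step when the line-search
   fallback is taken; the corresponding counters (tl->newt, tl->bfgs, tl->grad, plus
   tl->ntrust for accepted trust-region steps) are reported by TaoView_NTL(). */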
28: static PetscErrorCode TaoSolve_NTL(Tao tao)
29: {
30: TAO_NTL *tl = (TAO_NTL *)tao->data;
31: KSPType ksp_type;
32: PetscBool is_nash,is_stcg,is_gltr,is_bfgs,is_jacobi,is_symmetric,sym_set;
33: KSPConvergedReason ksp_reason;
34: PC pc;
35: TaoLineSearchConvergedReason ls_reason;
37: PetscReal fmin, ftrial, prered, actred, kappa, sigma;
38: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
39: PetscReal f, fold, gdx, gnorm;
40: PetscReal step = 1.0;
42: PetscReal norm_d = 0.0;
43: PetscErrorCode ierr;
44: PetscInt stepType;
45: PetscInt its;
47: PetscInt bfgsUpdates = 0;
48: PetscInt needH;
50: PetscInt i_max = 5;
51: PetscInt j_max = 1;
52: PetscInt i, j, n, N;
54: PetscInt tr_reject;
57: if (tao->XL || tao->XU || tao->ops->computebounds) {
58: PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by the NTL algorithm\n");
59: }
61: KSPGetType(tao->ksp,&ksp_type);
62: PetscStrcmp(ksp_type,KSPNASH,&is_nash);
63: PetscStrcmp(ksp_type,KSPSTCG,&is_stcg);
64: PetscStrcmp(ksp_type,KSPGLTR,&is_gltr);
65: if (!is_nash && !is_stcg && !is_gltr) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_SUP,"TAO_NTL requires nash, stcg, or gltr for the KSP");
67: /* Initialize the radius and modify if it is too large or small */
68: tao->trust = tao->trust0;
69: tao->trust = PetscMax(tao->trust, tl->min_radius);
70: tao->trust = PetscMin(tao->trust, tl->max_radius);
72: /* Set up the LMVM matrix used for BFGS preconditioning, if selected */
73: KSPGetPC(tao->ksp, &pc);
74: PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
75: PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
76: if (is_bfgs) {
77: tl->bfgs_pre = pc;
78: PCLMVMGetMatLMVM(tl->bfgs_pre, &tl->M);
79: VecGetLocalSize(tao->solution, &n);
80: VecGetSize(tao->solution, &N);
81: MatSetSizes(tl->M, n, n, N, N);
82: MatLMVMAllocate(tl->M, tao->solution, tao->gradient);
83: MatIsSymmetricKnown(tl->M, &sym_set, &is_symmetric);
84: if (!sym_set || !is_symmetric) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
85: } else if (is_jacobi) {
86: PCJacobiSetUseAbs(pc,PETSC_TRUE);
87: }
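/* Editorial note: when the preconditioner is PCLMVM, the LMVM matrix tl->M obtained here
   also supplies the BFGS fallback directions later via MatSolve(); for PCJACOBI, taking
   absolute values of the diagonal keeps the preconditioner positive even when the
   Hessian is indefinite. */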
89: /* Check convergence criteria */
90: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
91: VecNorm(tao->gradient, NORM_2, &gnorm);
92: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
93: needH = 1;
95: tao->reason = TAO_CONTINUE_ITERATING;
96: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
97: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
98: (*tao->ops->convergencetest)(tao,tao->cnvP);
99: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
101: /* Initialize trust-region radius */
102: switch(tl->init_type) {
103: case NTL_INIT_CONSTANT:
104: /* Use the initial radius specified */
105: break;
107: case NTL_INIT_INTERPOLATION:
108: /* Use interpolation to determine the initial radius */
109: max_radius = 0.0;
111: for (j = 0; j < j_max; ++j) {
112: fmin = f;
113: sigma = 0.0;
115: if (needH) {
116: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
117: needH = 0;
118: }
120: for (i = 0; i < i_max; ++i) {
121: VecCopy(tao->solution, tl->W);
122: VecAXPY(tl->W, -tao->trust/gnorm, tao->gradient);
124: TaoComputeObjective(tao, tl->W, &ftrial);
125: if (PetscIsInfOrNanReal(ftrial)) {
126: tau = tl->gamma1_i;
127: } else {
128: if (ftrial < fmin) {
129: fmin = ftrial;
130: sigma = -tao->trust / gnorm;
131: }
133: MatMult(tao->hessian, tao->gradient, tao->stepdirection);
134: VecDot(tao->gradient, tao->stepdirection, &prered);
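/* Editorial note: with the steepest-descent trial step w = x - (Delta/||g||) g, the
   quadratic model q(d) = g^T d + 0.5 d^T H d predicts a reduction of
       Delta*||g|| - 0.5*(Delta/||g||)^2 * g^T H g,
   which is exactly what the next line computes (prered currently holds g^T H g). */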
136: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
137: actred = f - ftrial;
138: if ((PetscAbsScalar(actred) <= tl->epsilon) && (PetscAbsScalar(prered) <= tl->epsilon)) {
139: kappa = 1.0;
140: } else {
141: kappa = actred / prered;
142: }
144: tau_1 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust + (1.0 - tl->theta_i) * prered - actred);
145: tau_2 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust - (1.0 + tl->theta_i) * prered + actred);
146: tau_min = PetscMin(tau_1, tau_2);
147: tau_max = PetscMax(tau_1, tau_2);
149: if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tl->mu1_i) {
150: /* Great agreement */
151: max_radius = PetscMax(max_radius, tao->trust);
153: if (tau_max < 1.0) {
154: tau = tl->gamma3_i;
155: } else if (tau_max > tl->gamma4_i) {
156: tau = tl->gamma4_i;
157: } else if (tau_1 >= 1.0 && tau_1 <= tl->gamma4_i && tau_2 < 1.0) {
158: tau = tau_1;
159: } else if (tau_2 >= 1.0 && tau_2 <= tl->gamma4_i && tau_1 < 1.0) {
160: tau = tau_2;
161: } else {
162: tau = tau_max;
163: }
164: } else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= tl->mu2_i) {
165: /* Good agreement */
166: max_radius = PetscMax(max_radius, tao->trust);
168: if (tau_max < tl->gamma2_i) {
169: tau = tl->gamma2_i;
170: } else if (tau_max > tl->gamma3_i) {
171: tau = tl->gamma3_i;
172: } else {
173: tau = tau_max;
174: }
175: } else {
176: /* Not good agreement */
177: if (tau_min > 1.0) {
178: tau = tl->gamma2_i;
179: } else if (tau_max < tl->gamma1_i) {
180: tau = tl->gamma1_i;
181: } else if ((tau_min < tl->gamma1_i) && (tau_max >= 1.0)) {
182: tau = tl->gamma1_i;
183: } else if ((tau_1 >= tl->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1_i) || (tau_2 >= 1.0))) {
184: tau = tau_1;
185: } else if ((tau_2 >= tl->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1_i) || (tau_1 >= 1.0))) {
186: tau = tau_2;
187: } else {
188: tau = tau_max;
189: }
190: }
191: }
192: tao->trust = tau * tao->trust;
193: }
195: if (fmin < f) {
196: f = fmin;
197: VecAXPY(tao->solution, sigma, tao->gradient);
198: TaoComputeGradient(tao, tao->solution, tao->gradient);
200: VecNorm(tao->gradient, NORM_2, &gnorm);
201: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
202: needH = 1;
204: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
205: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
206: (*tao->ops->convergencetest)(tao,tao->cnvP);
207: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
208: }
209: }
210: tao->trust = PetscMax(tao->trust, max_radius);
212: /* Modify the radius if it is too large or small */
213: tao->trust = PetscMax(tao->trust, tl->min_radius);
214: tao->trust = PetscMin(tao->trust, tl->max_radius);
215: break;
217: default:
218: /* Norm of the first direction will initialize radius */
219: tao->trust = 0.0;
220: break;
221: }
223: /* Reset the step-type counters */
224: tl->ntrust = 0;
225: tl->newt = 0;
226: tl->bfgs = 0;
227: tl->grad = 0;
229: /* Have not converged; continue with Newton method */
230: while (tao->reason == TAO_CONTINUE_ITERATING) {
231: /* Call general purpose update function */
232: if (tao->ops->update) {
233: (*tao->ops->update)(tao, tao->niter, tao->user_update);
234: }
235: ++tao->niter;
236: tao->ksp_its=0;
237: /* Compute the Hessian */
238: if (needH) {
239: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
240: }
242: if (tl->bfgs_pre) {
243: /* Update the limited memory preconditioner */
244: MatLMVMUpdate(tl->M,tao->solution, tao->gradient);
245: ++bfgsUpdates;
246: }
247: KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
248: /* Solve the Newton system of equations */
249: KSPCGSetRadius(tao->ksp,tl->max_radius);
250: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
251: KSPGetIterationNumber(tao->ksp,&its);
252: tao->ksp_its+=its;
253: tao->ksp_tot_its+=its;
254: KSPCGGetNormD(tao->ksp, &norm_d);
256: if (0.0 == tao->trust) {
257: /* Radius was uninitialized; use the norm of the direction */
258: if (norm_d > 0.0) {
259: tao->trust = norm_d;
261: /* Modify the radius if it is too large or small */
262: tao->trust = PetscMax(tao->trust, tl->min_radius);
263: tao->trust = PetscMin(tao->trust, tl->max_radius);
264: } else {
265: /* The direction was bad; set radius to default value and re-solve
266: the trust-region subproblem to get a direction */
267: tao->trust = tao->trust0;
269: /* Modify the radius if it is too large or small */
270: tao->trust = PetscMax(tao->trust, tl->min_radius);
271: tao->trust = PetscMin(tao->trust, tl->max_radius);
273: KSPCGSetRadius(tao->ksp,tl->max_radius);
274: KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
275: KSPGetIterationNumber(tao->ksp,&its);
276: tao->ksp_its+=its;
277: tao->ksp_tot_its+=its;
278: KSPCGGetNormD(tao->ksp, &norm_d);
280: if (norm_d == 0.0) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_PLIB, "Initial direction zero");
281: }
282: }
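/* Editorial note: the KSP is handed the raw gradient as the right-hand side, so the
   solution approximately minimizes the quadratic model within the trust region
   (roughly H^{-1} g when the constraint is inactive); it is negated below to obtain
   a descent direction. norm_d, reported by KSPCGGetNormD(), is the length of that
   step and is reused in the radius updates. */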
284: VecScale(tao->stepdirection, -1.0);
285: KSPGetConvergedReason(tao->ksp, &ksp_reason);
286: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (tl->bfgs_pre)) {
287: /* Preconditioner is numerically indefinite; reset the
288: approximation when using BFGS preconditioning. */
289: MatLMVMReset(tl->M, PETSC_FALSE);
290: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
291: bfgsUpdates = 1;
292: }
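/* Editorial note: resetting the LMVM matrix discards the stored updates, so it falls
   back to its initial (typically positive definite, diagonally scaled) form; the
   immediate MatLMVMUpdate() at the current point restarts the approximation, which is
   why bfgsUpdates is reset to 1. */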
294: /* Check trust-region reduction conditions */
295: tr_reject = 0;
296: if (NTL_UPDATE_REDUCTION == tl->update_type) {
297: /* Get predicted reduction */
298: KSPCGGetObjFcn(tao->ksp,&prered);
299: if (prered >= 0.0) {
300: /* The predicted reduction has the wrong sign. This cannot
301: happen in infinite precision arithmetic. Step should
302: be rejected! */
303: tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
304: tr_reject = 1;
305: } else {
306: /* Compute trial step and function value */
307: VecCopy(tao->solution, tl->W);
308: VecAXPY(tl->W, 1.0, tao->stepdirection);
309: TaoComputeObjective(tao, tl->W, &ftrial);
311: if (PetscIsInfOrNanReal(ftrial)) {
312: tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
313: tr_reject = 1;
314: } else {
315: /* Compute the actual reduction */
316: actred = f - ftrial;
317: prered = -prered;
318: if ((PetscAbsScalar(actred) <= tl->epsilon) &&
319: (PetscAbsScalar(prered) <= tl->epsilon)) {
320: kappa = 1.0;
321: } else {
322: kappa = actred / prered;
323: }
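/* kappa is the standard trust-region acceptance ratio, actual reduction over
   predicted reduction; it is compared against the thresholds eta1 <= eta2 <= eta3 <= eta4
   below to reject the step or to rescale the radius. */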
325: /* Accept or reject the step and update the radius */
326: if (kappa < tl->eta1) {
327: /* Reject the step */
328: tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
329: tr_reject = 1;
330: } else {
331: /* Accept the step */
332: if (kappa < tl->eta2) {
333: /* Marginally bad step */
334: tao->trust = tl->alpha2 * PetscMin(tao->trust, norm_d);
335: } else if (kappa < tl->eta3) {
336: /* Reasonable step */
337: tao->trust = tl->alpha3 * tao->trust;
338: } else if (kappa < tl->eta4) {
339: /* Good step */
340: tao->trust = PetscMax(tl->alpha4 * norm_d, tao->trust);
341: } else {
342: /* Very good step */
343: tao->trust = PetscMax(tl->alpha5 * norm_d, tao->trust);
344: }
345: }
346: }
347: }
348: } else {
349: /* Get predicted reduction */
350: KSPCGGetObjFcn(tao->ksp,&prered);
351: if (prered >= 0.0) {
352: /* The predicted reduction has the wrong sign. This cannot
353: happen in infinite precision arithmetic. Step should
354: be rejected! */
355: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
356: tr_reject = 1;
357: } else {
358: VecCopy(tao->solution, tl->W);
359: VecAXPY(tl->W, 1.0, tao->stepdirection);
360: TaoComputeObjective(tao, tl->W, &ftrial);
361: if (PetscIsInfOrNanReal(ftrial)) {
362: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
363: tr_reject = 1;
364: } else {
365: VecDot(tao->gradient, tao->stepdirection, &gdx);
367: actred = f - ftrial;
368: prered = -prered;
369: if ((PetscAbsScalar(actred) <= tl->epsilon) &&
370: (PetscAbsScalar(prered) <= tl->epsilon)) {
371: kappa = 1.0;
372: } else {
373: kappa = actred / prered;
374: }
376: tau_1 = tl->theta * gdx / (tl->theta * gdx - (1.0 - tl->theta) * prered + actred);
377: tau_2 = tl->theta * gdx / (tl->theta * gdx + (1.0 + tl->theta) * prered - actred);
378: tau_min = PetscMin(tau_1, tau_2);
379: tau_max = PetscMax(tau_1, tau_2);
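/* Editorial note: tau_1 and tau_2 are interpolation-based candidate scalings for the
   radius, built from the directional derivative gdx and the predicted and actual
   reductions; the branches below clip them to [gamma1, gamma4] depending on how well
   the model agreed with the objective (measured by kappa). */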
381: if (kappa >= 1.0 - tl->mu1) {
382: /* Great agreement; accept step and update radius */
383: if (tau_max < 1.0) {
384: tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
385: } else if (tau_max > tl->gamma4) {
386: tao->trust = PetscMax(tao->trust, tl->gamma4 * norm_d);
387: } else {
388: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
389: }
390: } else if (kappa >= 1.0 - tl->mu2) {
391: /* Good agreement */
393: if (tau_max < tl->gamma2) {
394: tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
395: } else if (tau_max > tl->gamma3) {
396: tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
397: } else if (tau_max < 1.0) {
398: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
399: } else {
400: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
401: }
402: } else {
403: /* Not good agreement */
404: if (tau_min > 1.0) {
405: tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
406: } else if (tau_max < tl->gamma1) {
407: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
408: } else if ((tau_min < tl->gamma1) && (tau_max >= 1.0)) {
409: tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
410: } else if ((tau_1 >= tl->gamma1) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1) || (tau_2 >= 1.0))) {
411: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
412: } else if ((tau_2 >= tl->gamma1) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1) || (tau_1 >= 1.0))) {
413: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
414: } else {
415: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
416: }
417: tr_reject = 1;
418: }
419: }
420: }
421: }
423: if (tr_reject) {
424: /* The trust-region constraints rejected the step. Apply a linesearch.
425: Check for descent direction. */
426: VecDot(tao->stepdirection, tao->gradient, &gdx);
427: if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
428: /* Newton step is not a descent direction or produced Inf or NaN */
430: if (!tl->bfgs_pre) {
431: /* We do not have the BFGS matrix around to use;
432: must use the gradient direction in this case */
433: VecCopy(tao->gradient, tao->stepdirection);
434: VecScale(tao->stepdirection, -1.0);
435: ++tl->grad;
436: stepType = NTL_GRADIENT;
437: } else {
438: /* Attempt to use the BFGS direction */
439: MatSolve(tl->M, tao->gradient, tao->stepdirection);
440: VecScale(tao->stepdirection, -1.0);
442: /* Check for success (descent direction) */
443: VecDot(tao->stepdirection, tao->gradient, &gdx);
444: if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
445: /* BFGS direction is not a descent direction or produced Inf or NaN.
446: We can assert bfgsUpdates > 1 in this case because
447: the first solve produces the scaled gradient direction,
448: which is guaranteed to be a descent direction */
450: /* Use steepest descent direction (scaled) */
451: MatLMVMReset(tl->M, PETSC_FALSE);
452: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
453: MatSolve(tl->M, tao->gradient, tao->stepdirection);
454: VecScale(tao->stepdirection, -1.0);
456: bfgsUpdates = 1;
457: ++tl->grad;
458: stepType = NTL_GRADIENT;
459: } else {
460: MatLMVMGetUpdateCount(tl->M, &bfgsUpdates);
461: if (1 == bfgsUpdates) {
462: /* The first BFGS direction is always the scaled gradient */
463: ++tl->grad;
464: stepType = NTL_GRADIENT;
465: } else {
466: ++tl->bfgs;
467: stepType = NTL_BFGS;
468: }
469: }
470: }
471: } else {
472: /* Computed Newton step is descent */
473: ++tl->newt;
474: stepType = NTL_NEWTON;
475: }
477: /* Perform the linesearch */
478: fold = f;
479: VecCopy(tao->solution, tl->Xold);
480: VecCopy(tao->gradient, tl->Gold);
482: step = 1.0;
483: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
484: TaoAddLineSearchCounts(tao);
486: while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NTL_GRADIENT) {
487: /* Line search failed; restore the previous point and try a fallback direction */
488: f = fold;
489: VecCopy(tl->Xold, tao->solution);
490: VecCopy(tl->Gold, tao->gradient);
492: switch(stepType) {
493: case NTL_NEWTON:
494: /* Failed to obtain acceptable iterate with Newton step */
496: if (!tl->bfgs_pre) {
497: /* We do not have the BFGS matrix around to use;
498: must use the gradient direction in this case */
499: VecCopy(tao->gradient, tao->stepdirection);
500: ++tl->grad;
501: stepType = NTL_GRADIENT;
502: } else {
503: /* Attempt to use the BFGS direction */
504: MatSolve(tl->M, tao->gradient, tao->stepdirection);
507: /* Check for success (descent direction) */
508: VecDot(tao->stepdirection, tao->gradient, &gdx);
509: if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
510: /* BFGS direction is not a descent direction or produced Inf or NaN.
511: We can assert bfgsUpdates > 1 in this case;
512: use the scaled steepest descent direction */
513: MatLMVMReset(tl->M, PETSC_FALSE);
514: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
515: MatSolve(tl->M, tao->gradient, tao->stepdirection);
517: bfgsUpdates = 1;
518: ++tl->grad;
519: stepType = NTL_GRADIENT;
520: } else {
521: MatLMVMGetUpdateCount(tl->M, &bfgsUpdates);
522: if (1 == bfgsUpdates) {
523: /* The first BFGS direction is always the scaled gradient */
524: ++tl->grad;
525: stepType = NTL_GRADIENT;
526: } else {
527: ++tl->bfgs;
528: stepType = NTL_BFGS;
529: }
530: }
531: }
532: break;
534: case NTL_BFGS:
535: /* Can only get here when the BFGS preconditioner is in use.
536: Failed to obtain an acceptable iterate with the BFGS step;
537: attempt to use the scaled gradient direction */
538: MatLMVMReset(tl->M, PETSC_FALSE);
539: MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
540: MatSolve(tl->M, tao->gradient, tao->stepdirection);
542: bfgsUpdates = 1;
543: ++tl->grad;
544: stepType = NTL_GRADIENT;
545: break;
546: }
547: VecScale(tao->stepdirection, -1.0);
549: /* This may be incorrect; linesearch has values for stepmax and stepmin
550: that should be reset. */
551: step = 1.0;
552: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
553: TaoAddLineSearchCounts(tao);
554: }
556: if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
557: /* Failed to find an improving point */
558: f = fold;
559: VecCopy(tl->Xold, tao->solution);
560: VecCopy(tl->Gold, tao->gradient);
561: tao->trust = 0.0;
562: step = 0.0;
563: tao->reason = TAO_DIVERGED_LS_FAILURE;
564: break;
565: } else if (stepType == NTL_NEWTON) {
566: if (step < tl->nu1) {
567: /* Very bad step taken; reduce radius */
568: tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
569: } else if (step < tl->nu2) {
570: /* Reasonably bad step taken; reduce radius */
571: tao->trust = tl->omega2 * PetscMin(norm_d, tao->trust);
572: } else if (step < tl->nu3) {
573: /* Reasonable step was taken; leave radius alone */
574: if (tl->omega3 < 1.0) {
575: tao->trust = tl->omega3 * PetscMin(norm_d, tao->trust);
576: } else if (tl->omega3 > 1.0) {
577: tao->trust = PetscMax(tl->omega3 * norm_d, tao->trust);
578: }
579: } else if (step < tl->nu4) {
580: /* Full step taken; increase the radius */
581: tao->trust = PetscMax(tl->omega4 * norm_d, tao->trust);
582: } else {
583: /* More than full step taken; increase the radius */
584: tao->trust = PetscMax(tl->omega5 * norm_d, tao->trust);
585: }
586: } else {
587: /* Newton step was not good; reduce the radius */
588: tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
589: }
590: } else {
591: /* Trust-region step is accepted */
592: VecCopy(tl->W, tao->solution);
593: f = ftrial;
594: TaoComputeGradient(tao, tao->solution, tao->gradient);
595: ++tl->ntrust;
596: }
598: /* The radius may have been increased; modify if it is too large */
599: tao->trust = PetscMin(tao->trust, tl->max_radius);
601: /* Check for convergence */
602: VecNorm(tao->gradient, NORM_2, &gnorm);
603: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER,"User provided compute function generated Inf or NaN");
604: needH = 1;
606: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
607: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
608: (*tao->ops->convergencetest)(tao,tao->cnvP);
609: }
610: return(0);
611: }
613: /* ---------------------------------------------------------- */
614: static PetscErrorCode TaoSetUp_NTL(Tao tao)
615: {
616: TAO_NTL *tl = (TAO_NTL *)tao->data;
620: if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient); }
621: if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
622: if (!tl->W) { VecDuplicate(tao->solution, &tl->W);}
623: if (!tl->Xold) { VecDuplicate(tao->solution, &tl->Xold);}
624: if (!tl->Gold) { VecDuplicate(tao->solution, &tl->Gold);}
625: tl->bfgs_pre = NULL;
626: tl->M = NULL;
627: return(0);
628: }
630: /*------------------------------------------------------------*/
631: static PetscErrorCode TaoDestroy_NTL(Tao tao)
632: {
633: TAO_NTL *tl = (TAO_NTL *)tao->data;
637: if (tao->setupcalled) {
638: VecDestroy(&tl->W);
639: VecDestroy(&tl->Xold);
640: VecDestroy(&tl->Gold);
641: }
642: PetscFree(tao->data);
643: return(0);
644: }
646: /*------------------------------------------------------------*/
647: static PetscErrorCode TaoSetFromOptions_NTL(PetscOptionItems *PetscOptionsObject,Tao tao)
648: {
649: TAO_NTL *tl = (TAO_NTL *)tao->data;
653: PetscOptionsHead(PetscOptionsObject,"Newton trust-region method with line-search fallback for unconstrained optimization");
654: PetscOptionsEList("-tao_ntl_init_type", "radius initialization type", "", NTL_INIT, NTL_INIT_TYPES, NTL_INIT[tl->init_type], &tl->init_type,NULL);
655: PetscOptionsEList("-tao_ntl_update_type", "radius update type", "", NTL_UPDATE, NTL_UPDATE_TYPES, NTL_UPDATE[tl->update_type], &tl->update_type,NULL);
656: PetscOptionsReal("-tao_ntl_eta1", "poor steplength; reduce radius", "", tl->eta1, &tl->eta1,NULL);
657: PetscOptionsReal("-tao_ntl_eta2", "reasonable steplength; leave radius alone", "", tl->eta2, &tl->eta2,NULL);
658: PetscOptionsReal("-tao_ntl_eta3", "good steplength; increase radius", "", tl->eta3, &tl->eta3,NULL);
659: PetscOptionsReal("-tao_ntl_eta4", "excellent steplength; greatly increase radius", "", tl->eta4, &tl->eta4,NULL);
660: PetscOptionsReal("-tao_ntl_alpha1", "", "", tl->alpha1, &tl->alpha1,NULL);
661: PetscOptionsReal("-tao_ntl_alpha2", "", "", tl->alpha2, &tl->alpha2,NULL);
662: PetscOptionsReal("-tao_ntl_alpha3", "", "", tl->alpha3, &tl->alpha3,NULL);
663: PetscOptionsReal("-tao_ntl_alpha4", "", "", tl->alpha4, &tl->alpha4,NULL);
664: PetscOptionsReal("-tao_ntl_alpha5", "", "", tl->alpha5, &tl->alpha5,NULL);
665: PetscOptionsReal("-tao_ntl_nu1", "poor steplength; reduce radius", "", tl->nu1, &tl->nu1,NULL);
666: PetscOptionsReal("-tao_ntl_nu2", "reasonable steplength; leave radius alone", "", tl->nu2, &tl->nu2,NULL);
667: PetscOptionsReal("-tao_ntl_nu3", "good steplength; increase radius", "", tl->nu3, &tl->nu3,NULL);
668: PetscOptionsReal("-tao_ntl_nu4", "excellent steplength; greatly increase radius", "", tl->nu4, &tl->nu4,NULL);
669: PetscOptionsReal("-tao_ntl_omega1", "", "", tl->omega1, &tl->omega1,NULL);
670: PetscOptionsReal("-tao_ntl_omega2", "", "", tl->omega2, &tl->omega2,NULL);
671: PetscOptionsReal("-tao_ntl_omega3", "", "", tl->omega3, &tl->omega3,NULL);
672: PetscOptionsReal("-tao_ntl_omega4", "", "", tl->omega4, &tl->omega4,NULL);
673: PetscOptionsReal("-tao_ntl_omega5", "", "", tl->omega5, &tl->omega5,NULL);
674: PetscOptionsReal("-tao_ntl_mu1_i", "", "", tl->mu1_i, &tl->mu1_i,NULL);
675: PetscOptionsReal("-tao_ntl_mu2_i", "", "", tl->mu2_i, &tl->mu2_i,NULL);
676: PetscOptionsReal("-tao_ntl_gamma1_i", "", "", tl->gamma1_i, &tl->gamma1_i,NULL);
677: PetscOptionsReal("-tao_ntl_gamma2_i", "", "", tl->gamma2_i, &tl->gamma2_i,NULL);
678: PetscOptionsReal("-tao_ntl_gamma3_i", "", "", tl->gamma3_i, &tl->gamma3_i,NULL);
679: PetscOptionsReal("-tao_ntl_gamma4_i", "", "", tl->gamma4_i, &tl->gamma4_i,NULL);
680: PetscOptionsReal("-tao_ntl_theta_i", "", "", tl->theta_i, &tl->theta_i,NULL);
681: PetscOptionsReal("-tao_ntl_mu1", "", "", tl->mu1, &tl->mu1,NULL);
682: PetscOptionsReal("-tao_ntl_mu2", "", "", tl->mu2, &tl->mu2,NULL);
683: PetscOptionsReal("-tao_ntl_gamma1", "", "", tl->gamma1, &tl->gamma1,NULL);
684: PetscOptionsReal("-tao_ntl_gamma2", "", "", tl->gamma2, &tl->gamma2,NULL);
685: PetscOptionsReal("-tao_ntl_gamma3", "", "", tl->gamma3, &tl->gamma3,NULL);
686: PetscOptionsReal("-tao_ntl_gamma4", "", "", tl->gamma4, &tl->gamma4,NULL);
687: PetscOptionsReal("-tao_ntl_theta", "", "", tl->theta, &tl->theta,NULL);
688: PetscOptionsReal("-tao_ntl_min_radius", "lower bound on initial radius", "", tl->min_radius, &tl->min_radius,NULL);
689: PetscOptionsReal("-tao_ntl_max_radius", "upper bound on radius", "", tl->max_radius, &tl->max_radius,NULL);
690: PetscOptionsReal("-tao_ntl_epsilon", "tolerance used when computing actual and predicted reduction", "", tl->epsilon, &tl->epsilon,NULL);
691: PetscOptionsTail();
692: TaoLineSearchSetFromOptions(tao->linesearch);
693: KSPSetFromOptions(tao->ksp);
694: return(0);
695: }
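/* Editorial example (illustration only): the options registered above are read once the
   solver type is selected, e.g.

       ./myapp -tao_type ntl -tao_ntl_init_type interpolation \
               -tao_ntl_update_type interpolation -tao_ntl_max_radius 1.0e3

   where "myapp" stands in for the user's executable. */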
697: /*------------------------------------------------------------*/
698: static PetscErrorCode TaoView_NTL(Tao tao, PetscViewer viewer)
699: {
700: TAO_NTL *tl = (TAO_NTL *)tao->data;
701: PetscBool isascii;
705: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
706: if (isascii) {
707: PetscViewerASCIIPushTab(viewer);
708: PetscViewerASCIIPrintf(viewer, "Trust-region steps: %D\n", tl->ntrust);
709: PetscViewerASCIIPrintf(viewer, "Newton search steps: %D\n", tl->newt);
710: PetscViewerASCIIPrintf(viewer, "BFGS search steps: %D\n", tl->bfgs);
711: PetscViewerASCIIPrintf(viewer, "Gradient search steps: %D\n", tl->grad);
712: PetscViewerASCIIPopTab(viewer);
713: }
714: return(0);
715: }
717: /* ---------------------------------------------------------- */
718: /*MC
719: TAONTL - Newton's method with trust region globalization and line search fallback.
720: At each iteration, the method solves the trust-region subproblem
721: for the step d and, if the step is rejected, performs a line search along d:
723: min_d  0.5 d^T H_k d + g_k^T d,  subject to ||d|| <= Delta_k
725: Options Database Keys:
726: + -tao_ntl_init_type - "constant","direction","interpolation"
727: . -tao_ntl_update_type - "reduction","interpolation"
728: . -tao_ntl_min_radius - lower bound on trust region radius
729: . -tao_ntl_max_radius - upper bound on trust region radius
730: . -tao_ntl_epsilon - tolerance for accepting actual / predicted reduction
731: . -tao_ntl_mu1_i - mu1 interpolation init factor
732: . -tao_ntl_mu2_i - mu2 interpolation init factor
733: . -tao_ntl_gamma1_i - gamma1 interpolation init factor
734: . -tao_ntl_gamma2_i - gamma2 interpolation init factor
735: . -tao_ntl_gamma3_i - gamma3 interpolation init factor
736: . -tao_ntl_gamma4_i - gamma4 interpolation init factor
737: . -tao_ntl_theta_i - theta interpolation init factor
738: . -tao_ntl_eta1 - eta1 reduction update factor
739: . -tao_ntl_eta2 - eta2 reduction update factor
740: . -tao_ntl_eta3 - eta3 reduction update factor
741: . -tao_ntl_eta4 - eta4 reduction update factor
742: . -tao_ntl_alpha1 - alpha1 reduction update factor
743: . -tao_ntl_alpha2 - alpha2 reduction update factor
744: . -tao_ntl_alpha3 - alpha3 reduction update factor
745: . -tao_ntl_alpha4 - alpha4 reduction update factor
746: . -tao_ntl_alpha5 - alpha5 reduction update factor
747: . -tao_ntl_mu1 - mu1 interpolation update
748: . -tao_ntl_mu2 - mu2 interpolation update
749: . -tao_ntl_gamma1 - gamma1 interpolation update
750: . -tao_ntl_gamma2 - gamma2 interpolation update
751: . -tao_ntl_gamma3 - gamma3 interpolation update
752: . -tao_ntl_gamma4 - gamma4 interpolation update
753: - -tao_ntl_theta - theta interpolation update
755: Level: beginner
756: M*/
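/* Editorial sketch (not part of the original source): a minimal driver that selects this
   solver. FormFunctionGradient(), FormHessian(), H, x, and user are hypothetical
   user-side names, and some routine names differ between PETSc versions.

       Tao tao;
       TaoCreate(PETSC_COMM_WORLD, &tao);
       TaoSetType(tao, TAONTL);
       TaoSetInitialVector(tao, x);                                  // x: user-created solution Vec
       TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
       TaoSetHessianRoutine(tao, H, H, FormHessian, &user);          // H: user-created Hessian Mat
       TaoSetFromOptions(tao);                                       // picks up -tao_ntl_* options
       TaoSolve(tao);
       TaoDestroy(&tao);
*/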
757: PETSC_EXTERN PetscErrorCode TaoCreate_NTL(Tao tao)
758: {
759: TAO_NTL *tl;
761: const char *morethuente_type = TAOLINESEARCHMT;
764: PetscNewLog(tao,&tl);
765: tao->ops->setup = TaoSetUp_NTL;
766: tao->ops->solve = TaoSolve_NTL;
767: tao->ops->view = TaoView_NTL;
768: tao->ops->setfromoptions = TaoSetFromOptions_NTL;
769: tao->ops->destroy = TaoDestroy_NTL;
771: /* Override default settings (unless already changed) */
772: if (!tao->max_it_changed) tao->max_it = 50;
773: if (!tao->trust0_changed) tao->trust0 = 100.0;
775: tao->data = (void*)tl;
777: /* Default values for trust-region radius update based on steplength */
778: tl->nu1 = 0.25;
779: tl->nu2 = 0.50;
780: tl->nu3 = 1.00;
781: tl->nu4 = 1.25;
783: tl->omega1 = 0.25;
784: tl->omega2 = 0.50;
785: tl->omega3 = 1.00;
786: tl->omega4 = 2.00;
787: tl->omega5 = 4.00;
789: /* Default values for trust-region radius update based on reduction */
790: tl->eta1 = 1.0e-4;
791: tl->eta2 = 0.25;
792: tl->eta3 = 0.50;
793: tl->eta4 = 0.90;
795: tl->alpha1 = 0.25;
796: tl->alpha2 = 0.50;
797: tl->alpha3 = 1.00;
798: tl->alpha4 = 2.00;
799: tl->alpha5 = 4.00;
801: /* Default values for trust-region radius update based on interpolation */
802: tl->mu1 = 0.10;
803: tl->mu2 = 0.50;
805: tl->gamma1 = 0.25;
806: tl->gamma2 = 0.50;
807: tl->gamma3 = 2.00;
808: tl->gamma4 = 4.00;
810: tl->theta = 0.05;
812: /* Default values for trust region initialization based on interpolation */
813: tl->mu1_i = 0.35;
814: tl->mu2_i = 0.50;
816: tl->gamma1_i = 0.0625;
817: tl->gamma2_i = 0.5;
818: tl->gamma3_i = 2.0;
819: tl->gamma4_i = 5.0;
821: tl->theta_i = 0.25;
823: /* Remaining parameters */
824: tl->min_radius = 1.0e-10;
825: tl->max_radius = 1.0e10;
826: tl->epsilon = 1.0e-6;
828: tl->init_type = NTL_INIT_INTERPOLATION;
829: tl->update_type = NTL_UPDATE_REDUCTION;
831: TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
832: PetscObjectIncrementTabLevel((PetscObject)tao->linesearch,(PetscObject)tao,1);
833: TaoLineSearchSetType(tao->linesearch, morethuente_type);
834: TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
835: TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
836: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
837: PetscObjectIncrementTabLevel((PetscObject)tao->ksp,(PetscObject)tao,1);
838: KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
839: KSPAppendOptionsPrefix(tao->ksp,"tao_ntl_");
840: KSPSetType(tao->ksp,KSPSTCG);
841: return(0);
842: }