Actual source code: nls.c
petsc-3.13.6 2020-09-29
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/unconstrained/impls/nls/nlsimpl.h>
4: #include <petscksp.h>
6: #define NLS_INIT_CONSTANT 0
7: #define NLS_INIT_DIRECTION 1
8: #define NLS_INIT_INTERPOLATION 2
9: #define NLS_INIT_TYPES 3
11: #define NLS_UPDATE_STEP 0
12: #define NLS_UPDATE_REDUCTION 1
13: #define NLS_UPDATE_INTERPOLATION 2
14: #define NLS_UPDATE_TYPES 3
16: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};
18: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};
20: /*
21: Implements Newton's Method with a line search approach for solving
22:    unconstrained minimization problems. A Moré-Thuente line search
23:    is used to guarantee that the BFGS preconditioner remains positive
24: definite.
26: The method can shift the Hessian matrix. The shifting procedure is
27: adapted from the PATH algorithm for solving complementarity
28: problems.
30: The linear system solve should be done with a conjugate gradient
31: method, although any method can be used.
32: */
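
The header comment above recommends a conjugate gradient Krylov method for the Newton system. A minimal configuration sketch follows, assuming an existing Tao object named tao; the helper name ConfigureNLSExample is hypothetical and not part of nls.c.

static PetscErrorCode ConfigureNLSExample(Tao tao)
{
  KSP ksp;

  /* Sketch only: select the NLS solver and a trust-region CG method (KSPNASH or KSPGLTR also work) */
  TaoSetType(tao,TAONLS);
  TaoGetKSP(tao,&ksp);        /* inner Krylov solver created in TaoCreate_NLS() below */
  KSPSetType(ksp,KSPSTCG);
  TaoSetFromOptions(tao);     /* allow -tao_nls_* run-time overrides */
  return(0);
}
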
34: #define NLS_NEWTON 0
35: #define NLS_BFGS 1
36: #define NLS_GRADIENT 2
38: static PetscErrorCode TaoSolve_NLS(Tao tao)
39: {
40: PetscErrorCode ierr;
41: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
42: KSPType ksp_type;
43: PetscBool is_nash,is_stcg,is_gltr,is_bfgs,is_jacobi,is_symmetric,sym_set;
44: KSPConvergedReason ksp_reason;
45: PC pc;
46: TaoLineSearchConvergedReason ls_reason;
48: PetscReal fmin, ftrial, f_full, prered, actred, kappa, sigma;
49: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
50: PetscReal f, fold, gdx, gnorm, pert;
51: PetscReal step = 1.0;
52: PetscReal norm_d = 0.0, e_min;
54: PetscInt stepType;
55: PetscInt bfgsUpdates = 0;
56: PetscInt n,N,kspits;
57: PetscInt needH = 1;
59: PetscInt i_max = 5;
60: PetscInt j_max = 1;
61: PetscInt i, j;
64: if (tao->XL || tao->XU || tao->ops->computebounds) {
65: PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
66: }
68:   /* Initialize variables */
69: pert = nlsP->sval;
71: /* Number of times ksp stopped because of these reasons */
72: nlsP->ksp_atol = 0;
73: nlsP->ksp_rtol = 0;
74: nlsP->ksp_dtol = 0;
75: nlsP->ksp_ctol = 0;
76: nlsP->ksp_negc = 0;
77: nlsP->ksp_iter = 0;
78: nlsP->ksp_othr = 0;
80: /* Initialize trust-region radius when using nash, stcg, or gltr
81:      This setting is ignored for other methods
82: Will be reset during the first iteration
83: */
84: KSPGetType(tao->ksp,&ksp_type);
85: PetscStrcmp(ksp_type,KSPNASH,&is_nash);
86: PetscStrcmp(ksp_type,KSPSTCG,&is_stcg);
87: PetscStrcmp(ksp_type,KSPGLTR,&is_gltr);
89: KSPCGSetRadius(tao->ksp,nlsP->max_radius);
91: if (is_nash || is_stcg || is_gltr) {
92: if (tao->trust0 < 0.0) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_ARG_OUTOFRANGE,"Initial radius negative");
93: tao->trust = tao->trust0;
94: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
95: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
96: }
98: /* Check convergence criteria */
99: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
100: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
101: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
102:
103: tao->reason = TAO_CONTINUE_ITERATING;
104: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
105: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
106: (*tao->ops->convergencetest)(tao,tao->cnvP);
107: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
109: /* Allocate the vectors needed for the BFGS approximation */
110: KSPGetPC(tao->ksp, &pc);
111: PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
112: PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
113: if (is_bfgs) {
114: nlsP->bfgs_pre = pc;
115: PCLMVMGetMatLMVM(nlsP->bfgs_pre, &nlsP->M);
116: VecGetLocalSize(tao->solution, &n);
117: VecGetSize(tao->solution, &N);
118: MatSetSizes(nlsP->M, n, n, N, N);
119: MatLMVMAllocate(nlsP->M, tao->solution, tao->gradient);
120: MatIsSymmetricKnown(nlsP->M, &sym_set, &is_symmetric);
121: if (!sym_set || !is_symmetric) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
122: } else if (is_jacobi) {
123: PCJacobiSetUseAbs(pc,PETSC_TRUE);
124: }
126: /* Initialize trust-region radius. The initialization is only performed
127: when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
128: if (is_nash || is_stcg || is_gltr) {
129: switch (nlsP->init_type) {
130: case NLS_INIT_CONSTANT:
131: /* Use the initial radius specified */
132: break;
134: case NLS_INIT_INTERPOLATION:
135:       /* Use interpolation to determine the initial radius */
136: max_radius = 0.0;
138: for (j = 0; j < j_max; ++j) {
139: fmin = f;
140: sigma = 0.0;
142: if (needH) {
143: TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
144: needH = 0;
145: }
147: for (i = 0; i < i_max; ++i) {
148: VecCopy(tao->solution,nlsP->W);
149: VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
150: TaoComputeObjective(tao, nlsP->W, &ftrial);
151: if (PetscIsInfOrNanReal(ftrial)) {
152: tau = nlsP->gamma1_i;
153: } else {
154: if (ftrial < fmin) {
155: fmin = ftrial;
156: sigma = -tao->trust / gnorm;
157: }
159: MatMult(tao->hessian, tao->gradient, nlsP->D);
160: VecDot(tao->gradient, nlsP->D, &prered);
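          /* prered currently holds g^T H g; the next line converts it to the quadratic-model
             reduction for the trial point x - (trust/||g||) g, namely
               prered = trust*||g|| - 0.5*(trust/||g||)^2 * g^T H g */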
162: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
163: actred = f - ftrial;
164: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
165: kappa = 1.0;
166: } else {
167: kappa = actred / prered;
168: }
170: tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
171: tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
172: tau_min = PetscMin(tau_1, tau_2);
173: tau_max = PetscMax(tau_1, tau_2);
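          /* tau_1 and tau_2 are candidate scalings for the radius, obtained by interpolating the
             actual versus predicted reduction with weight theta_i; the branches below keep the
             selected tau roughly within [gamma1_i, gamma4_i] */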
175: if (PetscAbsScalar(kappa - (PetscReal)1.0) <= nlsP->mu1_i) {
176: /* Great agreement */
177: max_radius = PetscMax(max_radius, tao->trust);
179: if (tau_max < 1.0) {
180: tau = nlsP->gamma3_i;
181: } else if (tau_max > nlsP->gamma4_i) {
182: tau = nlsP->gamma4_i;
183: } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
184: tau = tau_1;
185: } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
186: tau = tau_2;
187: } else {
188: tau = tau_max;
189: }
190: } else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= nlsP->mu2_i) {
191: /* Good agreement */
192: max_radius = PetscMax(max_radius, tao->trust);
194: if (tau_max < nlsP->gamma2_i) {
195: tau = nlsP->gamma2_i;
196: } else if (tau_max > nlsP->gamma3_i) {
197: tau = nlsP->gamma3_i;
198: } else {
199: tau = tau_max;
200: }
201: } else {
202: /* Not good agreement */
203: if (tau_min > 1.0) {
204: tau = nlsP->gamma2_i;
205: } else if (tau_max < nlsP->gamma1_i) {
206: tau = nlsP->gamma1_i;
207: } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
208: tau = nlsP->gamma1_i;
209: } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
210: tau = tau_1;
211: } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
212: tau = tau_2;
213: } else {
214: tau = tau_max;
215: }
216: }
217: }
218: tao->trust = tau * tao->trust;
219: }
221: if (fmin < f) {
222: f = fmin;
223: VecAXPY(tao->solution,sigma,tao->gradient);
224: TaoComputeGradient(tao,tao->solution,tao->gradient);
226: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
227: if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute gradient generated Inf or NaN");
228: needH = 1;
230: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
231: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
232: (*tao->ops->convergencetest)(tao,tao->cnvP);
233: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
234: }
235: }
236: tao->trust = PetscMax(tao->trust, max_radius);
238: /* Modify the radius if it is too large or small */
239: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
240: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
241: break;
243: default:
244: /* Norm of the first direction will initialize radius */
245: tao->trust = 0.0;
246: break;
247: }
248: }
250:   /* Set counters for gradient/reset steps */
251: nlsP->newt = 0;
252: nlsP->bfgs = 0;
253: nlsP->grad = 0;
255: /* Have not converged; continue with Newton method */
256: while (tao->reason == TAO_CONTINUE_ITERATING) {
257: /* Call general purpose update function */
258: if (tao->ops->update) {
259: (*tao->ops->update)(tao, tao->niter, tao->user_update);
260: }
261: ++tao->niter;
262: tao->ksp_its=0;
264: /* Compute the Hessian */
265: if (needH) {
266: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
267: }
269: /* Shift the Hessian matrix */
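    /* (a positive pert adds pert*I to the Hessian and, if different, to the preconditioning
        matrix, keeping the Newton system positive definite; pert is adapted further below
        based on whether the resulting direction is a descent direction) */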
270: if (pert > 0) {
271: MatShift(tao->hessian, pert);
272: if (tao->hessian != tao->hessian_pre) {
273: MatShift(tao->hessian_pre, pert);
274: }
275: }
277: if (nlsP->bfgs_pre) {
278: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
279: ++bfgsUpdates;
280: }
282: /* Solve the Newton system of equations */
283: KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
284: if (is_nash || is_stcg || is_gltr) {
285: KSPCGSetRadius(tao->ksp,nlsP->max_radius);
286: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
287: KSPGetIterationNumber(tao->ksp,&kspits);
288: tao->ksp_its+=kspits;
289: tao->ksp_tot_its+=kspits;
290: KSPCGGetNormD(tao->ksp,&norm_d);
292: if (0.0 == tao->trust) {
293: /* Radius was uninitialized; use the norm of the direction */
294: if (norm_d > 0.0) {
295: tao->trust = norm_d;
297: /* Modify the radius if it is too large or small */
298: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
299: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
300: } else {
301: /* The direction was bad; set radius to default value and re-solve
302: the trust-region subproblem to get a direction */
303: tao->trust = tao->trust0;
305: /* Modify the radius if it is too large or small */
306: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
307: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
309: KSPCGSetRadius(tao->ksp,nlsP->max_radius);
310: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
311: KSPGetIterationNumber(tao->ksp,&kspits);
312: tao->ksp_its+=kspits;
313: tao->ksp_tot_its+=kspits;
314: KSPCGGetNormD(tao->ksp,&norm_d);
316: if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB, "Initial direction zero");
317: }
318: }
319: } else {
320: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
321: KSPGetIterationNumber(tao->ksp, &kspits);
322: tao->ksp_its += kspits;
323: tao->ksp_tot_its+=kspits;
324: }
325: VecScale(nlsP->D, -1.0);
326: KSPGetConvergedReason(tao->ksp, &ksp_reason);
327: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (nlsP->bfgs_pre)) {
328:       /* Preconditioner is numerically indefinite; reset the
329:          approximation if using BFGS preconditioning. */
330: MatLMVMReset(nlsP->M, PETSC_FALSE);
331: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
332: bfgsUpdates = 1;
333: }
335: if (KSP_CONVERGED_ATOL == ksp_reason) {
336: ++nlsP->ksp_atol;
337: } else if (KSP_CONVERGED_RTOL == ksp_reason) {
338: ++nlsP->ksp_rtol;
339: } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
340: ++nlsP->ksp_ctol;
341: } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
342: ++nlsP->ksp_negc;
343: } else if (KSP_DIVERGED_DTOL == ksp_reason) {
344: ++nlsP->ksp_dtol;
345: } else if (KSP_DIVERGED_ITS == ksp_reason) {
346: ++nlsP->ksp_iter;
347: } else {
348: ++nlsP->ksp_othr;
349: }
351: /* Check for success (descent direction) */
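    /* (d is a descent direction iff g^T d < 0; an Inf or NaN dot product signals a failed solve) */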
352: VecDot(nlsP->D, tao->gradient, &gdx);
353: if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
354: /* Newton step is not descent or direction produced Inf or NaN
355: Update the perturbation for next time */
356: if (pert <= 0.0) {
357: /* Initialize the perturbation */
358: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
359: if (is_gltr) {
360: KSPGLTRGetMinEig(tao->ksp,&e_min);
361: pert = PetscMax(pert, -e_min);
362: }
363: } else {
364: /* Increase the perturbation */
365: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
366: }
368: if (!nlsP->bfgs_pre) {
369:         /* We don't have the BFGS matrix around and being updated;
370:            must use the gradient direction in this case */
371: VecCopy(tao->gradient, nlsP->D);
372: VecScale(nlsP->D, -1.0);
373: ++nlsP->grad;
374: stepType = NLS_GRADIENT;
375: } else {
376: /* Attempt to use the BFGS direction */
377: MatSolve(nlsP->M, tao->gradient, nlsP->D);
378: VecScale(nlsP->D, -1.0);
380: /* Check for success (descent direction) */
381: VecDot(tao->gradient, nlsP->D, &gdx);
382: if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
383:           /* BFGS direction is not descent or the direction produced Inf or NaN.
384: We can assert bfgsUpdates > 1 in this case because
385: the first solve produces the scaled gradient direction,
386: which is guaranteed to be descent */
388: /* Use steepest descent direction (scaled) */
389: MatLMVMReset(nlsP->M, PETSC_FALSE);
390: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
391: MatSolve(nlsP->M, tao->gradient, nlsP->D);
392: VecScale(nlsP->D, -1.0);
394: bfgsUpdates = 1;
395: ++nlsP->grad;
396: stepType = NLS_GRADIENT;
397: } else {
398: MatLMVMGetUpdateCount(nlsP->M, &bfgsUpdates);
399: if (1 == bfgsUpdates) {
400: /* The first BFGS direction is always the scaled gradient */
401: ++nlsP->grad;
402: stepType = NLS_GRADIENT;
403: } else {
404: ++nlsP->bfgs;
405: stepType = NLS_BFGS;
406: }
407: }
408: }
409: } else {
410: /* Computed Newton step is descent */
411: switch (ksp_reason) {
412: case KSP_DIVERGED_NANORINF:
413: case KSP_DIVERGED_BREAKDOWN:
414: case KSP_DIVERGED_INDEFINITE_MAT:
415: case KSP_DIVERGED_INDEFINITE_PC:
416: case KSP_CONVERGED_CG_NEG_CURVE:
417: /* Matrix or preconditioner is indefinite; increase perturbation */
418: if (pert <= 0.0) {
419: /* Initialize the perturbation */
420: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
421: if (is_gltr) {
422: KSPGLTRGetMinEig(tao->ksp, &e_min);
423: pert = PetscMax(pert, -e_min);
424: }
425: } else {
426: /* Increase the perturbation */
427: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
428: }
429: break;
431: default:
432: /* Newton step computation is good; decrease perturbation */
433: pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
434: if (pert < nlsP->pmin) {
435: pert = 0.0;
436: }
437: break;
438: }
440: ++nlsP->newt;
441: stepType = NLS_NEWTON;
442: }
444: /* Perform the linesearch */
445: fold = f;
446: VecCopy(tao->solution, nlsP->Xold);
447: VecCopy(tao->gradient, nlsP->Gold);
449: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
450: TaoAddLineSearchCounts(tao);
452: while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) { /* Linesearch failed */
453: f = fold;
454: VecCopy(nlsP->Xold, tao->solution);
455: VecCopy(nlsP->Gold, tao->gradient);
457: switch(stepType) {
458: case NLS_NEWTON:
459:       /* Failed to obtain acceptable iterate with Newton step
460: Update the perturbation for next time */
461: if (pert <= 0.0) {
462: /* Initialize the perturbation */
463: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
464: if (is_gltr) {
465: KSPGLTRGetMinEig(tao->ksp,&e_min);
466: pert = PetscMax(pert, -e_min);
467: }
468: } else {
469: /* Increase the perturbation */
470: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
471: }
473: if (!nlsP->bfgs_pre) {
474:         /* We don't have the BFGS matrix around and being updated;
475:            must use the gradient direction in this case */
476: VecCopy(tao->gradient, nlsP->D);
477: ++nlsP->grad;
478: stepType = NLS_GRADIENT;
479: } else {
480: /* Attempt to use the BFGS direction */
481: MatSolve(nlsP->M, tao->gradient, nlsP->D);
482: /* Check for success (descent direction) */
483: VecDot(tao->solution, nlsP->D, &gdx);
484: if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
485:           /* BFGS direction is not descent or the direction produced Inf or NaN.
486: We can assert bfgsUpdates > 1 in this case
487: Use steepest descent direction (scaled) */
488: MatLMVMReset(nlsP->M, PETSC_FALSE);
489: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
490: MatSolve(nlsP->M, tao->gradient, nlsP->D);
492: bfgsUpdates = 1;
493: ++nlsP->grad;
494: stepType = NLS_GRADIENT;
495: } else {
496: if (1 == bfgsUpdates) {
497: /* The first BFGS direction is always the scaled gradient */
498: ++nlsP->grad;
499: stepType = NLS_GRADIENT;
500: } else {
501: ++nlsP->bfgs;
502: stepType = NLS_BFGS;
503: }
504: }
505: }
506: break;
508: case NLS_BFGS:
509:       /* Can only enter when the BFGS (LMVM) preconditioner is in use.
510: Failed to obtain acceptable iterate with BFGS step
511: Attempt to use the scaled gradient direction */
512: MatLMVMReset(nlsP->M, PETSC_FALSE);
513: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
514: MatSolve(nlsP->M, tao->gradient, nlsP->D);
516: bfgsUpdates = 1;
517: ++nlsP->grad;
518: stepType = NLS_GRADIENT;
519: break;
520: }
521: VecScale(nlsP->D, -1.0);
523: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
524: TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
525: TaoAddLineSearchCounts(tao);
526: }
528: if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
529: /* Failed to find an improving point */
530: f = fold;
531: VecCopy(nlsP->Xold, tao->solution);
532: VecCopy(nlsP->Gold, tao->gradient);
533: step = 0.0;
534: tao->reason = TAO_DIVERGED_LS_FAILURE;
535: break;
536: }
538: /* Update trust region radius */
539: if (is_nash || is_stcg || is_gltr) {
540: switch(nlsP->update_type) {
541: case NLS_UPDATE_STEP:
542: if (stepType == NLS_NEWTON) {
543: if (step < nlsP->nu1) {
544: /* Very bad step taken; reduce radius */
545: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
546: } else if (step < nlsP->nu2) {
547: /* Reasonably bad step taken; reduce radius */
548: tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
549: } else if (step < nlsP->nu3) {
550: /* Reasonable step was taken; leave radius alone */
551: if (nlsP->omega3 < 1.0) {
552: tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
553: } else if (nlsP->omega3 > 1.0) {
554: tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
555: }
556: } else if (step < nlsP->nu4) {
557: /* Full step taken; increase the radius */
558: tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
559: } else {
560: /* More than full step taken; increase the radius */
561: tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
562: }
563: } else {
564: /* Newton step was not good; reduce the radius */
565: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
566: }
567: break;
569: case NLS_UPDATE_REDUCTION:
570: if (stepType == NLS_NEWTON) {
571: /* Get predicted reduction */
573: KSPCGGetObjFcn(tao->ksp,&prered);
574: if (prered >= 0.0) {
575: /* The predicted reduction has the wrong sign. This cannot */
576: /* happen in infinite precision arithmetic. Step should */
577: /* be rejected! */
578: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
579: } else {
580: if (PetscIsInfOrNanReal(f_full)) {
581: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
582: } else {
583:             /* Compute the actual reduction */
584: actred = fold - f_full;
585: prered = -prered;
586: if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
587: (PetscAbsScalar(prered) <= nlsP->epsilon)) {
588: kappa = 1.0;
589: } else {
590: kappa = actred / prered;
591: }
593:             /* Accept or reject the step and update the radius */
594: if (kappa < nlsP->eta1) {
595: /* Very bad step */
596: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
597: } else if (kappa < nlsP->eta2) {
598:               /* Marginally bad step */
599: tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
600: } else if (kappa < nlsP->eta3) {
601: /* Reasonable step */
602: if (nlsP->alpha3 < 1.0) {
603: tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
604: } else if (nlsP->alpha3 > 1.0) {
605: tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
606: }
607: } else if (kappa < nlsP->eta4) {
608: /* Good step */
609: tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
610: } else {
611: /* Very good step */
612: tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
613: }
614: }
615: }
616: } else {
617: /* Newton step was not good; reduce the radius */
618: tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
619: }
620: break;
622: default:
623: if (stepType == NLS_NEWTON) {
624: KSPCGGetObjFcn(tao->ksp,&prered);
625: if (prered >= 0.0) {
626: /* The predicted reduction has the wrong sign. This cannot */
627: /* happen in infinite precision arithmetic. Step should */
628: /* be rejected! */
629: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
630: } else {
631: if (PetscIsInfOrNanReal(f_full)) {
632: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
633: } else {
634: actred = fold - f_full;
635: prered = -prered;
636: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
637: kappa = 1.0;
638: } else {
639: kappa = actred / prered;
640: }
642: tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
643: tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
644: tau_min = PetscMin(tau_1, tau_2);
645: tau_max = PetscMax(tau_1, tau_2);
647: if (kappa >= 1.0 - nlsP->mu1) {
648: /* Great agreement */
649: if (tau_max < 1.0) {
650: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
651: } else if (tau_max > nlsP->gamma4) {
652: tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
653: } else {
654: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
655: }
656: } else if (kappa >= 1.0 - nlsP->mu2) {
657: /* Good agreement */
659: if (tau_max < nlsP->gamma2) {
660: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
661: } else if (tau_max > nlsP->gamma3) {
662: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
663: } else if (tau_max < 1.0) {
664: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
665: } else {
666: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
667: }
668: } else {
669: /* Not good agreement */
670: if (tau_min > 1.0) {
671: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
672: } else if (tau_max < nlsP->gamma1) {
673: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
674: } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
675: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
676: } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
677: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
678: } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_2 >= 1.0))) {
679: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
680: } else {
681: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
682: }
683: }
684: }
685: }
686: } else {
687: /* Newton step was not good; reduce the radius */
688: tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
689: }
690: break;
691: }
693: /* The radius may have been increased; modify if it is too large */
694: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
695: }
697: /* Check for termination */
698: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
699: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER,"User provided compute function generated Not-a-Number");
700: needH = 1;
701: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
702: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
703: (*tao->ops->convergencetest)(tao,tao->cnvP);
704: }
705: return(0);
706: }
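
The perturbation ("pert") logic inside TaoSolve_NLS() follows one rule applied in three places: initialize from the gradient norm on the first failure, grow on repeated failures, and shrink (eventually to zero) after a successful Newton step. A sketch collecting this rule in one place is shown below; the helper name is hypothetical, the parameters mirror the TAO_NLS fields used above, and the KSPGLTR minimum-eigenvalue safeguard is omitted.

static PetscReal NLSPerturbationUpdateExample(const TAO_NLS *nlsP,PetscReal pert,PetscReal gnorm,PetscBool good_step)
{
  if (good_step) {
    /* Newton step computation was good: shrink the perturbation, dropping it to zero below pmin */
    pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
    if (pert < nlsP->pmin) pert = 0.0;
  } else if (pert <= 0.0) {
    /* First failure: initialize the perturbation from the gradient norm */
    pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
  } else {
    /* Repeated failure: grow the perturbation */
    pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
  }
  return pert;
}
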
708: /* ---------------------------------------------------------- */
709: static PetscErrorCode TaoSetUp_NLS(Tao tao)
710: {
711: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
715: if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
716: if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
717: if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
718: if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
719: if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
720: if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
721: nlsP->M = 0;
722: nlsP->bfgs_pre = 0;
723: return(0);
724: }
726: /*------------------------------------------------------------*/
727: static PetscErrorCode TaoDestroy_NLS(Tao tao)
728: {
729: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
733: if (tao->setupcalled) {
734: VecDestroy(&nlsP->D);
735: VecDestroy(&nlsP->W);
736: VecDestroy(&nlsP->Xold);
737: VecDestroy(&nlsP->Gold);
738: }
739: PetscFree(tao->data);
740: return(0);
741: }
743: /*------------------------------------------------------------*/
744: static PetscErrorCode TaoSetFromOptions_NLS(PetscOptionItems *PetscOptionsObject,Tao tao)
745: {
746: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
750: PetscOptionsHead(PetscOptionsObject,"Newton line search method for unconstrained optimization");
751: PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
752: PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
753: PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval,NULL);
754: PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin,NULL);
755: PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax,NULL);
756: PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac,NULL);
757: PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin,NULL);
758: PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax,NULL);
759: PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac,NULL);
760: PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac,NULL);
761: PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac,NULL);
762: PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac,NULL);
763: PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1,NULL);
764: PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2,NULL);
765: PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3,NULL);
766: PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4,NULL);
767: PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1,NULL);
768: PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2,NULL);
769: PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3,NULL);
770: PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4,NULL);
771: PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5,NULL);
772: PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1,NULL);
773: PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2,NULL);
774: PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3,NULL);
775: PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4,NULL);
776: PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1,NULL);
777: PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2,NULL);
778: PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3,NULL);
779: PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4,NULL);
780: PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5,NULL);
781: PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i,NULL);
782: PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i,NULL);
783: PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i,NULL);
784: PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i,NULL);
785: PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i,NULL);
786: PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i,NULL);
787: PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i,NULL);
788: PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1,NULL);
789: PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2,NULL);
790: PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1,NULL);
791: PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2,NULL);
792: PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3,NULL);
793: PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4,NULL);
794: PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta,NULL);
795: PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius,NULL);
796: PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius,NULL);
797: PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon,NULL);
798: PetscOptionsTail();
799: TaoLineSearchSetFromOptions(tao->linesearch);
800: KSPSetFromOptions(tao->ksp);
801: return(0);
802: }
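
The options registered above can also be set programmatically before TaoSetFromOptions() runs. A short sketch, assuming a Tao object with no options prefix of its own (the function name is hypothetical); the "tao_nls_" prefix on the KSP option comes from KSPAppendOptionsPrefix() in TaoCreate_NLS() below.

static PetscErrorCode SetNLSOptionsExample(Tao tao)
{
  /* Sketch only: interpolation-based radius initialization, reduction-based updates,
     and the Steihaug-Toint CG solver for the Newton system */
  PetscOptionsSetValue(NULL,"-tao_nls_init_type","interpolation");
  PetscOptionsSetValue(NULL,"-tao_nls_update_type","reduction");
  PetscOptionsSetValue(NULL,"-tao_nls_max_radius","1.0e5");
  PetscOptionsSetValue(NULL,"-tao_nls_ksp_type","stcg");
  TaoSetFromOptions(tao);
  return(0);
}
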
805: /*------------------------------------------------------------*/
806: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
807: {
808: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
809: PetscBool isascii;
813: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
814: if (isascii) {
815: PetscViewerASCIIPushTab(viewer);
816: PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
817: PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
818: PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);
820: PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
821: PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
822: PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
823: PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
824: PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
825: PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
826: PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
827: PetscViewerASCIIPopTab(viewer);
828: }
829: return(0);
830: }
832: /* ---------------------------------------------------------- */
833: /*MC
834:   TAONLS - Newton's method with a line search for unconstrained minimization.
835: At each iteration, the Newton line search method solves the symmetric
836:   system of equations to obtain the step direction dk:
837: Hk dk = -gk
838:   then a Moré-Thuente line search is applied along the direction dk to approximately
839: solve
840: min_t f(xk + t d_k)
842: Options Database Keys:
843: + -tao_nls_init_type - "constant","direction","interpolation"
844: . -tao_nls_update_type - "step","reduction","interpolation"
845: . -tao_nls_sval - perturbation starting value
846: . -tao_nls_imin - minimum initial perturbation
847: . -tao_nls_imax - maximum initial perturbation
848: . -tao_nls_pmin - minimum perturbation
849: . -tao_nls_pmax - maximum perturbation
850: . -tao_nls_pgfac - growth factor
851: . -tao_nls_psfac - shrink factor
852: . -tao_nls_imfac - initial merit factor
853: . -tao_nls_pmgfac - merit growth factor
854: . -tao_nls_pmsfac - merit shrink factor
855: . -tao_nls_eta1 - poor steplength; reduce radius
856: . -tao_nls_eta2 - reasonable steplength; leave radius alone
857: . -tao_nls_eta3 - good steplength; increase radius
858: . -tao_nls_eta4 - excellent steplength; greatly increase radius
859: . -tao_nls_alpha1 - alpha1 reduction
860: . -tao_nls_alpha2 - alpha2 reduction
861: . -tao_nls_alpha3 - alpha3 reduction
862: . -tao_nls_alpha4 - alpha4 reduction
863: . -tao_nls_alpha5 - alpha5 reduction
864: . -tao_nls_mu1 - mu1 interpolation update
865: . -tao_nls_mu2 - mu2 interpolation update
866: . -tao_nls_gamma1 - gamma1 interpolation update
867: . -tao_nls_gamma2 - gamma2 interpolation update
868: . -tao_nls_gamma3 - gamma3 interpolation update
869: . -tao_nls_gamma4 - gamma4 interpolation update
870: . -tao_nls_theta - theta interpolation update
871: . -tao_nls_omega1 - omega1 step update
872: . -tao_nls_omega2 - omega2 step update
873: . -tao_nls_omega3 - omega3 step update
874: . -tao_nls_omega4 - omega4 step update
875: . -tao_nls_omega5 - omega5 step update
876: . -tao_nls_mu1_i - mu1 interpolation init factor
877: . -tao_nls_mu2_i - mu2 interpolation init factor
878: . -tao_nls_gamma1_i - gamma1 interpolation init factor
879: . -tao_nls_gamma2_i - gamma2 interpolation init factor
880: . -tao_nls_gamma3_i - gamma3 interpolation init factor
881: . -tao_nls_gamma4_i - gamma4 interpolation init factor
882: - -tao_nls_theta_i - theta interpolation init factor
884: Level: beginner
885: M*/
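
A minimal standalone driver sketch for TAONLS follows, using the simple quadratic objective f(x) = 0.5*||x||^2 so that the gradient is x and the Hessian is the identity. The program and the callback names FormFunctionGradient and FormHessian are illustrative placeholders, not part of nls.c.

#include <petsctao.h>

static PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f,Vec G,void *ctx)
{
  PetscReal nrm;

  VecNorm(X,NORM_2,&nrm);
  *f = 0.5*nrm*nrm;            /* f(x) = 0.5*||x||^2 */
  VecCopy(X,G);                /* grad f(x) = x */
  return(0);
}

static PetscErrorCode FormHessian(Tao tao,Vec X,Mat H,Mat Hpre,void *ctx)
{
  PetscInt i;

  for (i = 0; i < 2; ++i) {
    MatSetValue(H,i,i,1.0,INSERT_VALUES);  /* Hessian = identity */
  }
  MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);
  return(0);
}

int main(int argc,char **argv)
{
  Tao tao;
  Vec x;
  Mat H;

  PetscInitialize(&argc,&argv,NULL,NULL);
  VecCreateSeq(PETSC_COMM_SELF,2,&x);
  VecSet(x,1.0);                                   /* nonzero starting point */
  MatCreateSeqDense(PETSC_COMM_SELF,2,2,NULL,&H);

  TaoCreate(PETSC_COMM_SELF,&tao);
  TaoSetType(tao,TAONLS);
  TaoSetInitialVector(tao,x);
  TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);
  TaoSetHessianRoutine(tao,H,H,FormHessian,NULL);
  TaoSetFromOptions(tao);
  TaoSolve(tao);

  TaoDestroy(&tao);
  MatDestroy(&H);
  VecDestroy(&x);
  PetscFinalize();
  return 0;
}
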
887: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
888: {
889: TAO_NLS *nlsP;
890: const char *morethuente_type = TAOLINESEARCHMT;
894: PetscNewLog(tao,&nlsP);
896: tao->ops->setup = TaoSetUp_NLS;
897: tao->ops->solve = TaoSolve_NLS;
898: tao->ops->view = TaoView_NLS;
899: tao->ops->setfromoptions = TaoSetFromOptions_NLS;
900: tao->ops->destroy = TaoDestroy_NLS;
902: /* Override default settings (unless already changed) */
903: if (!tao->max_it_changed) tao->max_it = 50;
904: if (!tao->trust0_changed) tao->trust0 = 100.0;
906: tao->data = (void*)nlsP;
908: nlsP->sval = 0.0;
909: nlsP->imin = 1.0e-4;
910: nlsP->imax = 1.0e+2;
911: nlsP->imfac = 1.0e-1;
913: nlsP->pmin = 1.0e-12;
914: nlsP->pmax = 1.0e+2;
915: nlsP->pgfac = 1.0e+1;
916: nlsP->psfac = 4.0e-1;
917: nlsP->pmgfac = 1.0e-1;
918: nlsP->pmsfac = 1.0e-1;
920: /* Default values for trust-region radius update based on steplength */
921: nlsP->nu1 = 0.25;
922: nlsP->nu2 = 0.50;
923: nlsP->nu3 = 1.00;
924: nlsP->nu4 = 1.25;
926: nlsP->omega1 = 0.25;
927: nlsP->omega2 = 0.50;
928: nlsP->omega3 = 1.00;
929: nlsP->omega4 = 2.00;
930: nlsP->omega5 = 4.00;
932: /* Default values for trust-region radius update based on reduction */
933: nlsP->eta1 = 1.0e-4;
934: nlsP->eta2 = 0.25;
935: nlsP->eta3 = 0.50;
936: nlsP->eta4 = 0.90;
938: nlsP->alpha1 = 0.25;
939: nlsP->alpha2 = 0.50;
940: nlsP->alpha3 = 1.00;
941: nlsP->alpha4 = 2.00;
942: nlsP->alpha5 = 4.00;
944: /* Default values for trust-region radius update based on interpolation */
945: nlsP->mu1 = 0.10;
946: nlsP->mu2 = 0.50;
948: nlsP->gamma1 = 0.25;
949: nlsP->gamma2 = 0.50;
950: nlsP->gamma3 = 2.00;
951: nlsP->gamma4 = 4.00;
953: nlsP->theta = 0.05;
955: /* Default values for trust region initialization based on interpolation */
956: nlsP->mu1_i = 0.35;
957: nlsP->mu2_i = 0.50;
959: nlsP->gamma1_i = 0.0625;
960: nlsP->gamma2_i = 0.5;
961: nlsP->gamma3_i = 2.0;
962: nlsP->gamma4_i = 5.0;
964: nlsP->theta_i = 0.25;
966: /* Remaining parameters */
967: nlsP->min_radius = 1.0e-10;
968: nlsP->max_radius = 1.0e10;
969: nlsP->epsilon = 1.0e-6;
971: nlsP->init_type = NLS_INIT_INTERPOLATION;
972: nlsP->update_type = NLS_UPDATE_STEP;
974: TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
975: PetscObjectIncrementTabLevel((PetscObject)tao->linesearch,(PetscObject)tao,1);
976: TaoLineSearchSetType(tao->linesearch,morethuente_type);
977: TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
978: TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
980: /* Set linear solver to default for symmetric matrices */
981: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
982: PetscObjectIncrementTabLevel((PetscObject)tao->ksp,(PetscObject)tao,1);
983: KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
984: KSPAppendOptionsPrefix(tao->ksp,"tao_nls_");
985: KSPSetType(tao->ksp,KSPSTCG);
986: return(0);
987: }