Actual source code: nls.c
petsc-3.7.3 2016-08-01
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/matrix/lmvmmat.h>
3: #include <../src/tao/unconstrained/impls/nls/nls.h>
5: #include <petscksp.h>
6: #include <petscpc.h>
7: #include <petsc/private/kspimpl.h>
8: #include <petsc/private/pcimpl.h>
10: #define NLS_KSP_CG 0
11: #define NLS_KSP_NASH 1
12: #define NLS_KSP_STCG 2
13: #define NLS_KSP_GLTR 3
14: #define NLS_KSP_PETSC 4
15: #define NLS_KSP_TYPES 5
17: #define NLS_PC_NONE 0
18: #define NLS_PC_AHESS 1
19: #define NLS_PC_BFGS 2
20: #define NLS_PC_PETSC 3
21: #define NLS_PC_TYPES 4
23: #define BFGS_SCALE_AHESS 0
24: #define BFGS_SCALE_PHESS 1
25: #define BFGS_SCALE_BFGS 2
26: #define BFGS_SCALE_TYPES 3
28: #define NLS_INIT_CONSTANT 0
29: #define NLS_INIT_DIRECTION 1
30: #define NLS_INIT_INTERPOLATION 2
31: #define NLS_INIT_TYPES 3
33: #define NLS_UPDATE_STEP 0
34: #define NLS_UPDATE_REDUCTION 1
35: #define NLS_UPDATE_INTERPOLATION 2
36: #define NLS_UPDATE_TYPES 3
38: static const char *NLS_KSP[64] = {"cg", "nash", "stcg", "gltr", "petsc"};
40: static const char *NLS_PC[64] = {"none", "ahess", "bfgs", "petsc"};
42: static const char *BFGS_SCALE[64] = {"ahess", "phess", "bfgs"};
44: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};
46: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};
48: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x);
49: /* Routine for BFGS preconditioner */
52: /* Implements Newton's Method with a line search approach for solving
53: unconstrained minimization problems. A More'-Thuente line search
54: is used to guarantee that the bfgs preconditioner remains positive
55: definite.
57: The method can shift the Hessian matrix. The shifting procedure is
58: adapted from the PATH algorithm for solving complementarity
59: problems.
61: The linear system solve should be done with a conjugate gradient
62: method, although any method can be used. */
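For orientation, a minimal usage sketch (not part of nls.c), assuming the petsc-3.7 TAO user interface; FormFunctionGradient, FormHessian, and the user context are hypothetical placeholders supplied by the application:

#include <petsctao.h>

/* Hypothetical user callbacks with the petsc-3.7 TAO signatures */
extern PetscErrorCode FormFunctionGradient(Tao,Vec,PetscReal*,Vec,void*);
extern PetscErrorCode FormHessian(Tao,Vec,Mat,Mat,void*);

PetscErrorCode SolveWithNLS(MPI_Comm comm,Vec x,Mat H,void *user)
{
  Tao            tao;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TaoCreate(comm,&tao);CHKERRQ(ierr);
  ierr = TaoSetType(tao,TAONLS);CHKERRQ(ierr);                               /* select the solver implemented in this file */
  ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
  ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,user);CHKERRQ(ierr);
  ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,user);CHKERRQ(ierr);
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);                               /* picks up the -tao_nls_* options */
  ierr = TaoSolve(tao);CHKERRQ(ierr);
  ierr = TaoDestroy(&tao);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}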
64: #define NLS_NEWTON 0
65: #define NLS_BFGS 1
66: #define NLS_SCALED_GRADIENT 2
67: #define NLS_GRADIENT 3
71: static PetscErrorCode TaoSolve_NLS(Tao tao)
72: {
73: PetscErrorCode ierr;
74: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
75: PC pc;
76: KSPConvergedReason ksp_reason;
77: TaoLineSearchConvergedReason ls_reason;
78: TaoConvergedReason reason;
80: PetscReal fmin, ftrial, f_full, prered, actred, kappa, sigma;
81: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
82: PetscReal f, fold, gdx, gnorm, pert;
83: PetscReal step = 1.0;
84: PetscReal delta;
85: PetscReal norm_d = 0.0, e_min;
87: PetscInt stepType;
88: PetscInt bfgsUpdates = 0;
89: PetscInt n,N,kspits;
90: PetscInt needH = 1;
92: PetscInt i_max = 5;
93: PetscInt j_max = 1;
94: PetscInt i, j;
97: if (tao->XL || tao->XU || tao->ops->computebounds) {
98: PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
99: }
101: /* Initialize variables */
102: pert = nlsP->sval;
104: /* Number of times ksp stopped because of these reasons */
105: nlsP->ksp_atol = 0;
106: nlsP->ksp_rtol = 0;
107: nlsP->ksp_dtol = 0;
108: nlsP->ksp_ctol = 0;
109: nlsP->ksp_negc = 0;
110: nlsP->ksp_iter = 0;
111: nlsP->ksp_othr = 0;
113: /* Modify the linear solver to a trust region method if desired */
114: switch(nlsP->ksp_type) {
115: case NLS_KSP_CG:
116: KSPSetType(tao->ksp, KSPCG);
117: KSPSetFromOptions(tao->ksp);
118: break;
120: case NLS_KSP_NASH:
121: KSPSetType(tao->ksp, KSPNASH);
122: KSPSetFromOptions(tao->ksp);
123: break;
125: case NLS_KSP_STCG:
126: KSPSetType(tao->ksp, KSPSTCG);
127: KSPSetFromOptions(tao->ksp);
128: break;
130: case NLS_KSP_GLTR:
131: KSPSetType(tao->ksp, KSPGLTR);
132: KSPSetFromOptions(tao->ksp);
133: break;
135: default:
136: /* Use the method set by the ksp_type */
137: break;
138: }
140: /* Initialize trust-region radius when using nash, stcg, or gltr
141: Will be reset during the first iteration */
142: if (NLS_KSP_NASH == nlsP->ksp_type) {
143: KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
144: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
145: KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
146: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
147: KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
148: }
150: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
151: tao->trust = tao->trust0;
153: if (tao->trust < 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial radius negative");
155: /* Modify the radius if it is too large or small */
156: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
157: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
158: }
160: /* Get vectors we will need */
161: if (NLS_PC_BFGS == nlsP->pc_type && !nlsP->M) {
162: VecGetLocalSize(tao->solution,&n);
163: VecGetSize(tao->solution,&N);
164: MatCreateLMVM(((PetscObject)tao)->comm,n,N,&nlsP->M);
165: MatLMVMAllocateVectors(nlsP->M,tao->solution);
166: }
168: /* Check convergence criteria */
169: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
170: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
171: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
173: TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
174: if (reason != TAO_CONTINUE_ITERATING) return(0);
176: /* create vectors for the limited memory preconditioner */
177: if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_BFGS != nlsP->bfgs_scale_type)) {
178: if (!nlsP->Diag) {
179: VecDuplicate(tao->solution,&nlsP->Diag);
180: }
181: }
183: /* Modify the preconditioner to use the bfgs approximation */
184: KSPGetPC(tao->ksp, &pc);
185: switch(nlsP->pc_type) {
186: case NLS_PC_NONE:
187: PCSetType(pc, PCNONE);
188: PCSetFromOptions(pc);
189: break;
191: case NLS_PC_AHESS:
192: PCSetType(pc, PCJACOBI);
193: PCSetFromOptions(pc);
194: PCJacobiSetUseAbs(pc,PETSC_TRUE);
195: break;
197: case NLS_PC_BFGS:
198: PCSetType(pc, PCSHELL);
199: PCSetFromOptions(pc);
200: PCShellSetName(pc, "bfgs");
201: PCShellSetContext(pc, nlsP->M);
202: PCShellSetApply(pc, MatLMVMSolveShell);
203: break;
205: default:
206: /* Use the pc method set by pc_type */
207: break;
208: }
210: /* Initialize trust-region radius. The initialization is only performed
211: when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
212: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
213: switch(nlsP->init_type) {
214: case NLS_INIT_CONSTANT:
215: /* Use the initial radius specified */
216: break;
218: case NLS_INIT_INTERPOLATION:
219: /* Use interpolation from steepest-descent trial steps to determine the initial radius */
220: max_radius = 0.0;
222: for (j = 0; j < j_max; ++j) {
223: fmin = f;
224: sigma = 0.0;
226: if (needH) {
227: TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
228: needH = 0;
229: }
231: for (i = 0; i < i_max; ++i) {
232: VecCopy(tao->solution,nlsP->W);
233: VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
234: TaoComputeObjective(tao, nlsP->W, &ftrial);
235: if (PetscIsInfOrNanReal(ftrial)) {
236: tau = nlsP->gamma1_i;
237: } else {
238: if (ftrial < fmin) {
239: fmin = ftrial;
240: sigma = -tao->trust / gnorm;
241: }
243: MatMult(tao->hessian, tao->gradient, nlsP->D);
244: VecDot(tao->gradient, nlsP->D, &prered);
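/* At this point prered = g^T H g; the next line turns it into the decrease of the
   quadratic model q(d) = g^T d + 0.5 d^T H d along the steepest-descent trial step
   d = -(trust/||g||) g, namely trust*||g|| - 0.5*trust^2*(g^T H g)/||g||^2 */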
246: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
247: actred = f - ftrial;
248: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
249: kappa = 1.0;
250: } else {
251: kappa = actred / prered;
252: }
254: tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
255: tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
256: tau_min = PetscMin(tau_1, tau_2);
257: tau_max = PetscMax(tau_1, tau_2);
259: if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu1_i) {
260: /* Great agreement */
261: max_radius = PetscMax(max_radius, tao->trust);
263: if (tau_max < 1.0) {
264: tau = nlsP->gamma3_i;
265: } else if (tau_max > nlsP->gamma4_i) {
266: tau = nlsP->gamma4_i;
267: } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
268: tau = tau_1;
269: } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
270: tau = tau_2;
271: } else {
272: tau = tau_max;
273: }
274: } else if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu2_i) {
275: /* Good agreement */
276: max_radius = PetscMax(max_radius, tao->trust);
278: if (tau_max < nlsP->gamma2_i) {
279: tau = nlsP->gamma2_i;
280: } else if (tau_max > nlsP->gamma3_i) {
281: tau = nlsP->gamma3_i;
282: } else {
283: tau = tau_max;
284: }
285: } else {
286: /* Not good agreement */
287: if (tau_min > 1.0) {
288: tau = nlsP->gamma2_i;
289: } else if (tau_max < nlsP->gamma1_i) {
290: tau = nlsP->gamma1_i;
291: } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
292: tau = nlsP->gamma1_i;
293: } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
294: tau = tau_1;
295: } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
296: tau = tau_2;
297: } else {
298: tau = tau_max;
299: }
300: }
301: }
302: tao->trust = tau * tao->trust;
303: }
305: if (fmin < f) {
306: f = fmin;
307: VecAXPY(tao->solution,sigma,tao->gradient);
308: TaoComputeGradient(tao,tao->solution,tao->gradient);
310: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
311: if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute gradient generated Inf or NaN");
312: needH = 1;
314: TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
315: if (reason != TAO_CONTINUE_ITERATING) return(0);
316: }
317: }
318: tao->trust = PetscMax(tao->trust, max_radius);
320: /* Modify the radius if it is too large or small */
321: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
322: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
323: break;
325: default:
326: /* Norm of the first direction will initialize radius */
327: tao->trust = 0.0;
328: break;
329: }
330: }
332: /* Set initial scaling for the BFGS preconditioner
333: This step is done after computing the initial trust-region radius
334: since the function value may have decreased */
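/* Heuristic scaling: if the minimum value of f is near zero and f is roughly quadratic,
   then f ~ 0.5*g^T*H^{-1}*g, so 2|f|/||g||^2 estimates the scale of the inverse Hessian
   being seeded into the LMVM matrix */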
335: if (NLS_PC_BFGS == nlsP->pc_type) {
336: if (f != 0.0) {
337: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
338: } else {
339: delta = 2.0 / (gnorm*gnorm);
340: }
341: MatLMVMSetDelta(nlsP->M,delta);
342: }
344: /* Set counters for gradient/reset steps */
345: nlsP->newt = 0;
346: nlsP->bfgs = 0;
347: nlsP->sgrad = 0;
348: nlsP->grad = 0;
350: /* Have not converged; continue with Newton method */
351: while (reason == TAO_CONTINUE_ITERATING) {
352: ++tao->niter;
353: tao->ksp_its=0;
355: /* Compute the Hessian */
356: if (needH) {
357: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
358: }
360: if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_AHESS == nlsP->bfgs_scale_type)) {
361: /* Obtain diagonal for the bfgs preconditioner */
362: MatGetDiagonal(tao->hessian, nlsP->Diag);
363: VecAbs(nlsP->Diag);
364: VecReciprocal(nlsP->Diag);
365: MatLMVMSetScale(nlsP->M,nlsP->Diag);
366: }
368: /* Shift the Hessian matrix */
369: if (pert > 0) {
370: MatShift(tao->hessian, pert);
371: if (tao->hessian != tao->hessian_pre) {
372: MatShift(tao->hessian_pre, pert);
373: }
374: }
376: if (NLS_PC_BFGS == nlsP->pc_type) {
377: if (BFGS_SCALE_PHESS == nlsP->bfgs_scale_type) {
378: /* Obtain diagonal for the bfgs preconditioner */
379: MatGetDiagonal(tao->hessian, nlsP->Diag);
380: VecAbs(nlsP->Diag);
381: VecReciprocal(nlsP->Diag);
382: MatLMVMSetScale(nlsP->M,nlsP->Diag);
383: }
384: /* Update the limited memory preconditioner */
385: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
386: ++bfgsUpdates;
387: }
389: /* Solve the Newton system of equations */
390: KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
391: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
393: if (NLS_KSP_NASH == nlsP->ksp_type) {
394: KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
395: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
396: KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
397: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
398: KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
399: }
401: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
402: KSPGetIterationNumber(tao->ksp,&kspits);
403: tao->ksp_its+=kspits;
404: tao->ksp_tot_its+=kspits;
406: if (NLS_KSP_NASH == nlsP->ksp_type) {
407: KSPNASHGetNormD(tao->ksp,&norm_d);
408: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
409: KSPSTCGGetNormD(tao->ksp,&norm_d);
410: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
411: KSPGLTRGetNormD(tao->ksp,&norm_d);
412: }
414: if (0.0 == tao->trust) {
415: /* Radius was uninitialized; use the norm of the direction */
416: if (norm_d > 0.0) {
417: tao->trust = norm_d;
419: /* Modify the radius if it is too large or small */
420: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
421: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
422: } else {
423: /* The direction was bad; set radius to default value and re-solve
424: the trust-region subproblem to get a direction */
425: tao->trust = tao->trust0;
427: /* Modify the radius if it is too large or small */
428: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
429: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
431: if (NLS_KSP_NASH == nlsP->ksp_type) {
432: KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
433: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
434: KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
435: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
436: KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
437: }
439: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
440: KSPGetIterationNumber(tao->ksp,&kspits);
441: tao->ksp_its+=kspits;
442: tao->ksp_tot_its+=kspits;
443: if (NLS_KSP_NASH == nlsP->ksp_type) {
444: KSPNASHGetNormD(tao->ksp,&norm_d);
445: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
446: KSPSTCGGetNormD(tao->ksp,&norm_d);
447: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
448: KSPGLTRGetNormD(tao->ksp,&norm_d);
449: }
451: if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
452: }
453: }
454: } else {
455: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
456: KSPGetIterationNumber(tao->ksp, &kspits);
457: tao->ksp_its += kspits;
458: tao->ksp_tot_its+=kspits;
459: }
460: VecScale(nlsP->D, -1.0);
461: KSPGetConvergedReason(tao->ksp, &ksp_reason);
462: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NLS_PC_BFGS == nlsP->pc_type) && (bfgsUpdates > 1)) {
463: /* Preconditioner is numerically indefinite; reset the
464: approximation if using BFGS preconditioning. */
466: if (f != 0.0) {
467: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
468: } else {
469: delta = 2.0 / (gnorm*gnorm);
470: }
471: MatLMVMSetDelta(nlsP->M,delta);
472: MatLMVMReset(nlsP->M);
473: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
474: bfgsUpdates = 1;
475: }
477: if (KSP_CONVERGED_ATOL == ksp_reason) {
478: ++nlsP->ksp_atol;
479: } else if (KSP_CONVERGED_RTOL == ksp_reason) {
480: ++nlsP->ksp_rtol;
481: } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
482: ++nlsP->ksp_ctol;
483: } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
484: ++nlsP->ksp_negc;
485: } else if (KSP_DIVERGED_DTOL == ksp_reason) {
486: ++nlsP->ksp_dtol;
487: } else if (KSP_DIVERGED_ITS == ksp_reason) {
488: ++nlsP->ksp_iter;
489: } else {
490: ++nlsP->ksp_othr;
491: }
493: /* Check for success (descent direction) */
494: VecDot(nlsP->D, tao->gradient, &gdx);
495: if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
496: /* Newton step is not descent or direction produced Inf or NaN
497: Update the perturbation for next time */
498: if (pert <= 0.0) {
499: /* Initialize the perturbation */
500: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
501: if (NLS_KSP_GLTR == nlsP->ksp_type) {
502: KSPGLTRGetMinEig(tao->ksp,&e_min);
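/* e_min is GLTR's Lanczos estimate of the smallest Hessian eigenvalue; taking
   pert >= -e_min aims to make the shifted matrix H + pert*I positive semidefinite */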
503: pert = PetscMax(pert, -e_min);
504: }
505: } else {
506: /* Increase the perturbation */
507: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
508: }
510: if (NLS_PC_BFGS != nlsP->pc_type) {
511: /* We don't have the bfgs matrix around and updated;
512: must use the gradient direction in this case */
513: VecCopy(tao->gradient, nlsP->D);
514: VecScale(nlsP->D, -1.0);
515: ++nlsP->grad;
516: stepType = NLS_GRADIENT;
517: } else {
518: /* Attempt to use the BFGS direction */
519: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
520: VecScale(nlsP->D, -1.0);
522: /* Check for success (descent direction) */
523: VecDot(tao->gradient, nlsP->D, &gdx);
524: if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
525: /* BFGS direction is not descent or direction produced Inf or NaN
526: We can assert bfgsUpdates > 1 in this case because
527: the first solve produces the scaled gradient direction,
528: which is guaranteed to be descent */
530: /* Use steepest descent direction (scaled) */
532: if (f != 0.0) {
533: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
534: } else {
535: delta = 2.0 / (gnorm*gnorm);
536: }
537: MatLMVMSetDelta(nlsP->M, delta);
538: MatLMVMReset(nlsP->M);
539: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
540: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
541: VecScale(nlsP->D, -1.0);
543: bfgsUpdates = 1;
544: ++nlsP->sgrad;
545: stepType = NLS_SCALED_GRADIENT;
546: } else {
547: if (1 == bfgsUpdates) {
548: /* The first BFGS direction is always the scaled gradient */
549: ++nlsP->sgrad;
550: stepType = NLS_SCALED_GRADIENT;
551: } else {
552: ++nlsP->bfgs;
553: stepType = NLS_BFGS;
554: }
555: }
556: }
557: } else {
558: /* Computed Newton step is descent */
559: switch (ksp_reason) {
560: case KSP_DIVERGED_NANORINF:
561: case KSP_DIVERGED_BREAKDOWN:
562: case KSP_DIVERGED_INDEFINITE_MAT:
563: case KSP_DIVERGED_INDEFINITE_PC:
564: case KSP_CONVERGED_CG_NEG_CURVE:
565: /* Matrix or preconditioner is indefinite; increase perturbation */
566: if (pert <= 0.0) {
567: /* Initialize the perturbation */
568: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
569: if (NLS_KSP_GLTR == nlsP->ksp_type) {
570: KSPGLTRGetMinEig(tao->ksp, &e_min);
571: pert = PetscMax(pert, -e_min);
572: }
573: } else {
574: /* Increase the perturbation */
575: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
576: }
577: break;
579: default:
580: /* Newton step computation is good; decrease perturbation */
581: pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
582: if (pert < nlsP->pmin) {
583: pert = 0.0;
584: }
585: break;
586: }
588: ++nlsP->newt;
589: stepType = NLS_NEWTON;
590: }
592: /* Perform the linesearch */
593: fold = f;
594: VecCopy(tao->solution, nlsP->Xold);
595: VecCopy(tao->gradient, nlsP->Gold);
597: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
     TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);  /* f_full is used by the radius update below and would otherwise be uninitialized when this first line search succeeds */
598: TaoAddLineSearchCounts(tao);
600: while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) { /* Linesearch failed */
601: f = fold;
602: VecCopy(nlsP->Xold, tao->solution);
603: VecCopy(nlsP->Gold, tao->gradient);
605: switch(stepType) {
606: case NLS_NEWTON:
607: /* Failed to obtain acceptable iterate with Newton step
608: Update the perturbation for next time */
609: if (pert <= 0.0) {
610: /* Initialize the perturbation */
611: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
612: if (NLS_KSP_GLTR == nlsP->ksp_type) {
613: KSPGLTRGetMinEig(tao->ksp,&e_min);
614: pert = PetscMax(pert, -e_min);
615: }
616: } else {
617: /* Increase the perturbation */
618: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
619: }
621: if (NLS_PC_BFGS != nlsP->pc_type) {
622: /* We don't have the bfgs matrix around and being updated;
623: must use the gradient direction in this case */
624: VecCopy(tao->gradient, nlsP->D);
625: ++nlsP->grad;
626: stepType = NLS_GRADIENT;
627: } else {
628: /* Attempt to use the BFGS direction */
629: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
630: /* Check for success (descent direction) */
631: VecDot(tao->gradient, nlsP->D, &gdx);
632: if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
633: /* BFGS direction is not descent or direction produced Inf or NaN
634: We can assert bfgsUpdates > 1 in this case
635: Use steepest descent direction (scaled) */
637: if (f != 0.0) {
638: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
639: } else {
640: delta = 2.0 / (gnorm*gnorm);
641: }
642: MatLMVMSetDelta(nlsP->M, delta);
643: MatLMVMReset(nlsP->M);
644: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
645: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
647: bfgsUpdates = 1;
648: ++nlsP->sgrad;
649: stepType = NLS_SCALED_GRADIENT;
650: } else {
651: if (1 == bfgsUpdates) {
652: /* The first BFGS direction is always the scaled gradient */
653: ++nlsP->sgrad;
654: stepType = NLS_SCALED_GRADIENT;
655: } else {
656: ++nlsP->bfgs;
657: stepType = NLS_BFGS;
658: }
659: }
660: }
661: break;
663: case NLS_BFGS:
664: /* Can only enter if pc_type == NLS_PC_BFGS
665: Failed to obtain acceptable iterate with BFGS step
666: Attempt to use the scaled gradient direction */
668: if (f != 0.0) {
669: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
670: } else {
671: delta = 2.0 / (gnorm*gnorm);
672: }
673: MatLMVMSetDelta(nlsP->M, delta);
674: MatLMVMReset(nlsP->M);
675: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
676: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
678: bfgsUpdates = 1;
679: ++nlsP->sgrad;
680: stepType = NLS_SCALED_GRADIENT;
681: break;
683: case NLS_SCALED_GRADIENT:
684: /* Can only enter if pc_type == NLS_PC_BFGS
685: The scaled gradient step did not produce a new iterate;
686: attempt to use the gradient direction.
687: Need to make sure we are not using a different diagonal scaling */
689: MatLMVMSetScale(nlsP->M,0);
690: MatLMVMSetDelta(nlsP->M,1.0);
691: MatLMVMReset(nlsP->M);
692: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
693: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
695: bfgsUpdates = 1;
696: ++nlsP->grad;
697: stepType = NLS_GRADIENT;
698: break;
699: }
700: VecScale(nlsP->D, -1.0);
702: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
703: TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
704: TaoAddLineSearchCounts(tao);
705: }
707: if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
708: /* Failed to find an improving point */
709: f = fold;
710: VecCopy(nlsP->Xold, tao->solution);
711: VecCopy(nlsP->Gold, tao->gradient);
712: step = 0.0;
713: reason = TAO_DIVERGED_LS_FAILURE;
714: tao->reason = TAO_DIVERGED_LS_FAILURE;
715: break;
716: }
718: /* Update trust region radius */
719: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
720: switch(nlsP->update_type) {
721: case NLS_UPDATE_STEP:
722: if (stepType == NLS_NEWTON) {
723: if (step < nlsP->nu1) {
724: /* Very bad step taken; reduce radius */
725: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
726: } else if (step < nlsP->nu2) {
727: /* Reasonably bad step taken; reduce radius */
728: tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
729: } else if (step < nlsP->nu3) {
730: /* Reasonable step was taken; leave radius alone */
731: if (nlsP->omega3 < 1.0) {
732: tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
733: } else if (nlsP->omega3 > 1.0) {
734: tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
735: }
736: } else if (step < nlsP->nu4) {
737: /* Full step taken; increase the radius */
738: tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
739: } else {
740: /* More than full step taken; increase the radius */
741: tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
742: }
743: } else {
744: /* Newton step was not good; reduce the radius */
745: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
746: }
747: break;
749: case NLS_UPDATE_REDUCTION:
750: if (stepType == NLS_NEWTON) {
751: /* Get predicted reduction */
753: if (NLS_KSP_STCG == nlsP->ksp_type) {
754: KSPSTCGGetObjFcn(tao->ksp,&prered);
755: } else if (NLS_KSP_NASH == nlsP->ksp_type) {
756: KSPNASHGetObjFcn(tao->ksp,&prered);
757: } else {
758: KSPGLTRGetObjFcn(tao->ksp,&prered);
759: }
761: if (prered >= 0.0) {
762: /* The predicted reduction has the wrong sign. This cannot */
763: /* happen in infinite precision arithmetic. Step should */
764: /* be rejected! */
765: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
766: } else {
767: if (PetscIsInfOrNanReal(f_full)) {
768: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
769: } else {
770: /* Compute the actual reduction */
771: actred = fold - f_full;
772: prered = -prered;
773: if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
774: (PetscAbsScalar(prered) <= nlsP->epsilon)) {
775: kappa = 1.0;
776: } else {
777: kappa = actred / prered;
778: }
780: /* Accept or reject the step and update the radius */
781: if (kappa < nlsP->eta1) {
782: /* Very bad step */
783: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
784: } else if (kappa < nlsP->eta2) {
785: /* Marginally bad step */
786: tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
787: } else if (kappa < nlsP->eta3) {
788: /* Reasonable step */
789: if (nlsP->alpha3 < 1.0) {
790: tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
791: } else if (nlsP->alpha3 > 1.0) {
792: tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
793: }
794: } else if (kappa < nlsP->eta4) {
795: /* Good step */
796: tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
797: } else {
798: /* Very good step */
799: tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
800: }
801: }
802: }
803: } else {
804: /* Newton step was not good; reduce the radius */
805: tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
806: }
807: break;
809: default:
810: if (stepType == NLS_NEWTON) {
812: if (NLS_KSP_STCG == nlsP->ksp_type) {
813: KSPSTCGGetObjFcn(tao->ksp,&prered);
814: } else if (NLS_KSP_NASH == nlsP->ksp_type) {
815: KSPNASHGetObjFcn(tao->ksp,&prered);
816: } else {
817: KSPGLTRGetObjFcn(tao->ksp,&prered);
818: }
819: if (prered >= 0.0) {
820: /* The predicted reduction has the wrong sign. This cannot */
821: /* happen in infinite precision arithmetic. Step should */
822: /* be rejected! */
823: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
824: } else {
825: if (PetscIsInfOrNanReal(f_full)) {
826: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
827: } else {
828: actred = fold - f_full;
829: prered = -prered;
830: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
831: kappa = 1.0;
832: } else {
833: kappa = actred / prered;
834: }
836: tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
837: tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
838: tau_min = PetscMin(tau_1, tau_2);
839: tau_max = PetscMax(tau_1, tau_2);
841: if (kappa >= 1.0 - nlsP->mu1) {
842: /* Great agreement */
843: if (tau_max < 1.0) {
844: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
845: } else if (tau_max > nlsP->gamma4) {
846: tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
847: } else {
848: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
849: }
850: } else if (kappa >= 1.0 - nlsP->mu2) {
851: /* Good agreement */
853: if (tau_max < nlsP->gamma2) {
854: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
855: } else if (tau_max > nlsP->gamma3) {
856: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
857: } else if (tau_max < 1.0) {
858: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
859: } else {
860: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
861: }
862: } else {
863: /* Not good agreement */
864: if (tau_min > 1.0) {
865: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
866: } else if (tau_max < nlsP->gamma1) {
867: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
868: } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
869: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
870: } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
871: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
872: } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
873: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
874: } else {
875: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
876: }
877: }
878: }
879: }
880: } else {
881: /* Newton step was not good; reduce the radius */
882: tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
883: }
884: break;
885: }
887: /* The radius may have been increased; modify if it is too large */
888: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
889: }
891: /* Check for termination */
892: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
893: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
894: needH = 1;
895: TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step, &reason);
896: }
897: return(0);
898: }
900: /* ---------------------------------------------------------- */
903: static PetscErrorCode TaoSetUp_NLS(Tao tao)
904: {
905: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
909: if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
910: if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
911: if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
912: if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
913: if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
914: if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
915: nlsP->Diag = 0;
916: nlsP->M = 0;
917: return(0);
918: }
920: /*------------------------------------------------------------*/
923: static PetscErrorCode TaoDestroy_NLS(Tao tao)
924: {
925: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
929: if (tao->setupcalled) {
930: VecDestroy(&nlsP->D);
931: VecDestroy(&nlsP->W);
932: VecDestroy(&nlsP->Xold);
933: VecDestroy(&nlsP->Gold);
934: }
935: VecDestroy(&nlsP->Diag);
936: MatDestroy(&nlsP->M);
937: PetscFree(tao->data);
938: return(0);
939: }
941: /*------------------------------------------------------------*/
944: static PetscErrorCode TaoSetFromOptions_NLS(PetscOptionItems *PetscOptionsObject,Tao tao)
945: {
946: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
950: PetscOptionsHead(PetscOptionsObject,"Newton line search method for unconstrained optimization");
951: PetscOptionsEList("-tao_nls_ksp_type", "ksp type", "", NLS_KSP, NLS_KSP_TYPES, NLS_KSP[nlsP->ksp_type], &nlsP->ksp_type, 0);
952: PetscOptionsEList("-tao_nls_pc_type", "pc type", "", NLS_PC, NLS_PC_TYPES, NLS_PC[nlsP->pc_type], &nlsP->pc_type, 0);
953: PetscOptionsEList("-tao_nls_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[nlsP->bfgs_scale_type], &nlsP->bfgs_scale_type, 0);
954: PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
955: PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
956: PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval,NULL);
957: PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin,NULL);
958: PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax,NULL);
959: PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac,NULL);
960: PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin,NULL);
961: PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax,NULL);
962: PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac,NULL);
963: PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac,NULL);
964: PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac,NULL);
965: PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac,NULL);
966: PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1,NULL);
967: PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2,NULL);
968: PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3,NULL);
969: PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4,NULL);
970: PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1,NULL);
971: PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2,NULL);
972: PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3,NULL);
973: PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4,NULL);
974: PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5,NULL);
975: PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1,NULL);
976: PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2,NULL);
977: PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3,NULL);
978: PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4,NULL);
979: PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1,NULL);
980: PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2,NULL);
981: PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3,NULL);
982: PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4,NULL);
983: PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5,NULL);
984: PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i,NULL);
985: PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i,NULL);
986: PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i,NULL);
987: PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i,NULL);
988: PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i,NULL);
989: PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i,NULL);
990: PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i,NULL);
991: PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1,NULL);
992: PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2,NULL);
993: PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1,NULL);
994: PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2,NULL);
995: PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3,NULL);
996: PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4,NULL);
997: PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta,NULL);
998: PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius,NULL);
999: PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius,NULL);
1000: PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon,NULL);
1001: PetscOptionsTail();
1002: TaoLineSearchSetFromOptions(tao->linesearch);
1003: KSPSetFromOptions(tao->ksp);
1004: return(0);
1005: }
1008: /*------------------------------------------------------------*/
1011: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
1012: {
1013: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
1014: PetscInt nrejects;
1015: PetscBool isascii;
1019: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
1020: if (isascii) {
1021: PetscViewerASCIIPushTab(viewer);
1022: if (NLS_PC_BFGS == nlsP->pc_type && nlsP->M) {
1023: MatLMVMGetRejects(nlsP->M,&nrejects);
1024: PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n",nrejects);
1025: }
1026: PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
1027: PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
1028: PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", nlsP->sgrad);
1029: PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);
1031: PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
1032: PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
1033: PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
1034: PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
1035: PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
1036: PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
1037: PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
1038: PetscViewerASCIIPopTab(viewer);
1039: }
1040: return(0);
1041: }
1043: /* ---------------------------------------------------------- */
1044: /*MC
1045: TAONLS - Newton's method with linesearch for unconstrained minimization.
1046: At each iteration, the Newton line search method solves the symmetric
1047: system of equations to obtain the step direction dk:
1048: Hk dk = -gk
1049: then a More-Thuente line search is applied to the direction dk to approximately
1050: solve
1051: min_t f(xk + t d_k)
1053: Options Database Keys:
1054: + -tao_nls_ksp_type - "cg","nash","stcg","gltr","petsc"
1055: . -tao_nls_pc_type - "none","ahess","bfgs","petsc"
1056: . -tao_nls_bfgs_scale_type - "ahess","phess","bfgs"
1057: . -tao_nls_init_type - "constant","direction","interpolation"
1058: . -tao_nls_update_type - "step","reduction","interpolation"
1059: . -tao_nls_sval - perturbation starting value
1060: . -tao_nls_imin - minimum initial perturbation
1061: . -tao_nls_imax - maximum initial perturbation
1062: . -tao_nls_pmin - minimum perturbation
1063: . -tao_nls_pmax - maximum perturbation
1064: . -tao_nls_pgfac - growth factor
1065: . -tao_nls_psfac - shrink factor
1066: . -tao_nls_imfac - initial merit factor
1067: . -tao_nls_pmgfac - merit growth factor
1068: . -tao_nls_pmsfac - merit shrink factor
1069: . -tao_nls_eta1 - poor steplength; reduce radius
1070: . -tao_nls_eta2 - reasonable steplength; leave radius alone
1071: . -tao_nls_eta3 - good steplength; increase radius
1072: . -tao_nls_eta4 - excellent steplength; greatly increase radius
1073: . -tao_nls_alpha1 - alpha1 reduction
1074: . -tao_nls_alpha2 - alpha2 reduction
1075: . -tao_nls_alpha3 - alpha3 reduction
1076: . -tao_nls_alpha4 - alpha4 reduction
1077: . -tao_nls_alpha5 - alpha5 reduction
1078: . -tao_nls_mu1 - mu1 interpolation update
1079: . -tao_nls_mu2 - mu2 interpolation update
1080: . -tao_nls_gamma1 - gamma1 interpolation update
1081: . -tao_nls_gamma2 - gamma2 interpolation update
1082: . -tao_nls_gamma3 - gamma3 interpolation update
1083: . -tao_nls_gamma4 - gamma4 interpolation update
1084: . -tao_nls_theta - theta interpolation update
1085: . -tao_nls_omega1 - omega1 step update
1086: . -tao_nls_omega2 - omega2 step update
1087: . -tao_nls_omega3 - omega3 step update
1088: . -tao_nls_omega4 - omega4 step update
1089: . -tao_nls_omega5 - omega5 step update
1090: . -tao_nls_mu1_i - mu1 interpolation init factor
1091: . -tao_nls_mu2_i - mu2 interpolation init factor
1092: . -tao_nls_gamma1_i - gamma1 interpolation init factor
1093: . -tao_nls_gamma2_i - gamma2 interpolation init factor
1094: . -tao_nls_gamma3_i - gamma3 interpolation init factor
1095: . -tao_nls_gamma4_i - gamma4 interpolation init factor
1096: - -tao_nls_theta_i - theta interpolation init factor
1098: Level: beginner
1099: M*/
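As an illustration (not taken from the PETSc manual pages), these keys are normally given on the command line, e.g. -tao_type nls -tao_nls_ksp_type gltr -tao_nls_pc_type bfgs -tao_nls_update_type reduction. A minimal sketch of the equivalent programmatic route, assuming the petsc-3.7 PetscOptionsSetValue signature:

#include <petscsys.h>

/* Preload NLS options into the global options database before TaoSetFromOptions() runs */
PetscErrorCode PreloadNLSOptions(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscOptionsSetValue(NULL,"-tao_type","nls");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-tao_nls_ksp_type","gltr");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-tao_nls_pc_type","bfgs");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-tao_nls_update_type","reduction");CHKERRQ(ierr);
  PetscFunctionReturn(0);
}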
1103: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
1104: {
1105: TAO_NLS *nlsP;
1106: const char *morethuente_type = TAOLINESEARCHMT;
1110: PetscNewLog(tao,&nlsP);
1112: tao->ops->setup = TaoSetUp_NLS;
1113: tao->ops->solve = TaoSolve_NLS;
1114: tao->ops->view = TaoView_NLS;
1115: tao->ops->setfromoptions = TaoSetFromOptions_NLS;
1116: tao->ops->destroy = TaoDestroy_NLS;
1118: /* Override default settings (unless already changed) */
1119: if (!tao->max_it_changed) tao->max_it = 50;
1120: if (!tao->trust0_changed) tao->trust0 = 100.0;
1122: tao->data = (void*)nlsP;
1124: nlsP->sval = 0.0;
1125: nlsP->imin = 1.0e-4;
1126: nlsP->imax = 1.0e+2;
1127: nlsP->imfac = 1.0e-1;
1129: nlsP->pmin = 1.0e-12;
1130: nlsP->pmax = 1.0e+2;
1131: nlsP->pgfac = 1.0e+1;
1132: nlsP->psfac = 4.0e-1;
1133: nlsP->pmgfac = 1.0e-1;
1134: nlsP->pmsfac = 1.0e-1;
1136: /* Default values for trust-region radius update based on steplength */
1137: nlsP->nu1 = 0.25;
1138: nlsP->nu2 = 0.50;
1139: nlsP->nu3 = 1.00;
1140: nlsP->nu4 = 1.25;
1142: nlsP->omega1 = 0.25;
1143: nlsP->omega2 = 0.50;
1144: nlsP->omega3 = 1.00;
1145: nlsP->omega4 = 2.00;
1146: nlsP->omega5 = 4.00;
1148: /* Default values for trust-region radius update based on reduction */
1149: nlsP->eta1 = 1.0e-4;
1150: nlsP->eta2 = 0.25;
1151: nlsP->eta3 = 0.50;
1152: nlsP->eta4 = 0.90;
1154: nlsP->alpha1 = 0.25;
1155: nlsP->alpha2 = 0.50;
1156: nlsP->alpha3 = 1.00;
1157: nlsP->alpha4 = 2.00;
1158: nlsP->alpha5 = 4.00;
1160: /* Default values for trust-region radius update based on interpolation */
1161: nlsP->mu1 = 0.10;
1162: nlsP->mu2 = 0.50;
1164: nlsP->gamma1 = 0.25;
1165: nlsP->gamma2 = 0.50;
1166: nlsP->gamma3 = 2.00;
1167: nlsP->gamma4 = 4.00;
1169: nlsP->theta = 0.05;
1171: /* Default values for trust region initialization based on interpolation */
1172: nlsP->mu1_i = 0.35;
1173: nlsP->mu2_i = 0.50;
1175: nlsP->gamma1_i = 0.0625;
1176: nlsP->gamma2_i = 0.5;
1177: nlsP->gamma3_i = 2.0;
1178: nlsP->gamma4_i = 5.0;
1180: nlsP->theta_i = 0.25;
1182: /* Remaining parameters */
1183: nlsP->min_radius = 1.0e-10;
1184: nlsP->max_radius = 1.0e10;
1185: nlsP->epsilon = 1.0e-6;
1187: nlsP->ksp_type = NLS_KSP_STCG;
1188: nlsP->pc_type = NLS_PC_BFGS;
1189: nlsP->bfgs_scale_type = BFGS_SCALE_PHESS;
1190: nlsP->init_type = NLS_INIT_INTERPOLATION;
1191: nlsP->update_type = NLS_UPDATE_STEP;
1193: TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
1194: TaoLineSearchSetType(tao->linesearch,morethuente_type);
1195: TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
1196: TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
1198: /* Set linear solver to default for symmetric matrices */
1199: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
1200: KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix);
1201: return(0);
1202: }
1206: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
1207: {
1209: Mat M;
1215: PCShellGetContext(pc,(void**)&M);
1216: MatLMVMSolve(M, b, x);
1217: return(0);
1218: }