Actual source code: nls.c
petsc-3.5.4 2015-05-23
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/matrix/lmvmmat.h>
3: #include <../src/tao/unconstrained/impls/nls/nls.h>
5: #include <petscksp.h>
6: #include <petscpc.h>
7: #include <petsc-private/kspimpl.h>
8: #include <petsc-private/pcimpl.h>
10: #define NLS_KSP_CG 0
11: #define NLS_KSP_NASH 1
12: #define NLS_KSP_STCG 2
13: #define NLS_KSP_GLTR 3
14: #define NLS_KSP_PETSC 4
15: #define NLS_KSP_TYPES 5
17: #define NLS_PC_NONE 0
18: #define NLS_PC_AHESS 1
19: #define NLS_PC_BFGS 2
20: #define NLS_PC_PETSC 3
21: #define NLS_PC_TYPES 4
23: #define BFGS_SCALE_AHESS 0
24: #define BFGS_SCALE_PHESS 1
25: #define BFGS_SCALE_BFGS 2
26: #define BFGS_SCALE_TYPES 3
28: #define NLS_INIT_CONSTANT 0
29: #define NLS_INIT_DIRECTION 1
30: #define NLS_INIT_INTERPOLATION 2
31: #define NLS_INIT_TYPES 3
33: #define NLS_UPDATE_STEP 0
34: #define NLS_UPDATE_REDUCTION 1
35: #define NLS_UPDATE_INTERPOLATION 2
36: #define NLS_UPDATE_TYPES 3
38: static const char *NLS_KSP[64] = {"cg", "nash", "stcg", "gltr", "petsc"};
40: static const char *NLS_PC[64] = {"none", "ahess", "bfgs", "petsc"};
42: static const char *BFGS_SCALE[64] = {"ahess", "phess", "bfgs"};
44: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};
46: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};
48: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x);
49: /* Routine for BFGS preconditioner */

51: /*
52: Implements Newton's Method with a line search approach for solving
53: unconstrained minimization problems. A Moré-Thuente line search
54: is used to guarantee that the BFGS preconditioner remains positive
55: definite.
57: The method can shift the Hessian matrix. The shifting procedure is
58: adapted from the PATH algorithm for solving complementarity
59: problems.
61: The linear system solve should be done with a conjugate gradient
62: method, although any method can be used. */
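/* Editor's note (illustrative, not in the original source): this algorithm is
   typically selected at runtime with
       -tao_type nls -tao_nls_ksp_type stcg -tao_nls_pc_type bfgs
   or programmatically with TaoSetType(tao,TAONLS); the -tao_nls_* option
   names are the ones registered below in TaoSetFromOptions_NLS(). */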
64: #define NLS_NEWTON 0
65: #define NLS_BFGS 1
66: #define NLS_SCALED_GRADIENT 2
67: #define NLS_GRADIENT 3
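/* Step types used by TaoSolve_NLS(): the solver prefers a full Newton step and
   falls back, in order, to a BFGS step, a scaled gradient step, and finally an
   unscaled gradient step whenever a direction fails the descent test or the
   line search. */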
71: static PetscErrorCode TaoSolve_NLS(Tao tao)
72: {
73: PetscErrorCode ierr;
74: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
75: PC pc;
76: KSPConvergedReason ksp_reason;
77: TaoLineSearchConvergedReason ls_reason;
78: TaoConvergedReason reason;
80: PetscReal fmin, ftrial, f_full, prered, actred, kappa, sigma;
81: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
82: PetscReal f, fold, gdx, gnorm, pert;
83: PetscReal step = 1.0;
84: PetscReal delta;
85: PetscReal norm_d = 0.0, e_min;
87: PetscInt stepType;
88: PetscInt iter = 0;
89: PetscInt bfgsUpdates = 0;
90: PetscInt n,N,kspits;
91: PetscInt needH;
93: PetscInt i_max = 5;
94: PetscInt j_max = 1;
95: PetscInt i, j;
98: if (tao->XL || tao->XU || tao->ops->computebounds) {
99: PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
100: }
102: /* Initialize variables */
103: pert = nlsP->sval;
105: nlsP->ksp_atol = 0;
106: nlsP->ksp_rtol = 0;
107: nlsP->ksp_dtol = 0;
108: nlsP->ksp_ctol = 0;
109: nlsP->ksp_negc = 0;
110: nlsP->ksp_iter = 0;
111: nlsP->ksp_othr = 0;
113: /* Modify the linear solver to a trust region method if desired */
114: switch(nlsP->ksp_type) {
115: case NLS_KSP_CG:
116: KSPSetType(tao->ksp, KSPCG);
117: KSPSetFromOptions(tao->ksp);
118: break;
120: case NLS_KSP_NASH:
121: KSPSetType(tao->ksp, KSPNASH);
122: KSPSetFromOptions(tao->ksp);
123: break;
125: case NLS_KSP_STCG:
126: KSPSetType(tao->ksp, KSPSTCG);
127: KSPSetFromOptions(tao->ksp);
128: break;
130: case NLS_KSP_GLTR:
131: KSPSetType(tao->ksp, KSPGLTR);
132: KSPSetFromOptions(tao->ksp);
133: break;
135: default:
136: /* Use the method set by the ksp_type */
137: break;
138: }
140: /* Initialize trust-region radius when using nash, stcg, or gltr
141: Will be reset during the first iteration */
142: if (NLS_KSP_NASH == nlsP->ksp_type) {
143: KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
144: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
145: KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
146: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
147: KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
148: }
150: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
151: tao->trust = tao->trust0;
153: if (tao->trust < 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial radius negative");
155: /* Modify the radius if it is too large or small */
156: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
157: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
158: }
160: /* Get vectors we will need */
161: if (NLS_PC_BFGS == nlsP->pc_type && !nlsP->M) {
162: VecGetLocalSize(tao->solution,&n);
163: VecGetSize(tao->solution,&N);
164: MatCreateLMVM(((PetscObject)tao)->comm,n,N,&nlsP->M);
165: MatLMVMAllocateVectors(nlsP->M,tao->solution);
166: }
168: /* Check convergence criteria */
169: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
170: VecNorm(tao->gradient,NORM_2,&gnorm);
171: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
172: needH = 1;
174: TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);
175: if (reason != TAO_CONTINUE_ITERATING) return(0);
177: /* create vectors for the limited memory preconditioner */
178: if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_BFGS != nlsP->bfgs_scale_type)) {
179: if (!nlsP->Diag) {
180: VecDuplicate(tao->solution,&nlsP->Diag);
181: }
182: }
184: /* Modify the preconditioner to use the bfgs approximation */
185: KSPGetPC(tao->ksp, &pc);
186: switch(nlsP->pc_type) {
187: case NLS_PC_NONE:
188: PCSetType(pc, PCNONE);
189: if (pc->ops->setfromoptions) {
190: (*pc->ops->setfromoptions)(pc);
191: }
192: break;
194: case NLS_PC_AHESS:
195: PCSetType(pc, PCJACOBI);
196: if (pc->ops->setfromoptions) {
197: (*pc->ops->setfromoptions)(pc);
198: }
199: PCJacobiSetUseAbs(pc);
200: break;
202: case NLS_PC_BFGS:
203: PCSetType(pc, PCSHELL);
204: if (pc->ops->setfromoptions) {
205: (*pc->ops->setfromoptions)(pc);
206: }
207: PCShellSetName(pc, "bfgs");
208: PCShellSetContext(pc, nlsP->M);
209: PCShellSetApply(pc, MatLMVMSolveShell);
210: break;
212: default:
213: /* Use the pc method set by pc_type */
214: break;
215: }
217: /* Initialize trust-region radius. The initialization is only performed
218: when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
219: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
220: switch(nlsP->init_type) {
221: case NLS_INIT_CONSTANT:
222: /* Use the initial radius specified */
223: break;
225: case NLS_INIT_INTERPOLATION:
226: /* Use interpolation to determine the initial trust-region radius */
227: max_radius = 0.0;
229: for (j = 0; j < j_max; ++j) {
230: fmin = f;
231: sigma = 0.0;
233: if (needH) {
234: TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
235: needH = 0;
236: }
238: for (i = 0; i < i_max; ++i) {
239: VecCopy(tao->solution,nlsP->W);
240: VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
241: TaoComputeObjective(tao, nlsP->W, &ftrial);
242: if (PetscIsInfOrNanReal(ftrial)) {
243: tau = nlsP->gamma1_i;
244: } else {
245: if (ftrial < fmin) {
246: fmin = ftrial;
247: sigma = -tao->trust / gnorm;
248: }
250: MatMult(tao->hessian, tao->gradient, nlsP->D);
251: VecDot(tao->gradient, nlsP->D, &prered);
253: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
254: actred = f - ftrial;
255: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
256: kappa = 1.0;
257: } else {
258: kappa = actred / prered;
259: }
261: tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
262: tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
263: tau_min = PetscMin(tau_1, tau_2);
264: tau_max = PetscMax(tau_1, tau_2);
266: if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu1_i) {
267: /* Great agreement */
268: max_radius = PetscMax(max_radius, tao->trust);
270: if (tau_max < 1.0) {
271: tau = nlsP->gamma3_i;
272: } else if (tau_max > nlsP->gamma4_i) {
273: tau = nlsP->gamma4_i;
274: } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
275: tau = tau_1;
276: } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
277: tau = tau_2;
278: } else {
279: tau = tau_max;
280: }
281: } else if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu2_i) {
282: /* Good agreement */
283: max_radius = PetscMax(max_radius, tao->trust);
285: if (tau_max < nlsP->gamma2_i) {
286: tau = nlsP->gamma2_i;
287: } else if (tau_max > nlsP->gamma3_i) {
288: tau = nlsP->gamma3_i;
289: } else {
290: tau = tau_max;
291: }
292: } else {
293: /* Not good agreement */
294: if (tau_min > 1.0) {
295: tau = nlsP->gamma2_i;
296: } else if (tau_max < nlsP->gamma1_i) {
297: tau = nlsP->gamma1_i;
298: } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
299: tau = nlsP->gamma1_i;
300: } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
301: tau = tau_1;
302: } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
303: tau = tau_2;
304: } else {
305: tau = tau_max;
306: }
307: }
308: }
309: tao->trust = tau * tao->trust;
310: }
312: if (fmin < f) {
313: f = fmin;
314: VecAXPY(tao->solution,sigma,tao->gradient);
315: TaoComputeGradient(tao,tao->solution,tao->gradient);
317: VecNorm(tao->gradient,NORM_2,&gnorm);
318: if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute gradient generated Inf or NaN");
319: needH = 1;
321: TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);
322: if (reason != TAO_CONTINUE_ITERATING) return(0);
323: }
324: }
325: tao->trust = PetscMax(tao->trust, max_radius);
327: /* Modify the radius if it is too large or small */
328: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
329: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
330: break;
332: default:
333: /* Norm of the first direction will initialize radius */
334: tao->trust = 0.0;
335: break;
336: }
337: }
339: /* Set initial scaling for the BFGS preconditioner
340: This step is done after computing the initial trust-region radius
341: since the function value may have decreased */
342: if (NLS_PC_BFGS == nlsP->pc_type) {
343: if (f != 0.0) {
344: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
345: } else {
346: delta = 2.0 / (gnorm*gnorm);
347: }
348: MatLMVMSetDelta(nlsP->M,delta);
349: }
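/* Editor's note: delta = 2|f|/||g||^2 is a heuristic initial scaling for the
   LMVM approximation; it is the step length at which a quadratic model along
   -g would drop from the value f to zero, so the first quasi-Newton step has
   a reasonable magnitude. */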
351: /* Set counters for gradient/reset steps */
352: nlsP->newt = 0;
353: nlsP->bfgs = 0;
354: nlsP->sgrad = 0;
355: nlsP->grad = 0;
357: /* Have not converged; continue with Newton method */
358: while (reason == TAO_CONTINUE_ITERATING) {
359: ++iter;
361: /* Compute the Hessian */
362: if (needH) {
363: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
364: needH = 0;
365: }
367: if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_AHESS == nlsP->bfgs_scale_type)) {
368: /* Obtain diagonal for the bfgs preconditioner */
369: MatGetDiagonal(tao->hessian, nlsP->Diag);
370: VecAbs(nlsP->Diag);
371: VecReciprocal(nlsP->Diag);
372: MatLMVMSetScale(nlsP->M,nlsP->Diag);
373: }
375: /* Shift the Hessian matrix */
376: if (pert > 0) {
377: MatShift(tao->hessian, pert);
378: if (tao->hessian != tao->hessian_pre) {
379: MatShift(tao->hessian_pre, pert);
380: }
381: }
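/* The shift above replaces the Hessian (and, if different, the preconditioning
   matrix) by H + pert*I, moving every eigenvalue up by pert; this is the
   PATH-style perturbation described in the header comment, used when the
   Hessian may be indefinite. */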
383: if (NLS_PC_BFGS == nlsP->pc_type) {
384: if (BFGS_SCALE_PHESS == nlsP->bfgs_scale_type) {
385: /* Obtain diagonal for the bfgs preconditioner */
386: MatGetDiagonal(tao->hessian, nlsP->Diag);
387: VecAbs(nlsP->Diag);
388: VecReciprocal(nlsP->Diag);
389: MatLMVMSetScale(nlsP->M,nlsP->Diag);
390: }
391: /* Update the limited memory preconditioner */
392: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
393: ++bfgsUpdates;
394: }
396: /* Solve the Newton system of equations */
397: KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
398: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
400: if (NLS_KSP_NASH == nlsP->ksp_type) {
401: KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
402: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
403: KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
404: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
405: KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
406: }
408: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
409: KSPGetIterationNumber(tao->ksp,&kspits);
410: tao->ksp_its+=kspits;
412: if (NLS_KSP_NASH == nlsP->ksp_type) {
413: KSPNASHGetNormD(tao->ksp,&norm_d);
414: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
415: KSPSTCGGetNormD(tao->ksp,&norm_d);
416: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
417: KSPGLTRGetNormD(tao->ksp,&norm_d);
418: }
420: if (0.0 == tao->trust) {
421: /* Radius was uninitialized; use the norm of the direction */
422: if (norm_d > 0.0) {
423: tao->trust = norm_d;
425: /* Modify the radius if it is too large or small */
426: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
427: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
428: } else {
429: /* The direction was bad; set radius to default value and re-solve
430: the trust-region subproblem to get a direction */
431: tao->trust = tao->trust0;
433: /* Modify the radius if it is too large or small */
434: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
435: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
437: if (NLS_KSP_NASH == nlsP->ksp_type) {
438: KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
439: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
440: KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
441: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
442: KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
443: }
445: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
446: KSPGetIterationNumber(tao->ksp,&kspits);
447: tao->ksp_its+=kspits;
448: if (NLS_KSP_NASH == nlsP->ksp_type) {
449: KSPNASHGetNormD(tao->ksp,&norm_d);
450: } else if (NLS_KSP_STCG == nlsP->ksp_type) {
451: KSPSTCGGetNormD(tao->ksp,&norm_d);
452: } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
453: KSPGLTRGetNormD(tao->ksp,&norm_d);
454: }
456: if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
457: }
458: }
459: } else {
460: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
461: KSPGetIterationNumber(tao->ksp, &kspits);
462: tao->ksp_its += kspits;
463: }
464: VecScale(nlsP->D, -1.0);
465: KSPGetConvergedReason(tao->ksp, &ksp_reason);
466: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NLS_PC_BFGS == nlsP->pc_type) && (bfgsUpdates > 1)) {
467: /* Preconditioner is numerically indefinite; reset the
468: approximation if using BFGS preconditioning. */
470: if (f != 0.0) {
471: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
472: } else {
473: delta = 2.0 / (gnorm*gnorm);
474: }
475: MatLMVMSetDelta(nlsP->M,delta);
476: MatLMVMReset(nlsP->M);
477: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
478: bfgsUpdates = 1;
479: }
481: if (KSP_CONVERGED_ATOL == ksp_reason) {
482: ++nlsP->ksp_atol;
483: } else if (KSP_CONVERGED_RTOL == ksp_reason) {
484: ++nlsP->ksp_rtol;
485: } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
486: ++nlsP->ksp_ctol;
487: } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
488: ++nlsP->ksp_negc;
489: } else if (KSP_DIVERGED_DTOL == ksp_reason) {
490: ++nlsP->ksp_dtol;
491: } else if (KSP_DIVERGED_ITS == ksp_reason) {
492: ++nlsP->ksp_iter;
493: } else {
494: ++nlsP->ksp_othr;
495: }
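/* The counters above are purely diagnostic; TaoView_NLS() reports them as
   "nls ksp atol", "nls ksp rtol", etc. */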
497: /* Check for success (descent direction) */
498: VecDot(nlsP->D, tao->gradient, &gdx);
499: if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
500: /* Newton step is not a descent direction or produced Inf or NaN
501: Update the perturbation for next time */
502: if (pert <= 0.0) {
503: /* Initialize the perturbation */
504: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
505: if (NLS_KSP_GLTR == nlsP->ksp_type) {
506: KSPGLTRGetMinEig(tao->ksp,&e_min);
507: pert = PetscMax(pert, -e_min);
508: }
509: } else {
510: /* Increase the perturbation */
511: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
512: }
514: if (NLS_PC_BFGS != nlsP->pc_type) {
515: /* We don't have the BFGS matrix around and updated;
516: must use the gradient direction in this case */
517: VecCopy(tao->gradient, nlsP->D);
518: VecScale(nlsP->D, -1.0);
519: ++nlsP->grad;
520: stepType = NLS_GRADIENT;
521: } else {
522: /* Attempt to use the BFGS direction */
523: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
524: VecScale(nlsP->D, -1.0);
526: /* Check for success (descent direction) */
527: VecDot(tao->gradient, nlsP->D, &gdx);
528: if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
529: /* BFGS direction is not a descent direction or produced Inf or NaN
530: We can assert bfgsUpdates > 1 in this case because
531: the first solve produces the scaled gradient direction,
532: which is guaranteed to be descent */
534: /* Use steepest descent direction (scaled) */
536: if (f != 0.0) {
537: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
538: } else {
539: delta = 2.0 / (gnorm*gnorm);
540: }
541: MatLMVMSetDelta(nlsP->M, delta);
542: MatLMVMReset(nlsP->M);
543: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
544: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
545: VecScale(nlsP->D, -1.0);
547: bfgsUpdates = 1;
548: ++nlsP->sgrad;
549: stepType = NLS_SCALED_GRADIENT;
550: } else {
551: if (1 == bfgsUpdates) {
552: /* The first BFGS direction is always the scaled gradient */
553: ++nlsP->sgrad;
554: stepType = NLS_SCALED_GRADIENT;
555: } else {
556: ++nlsP->bfgs;
557: stepType = NLS_BFGS;
558: }
559: }
560: }
561: } else {
562: /* Computed Newton step is descent */
563: switch (ksp_reason) {
564: case KSP_DIVERGED_NANORINF:
565: case KSP_DIVERGED_BREAKDOWN:
566: case KSP_DIVERGED_INDEFINITE_MAT:
567: case KSP_DIVERGED_INDEFINITE_PC:
568: case KSP_CONVERGED_CG_NEG_CURVE:
569: /* Matrix or preconditioner is indefinite; increase perturbation */
570: if (pert <= 0.0) {
571: /* Initialize the perturbation */
572: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
573: if (NLS_KSP_GLTR == nlsP->ksp_type) {
574: KSPGLTRGetMinEig(tao->ksp, &e_min);
575: pert = PetscMax(pert, -e_min);
576: }
577: } else {
578: /* Increase the perturbation */
579: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
580: }
581: break;
583: default:
584: /* Newton step computation is good; decrease perturbation */
585: pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
586: if (pert < nlsP->pmin) {
587: pert = 0.0;
588: }
589: break;
590: }
592: ++nlsP->newt;
593: stepType = NLS_NEWTON;
594: }
596: /* Perform the linesearch */
597: fold = f;
598: VecCopy(tao->solution, nlsP->Xold);
599: VecCopy(tao->gradient, nlsP->Gold);
601: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
602: TaoAddLineSearchCounts(tao);
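/* If the line search fails, retry with progressively more conservative
   directions: Newton -> BFGS -> scaled gradient -> gradient.  Only when the
   plain gradient direction also fails is TAO_DIVERGED_LS_FAILURE declared
   below. */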
604: while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) { /* Linesearch failed */
605: f = fold;
606: VecCopy(nlsP->Xold, tao->solution);
607: VecCopy(nlsP->Gold, tao->gradient);
609: switch(stepType) {
610: case NLS_NEWTON:
611: /* Failed to obtain acceptable iterate with Newton step
612: Update the perturbation for next time */
613: if (pert <= 0.0) {
614: /* Initialize the perturbation */
615: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
616: if (NLS_KSP_GLTR == nlsP->ksp_type) {
617: KSPGLTRGetMinEig(tao->ksp,&e_min);
618: pert = PetscMax(pert, -e_min);
619: }
620: } else {
621: /* Increase the perturbation */
622: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
623: }
625: if (NLS_PC_BFGS != nlsP->pc_type) {
626: /* We don't have the BFGS matrix around and being updated;
627: must use the gradient direction in this case */
628: VecCopy(tao->gradient, nlsP->D);
629: ++nlsP->grad;
630: stepType = NLS_GRADIENT;
631: } else {
632: /* Attempt to use the BFGS direction */
633: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
634: /* Check for success (descent direction) */
635: VecDot(tao->gradient, nlsP->D, &gdx);
636: if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
637: /* BFGS direction is not a descent direction or produced Inf or NaN
638: We can assert bfgsUpdates > 1 in this case
639: Use steepest descent direction (scaled) */
641: if (f != 0.0) {
642: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
643: } else {
644: delta = 2.0 / (gnorm*gnorm);
645: }
646: MatLMVMSetDelta(nlsP->M, delta);
647: MatLMVMReset(nlsP->M);
648: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
649: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
651: bfgsUpdates = 1;
652: ++nlsP->sgrad;
653: stepType = NLS_SCALED_GRADIENT;
654: } else {
655: if (1 == bfgsUpdates) {
656: /* The first BFGS direction is always the scaled gradient */
657: ++nlsP->sgrad;
658: stepType = NLS_SCALED_GRADIENT;
659: } else {
660: ++nlsP->bfgs;
661: stepType = NLS_BFGS;
662: }
663: }
664: }
665: break;
667: case NLS_BFGS:
668: /* Can only enter if pc_type == NLS_PC_BFGS
669: Failed to obtain acceptable iterate with BFGS step
670: Attempt to use the scaled gradient direction */
672: if (f != 0.0) {
673: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
674: } else {
675: delta = 2.0 / (gnorm*gnorm);
676: }
677: MatLMVMSetDelta(nlsP->M, delta);
678: MatLMVMReset(nlsP->M);
679: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
680: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
682: bfgsUpdates = 1;
683: ++nlsP->sgrad;
684: stepType = NLS_SCALED_GRADIENT;
685: break;
687: case NLS_SCALED_GRADIENT:
688: /* Can only enter if pc_type == NLS_PC_BFGS
689: The scaled gradient step did not produce a new iterate;
690: attempt to use the gradient direction.
691: Need to make sure we are not using a different diagonal scaling */
693: MatLMVMSetScale(nlsP->M,0);
694: MatLMVMSetDelta(nlsP->M,1.0);
695: MatLMVMReset(nlsP->M);
696: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
697: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
699: bfgsUpdates = 1;
700: ++nlsP->grad;
701: stepType = NLS_GRADIENT;
702: break;
703: }
704: VecScale(nlsP->D, -1.0);
706: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
707: TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
708: TaoAddLineSearchCounts(tao);
709: }
711: if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
712: /* Failed to find an improving point */
713: f = fold;
714: VecCopy(nlsP->Xold, tao->solution);
715: VecCopy(nlsP->Gold, tao->gradient);
716: step = 0.0;
717: reason = TAO_DIVERGED_LS_FAILURE;
718: tao->reason = TAO_DIVERGED_LS_FAILURE;
719: break;
720: }
722: /* Update trust region radius */
723: if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
724: switch(nlsP->update_type) {
725: case NLS_UPDATE_STEP:
726: if (stepType == NLS_NEWTON) {
727: if (step < nlsP->nu1) {
728: /* Very bad step taken; reduce radius */
729: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
730: } else if (step < nlsP->nu2) {
731: /* Reasonably bad step taken; reduce radius */
732: tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
733: } else if (step < nlsP->nu3) {
734: /* Reasonable step was taken; leave radius alone */
735: if (nlsP->omega3 < 1.0) {
736: tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
737: } else if (nlsP->omega3 > 1.0) {
738: tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
739: }
740: } else if (step < nlsP->nu4) {
741: /* Full step taken; increase the radius */
742: tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
743: } else {
744: /* More than full step taken; increase the radius */
745: tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
746: }
747: } else {
748: /* Newton step was not good; reduce the radius */
749: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
750: }
751: break;
753: case NLS_UPDATE_REDUCTION:
754: if (stepType == NLS_NEWTON) {
755: /* Get predicted reduction */
757: if (NLS_KSP_STCG == nlsP->ksp_type) {
758: KSPSTCGGetObjFcn(tao->ksp,&prered);
759: } else if (NLS_KSP_NASH == nlsP->ksp_type) {
760: KSPNASHGetObjFcn(tao->ksp,&prered);
761: } else {
762: KSPGLTRGetObjFcn(tao->ksp,&prered);
763: }
765: if (prered >= 0.0) {
766: /* The predicted reduction has the wrong sign. This cannot */
767: /* happen in infinite precision arithmetic. Step should */
768: /* be rejected! */
769: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
770: } else {
771: if (PetscIsInfOrNanReal(f_full)) {
772: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
773: } else {
774: /* Compute the actual reduction */
775: actred = fold - f_full;
776: prered = -prered;
777: if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
778: (PetscAbsScalar(prered) <= nlsP->epsilon)) {
779: kappa = 1.0;
780: } else {
781: kappa = actred / prered;
782: }
784: /* Accept or reject the step and update the radius */
785: if (kappa < nlsP->eta1) {
786: /* Very bad step */
787: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
788: } else if (kappa < nlsP->eta2) {
789: /* Marginally bad step */
790: tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
791: } else if (kappa < nlsP->eta3) {
792: /* Reasonable step */
793: if (nlsP->alpha3 < 1.0) {
794: tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
795: } else if (nlsP->alpha3 > 1.0) {
796: tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
797: }
798: } else if (kappa < nlsP->eta4) {
799: /* Good step */
800: tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
801: } else {
802: /* Very good step */
803: tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
804: }
805: }
806: }
807: } else {
808: /* Newton step was not good; reduce the radius */
809: tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
810: }
811: break;
813: default:
814: if (stepType == NLS_NEWTON) {
816: if (NLS_KSP_STCG == nlsP->ksp_type) {
817: KSPSTCGGetObjFcn(tao->ksp,&prered);
818: } else if (NLS_KSP_NASH == nlsP->ksp_type) {
819: KSPNASHGetObjFcn(tao->ksp,&prered);
820: } else {
821: KSPGLTRGetObjFcn(tao->ksp,&prered);
822: }
823: if (prered >= 0.0) {
824: /* The predicted reduction has the wrong sign. This cannot */
825: /* happen in infinite precision arithmetic. Step should */
826: /* be rejected! */
827: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
828: } else {
829: if (PetscIsInfOrNanReal(f_full)) {
830: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
831: } else {
832: actred = fold - f_full;
833: prered = -prered;
834: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
835: kappa = 1.0;
836: } else {
837: kappa = actred / prered;
838: }
840: tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
841: tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
842: tau_min = PetscMin(tau_1, tau_2);
843: tau_max = PetscMax(tau_1, tau_2);
845: if (kappa >= 1.0 - nlsP->mu1) {
846: /* Great agreement */
847: if (tau_max < 1.0) {
848: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
849: } else if (tau_max > nlsP->gamma4) {
850: tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
851: } else {
852: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
853: }
854: } else if (kappa >= 1.0 - nlsP->mu2) {
855: /* Good agreement */
857: if (tau_max < nlsP->gamma2) {
858: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
859: } else if (tau_max > nlsP->gamma3) {
860: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
861: } else if (tau_max < 1.0) {
862: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
863: } else {
864: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
865: }
866: } else {
867: /* Not good agreement */
868: if (tau_min > 1.0) {
869: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
870: } else if (tau_max < nlsP->gamma1) {
871: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
872: } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
873: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
874: } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
875: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
876: } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
877: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
878: } else {
879: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
880: }
881: }
882: }
883: }
884: } else {
885: /* Newton step was not good; reduce the radius */
886: tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
887: }
888: break;
889: }
891: /* The radius may have been increased; modify if it is too large */
892: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
893: }
895: /* Check for termination */
896: VecNorm(tao->gradient, NORM_2, &gnorm);
897: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
898: needH = 1;
899: TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason);
900: }
901: return(0);
902: }
904: /* ---------------------------------------------------------- */
907: static PetscErrorCode TaoSetUp_NLS(Tao tao)
908: {
909: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
913: if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
914: if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
915: if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
916: if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
917: if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
918: if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
919: nlsP->Diag = 0;
920: nlsP->M = 0;
921: return(0);
922: }
924: /*------------------------------------------------------------*/
927: static PetscErrorCode TaoDestroy_NLS(Tao tao)
928: {
929: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
933: if (tao->setupcalled) {
934: VecDestroy(&nlsP->D);
935: VecDestroy(&nlsP->W);
936: VecDestroy(&nlsP->Xold);
937: VecDestroy(&nlsP->Gold);
938: }
939: VecDestroy(&nlsP->Diag);
940: MatDestroy(&nlsP->M);
941: PetscFree(tao->data);
942: return(0);
943: }
945: /*------------------------------------------------------------*/
948: static PetscErrorCode TaoSetFromOptions_NLS(Tao tao)
949: {
950: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
954: PetscOptionsHead("Newton line search method for unconstrained optimization");
955: PetscOptionsEList("-tao_nls_ksp_type", "ksp type", "", NLS_KSP, NLS_KSP_TYPES, NLS_KSP[nlsP->ksp_type], &nlsP->ksp_type, 0);
956: PetscOptionsEList("-tao_nls_pc_type", "pc type", "", NLS_PC, NLS_PC_TYPES, NLS_PC[nlsP->pc_type], &nlsP->pc_type, 0);
957: PetscOptionsEList("-tao_nls_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[nlsP->bfgs_scale_type], &nlsP->bfgs_scale_type, 0);
958: PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
959: PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
960: PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval, 0);
961: PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin, 0);
962: PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax, 0);
963: PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac, 0);
964: PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin, 0);
965: PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax, 0);
966: PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac, 0);
967: PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac, 0);
968: PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac, 0);
969: PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac, 0);
970: PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1, 0);
971: PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2, 0);
972: PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3, 0);
973: PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4, 0);
974: PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1, 0);
975: PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2, 0);
976: PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3, 0);
977: PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4, 0);
978: PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5, 0);
979: PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1, 0);
980: PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2, 0);
981: PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3, 0);
982: PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4, 0);
983: PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1, 0);
984: PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2, 0);
985: PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3, 0);
986: PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4, 0);
987: PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5, 0);
988: PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i, 0);
989: PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i, 0);
990: PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i, 0);
991: PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i, 0);
992: PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i, 0);
993: PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i, 0);
994: PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i, 0);
995: PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1, 0);
996: PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2, 0);
997: PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1, 0);
998: PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2, 0);
999: PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3, 0);
1000: PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4, 0);
1001: PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta, 0);
1002: PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius, 0);
1003: PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius, 0);
1004: PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon, 0);
1005: PetscOptionsTail();
1006: TaoLineSearchSetFromOptions(tao->linesearch);
1007: KSPSetFromOptions(tao->ksp);
1008: return(0);
1009: }
1012: /*------------------------------------------------------------*/
1015: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
1016: {
1017: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
1018: PetscInt nrejects;
1019: PetscBool isascii;
1023: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
1024: if (isascii) {
1025: PetscViewerASCIIPushTab(viewer);
1026: if (NLS_PC_BFGS == nlsP->pc_type && nlsP->M) {
1027: MatLMVMGetRejects(nlsP->M,&nrejects);
1028: PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n",nrejects);
1029: }
1030: PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
1031: PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
1032: PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", nlsP->sgrad);
1033: PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);
1035: PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
1036: PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
1037: PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
1038: PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
1039: PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
1040: PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
1041: PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
1042: PetscViewerASCIIPopTab(viewer);
1043: }
1044: return(0);
1045: }
1047: /* ---------------------------------------------------------- */
1048: /*MC
1049: TAONLS - Newton's method with linesearch for unconstrained minimization.
1050: At each iteration, the Newton line search method solves the symmetric
1051: system of equations to obtain the step direction dk:
1052:     Hk dk = -gk
1053: A Moré-Thuente line search is then applied along the direction dk to approximately
1054: solve
1055:     min_t f(xk + t dk)
1057: Options Database Keys:
1058: + -tao_nls_ksp_type - "cg","nash","stcg","gltr","petsc"
1059: . -tao_nls_pc_type - "none","ahess","bfgs","petsc"
1060: . -tao_nls_bfgs_scale_type - "ahess","phess","bfgs"
1061: . -tao_nls_init_type - "constant","direction","interpolation"
1062: . -tao_nls_update_type - "step","reduction","interpolation"
1063: . -tao_nls_sval - perturbation starting value
1064: . -tao_nls_imin - minimum initial perturbation
1065: . -tao_nls_imax - maximum initial perturbation
1066: . -tao_nls_pmin - minimum perturbation
1067: . -tao_nls_pmax - maximum perturbation
1068: . -tao_nls_pgfac - growth factor
1069: . -tao_nls_psfac - shrink factor
1070: . -tao_nls_imfac - initial merit factor
1071: . -tao_nls_pmgfac - merit growth factor
1072: . -tao_nls_pmsfac - merit shrink factor
1073: . -tao_nls_eta1 - poor steplength; reduce radius
1074: . -tao_nls_eta2 - reasonable steplength; leave radius
1075: . -tao_nls_eta3 - good steplength; increase radius
1076: . -tao_nls_eta4 - excellent steplength; greatly increase radius
1077: . -tao_nls_alpha1 - alpha1 reduction
1078: . -tao_nls_alpha2 - alpha2 reduction
1079: . -tao_nls_alpha3 - alpha3 reduction
1080: . -tao_nls_alpha4 - alpha4 reduction
1081: . -tao_nls_alpha5 - alpha5 reduction
1082: . -tao_nls_mu1 - mu1 interpolation update
1083: . -tao_nls_mu2 - mu2 interpolation update
1084: . -tao_nls_gamma1 - gamma1 interpolation update
1085: . -tao_nls_gamma2 - gamma2 interpolation update
1086: . -tao_nls_gamma3 - gamma3 interpolation update
1087: . -tao_nls_gamma4 - gamma4 interpolation update
1088: . -tao_nls_theta - theta interpolation update
1089: . -tao_nls_omega1 - omega1 step update
1090: . -tao_nls_omega2 - omega2 step update
1091: . -tao_nls_omega3 - omega3 step update
1092: . -tao_nls_omega4 - omega4 step update
1093: . -tao_nls_omega5 - omega5 step update
1094: . -tao_nls_mu1_i - mu1 interpolation init factor
1095: . -tao_nls_mu2_i - mu2 interpolation init factor
1096: . -tao_nls_gamma1_i - gamma1 interpolation init factor
1097: . -tao_nls_gamma2_i - gamma2 interpolation init factor
1098: . -tao_nls_gamma3_i - gamma3 interpolation init factor
1099: . -tao_nls_gamma4_i - gamma4 interpolation init factor
1100: - -tao_nls_theta_i - theta interpolation init factor
1102: Level: beginner
1103: M*/
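/* Editor's note: the following is an illustrative usage sketch, not part of
   the original nls.c.  It assumes the standard PETSc 3.5 Tao interface
   (TaoCreate, TaoSetType, TaoSetInitialVector, TaoSetObjectiveAndGradientRoutine,
   TaoSetHessianRoutine, TaoSolve) and minimizes the trivial quadratic
   f(x) = 0.5 x'x so that TAONLS and the -tao_nls_* options documented above
   can be exercised; a real application would supply its own callbacks.

   #include <petsctao.h>

   static PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f,Vec G,void *ctx)
   {
     PetscErrorCode ierr;
     PetscReal      nrm;

     ierr = VecNorm(X,NORM_2,&nrm);CHKERRQ(ierr);
     *f   = 0.5*nrm*nrm;                         // f(x) = 0.5 ||x||^2
     ierr = VecCopy(X,G);CHKERRQ(ierr);          // grad f(x) = x
     return 0;
   }

   static PetscErrorCode FormHessian(Tao tao,Vec X,Mat H,Mat Hpre,void *ctx)
   {
     PetscErrorCode ierr;
     PetscInt       i,rstart,rend;

     ierr = MatGetOwnershipRange(H,&rstart,&rend);CHKERRQ(ierr);
     for (i=rstart; i<rend; i++) {
       ierr = MatSetValue(H,i,i,1.0,INSERT_VALUES);CHKERRQ(ierr);   // Hessian = I
     }
     ierr = MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     return 0;
   }

   int main(int argc,char **argv)
   {
     PetscErrorCode ierr;
     Tao            tao;
     Vec            x;
     Mat            H;

     ierr = PetscInitialize(&argc,&argv,NULL,NULL);CHKERRQ(ierr);
     ierr = VecCreateSeq(PETSC_COMM_SELF,10,&x);CHKERRQ(ierr);
     ierr = VecSet(x,1.0);CHKERRQ(ierr);
     ierr = MatCreateSeqAIJ(PETSC_COMM_SELF,10,10,1,NULL,&H);CHKERRQ(ierr);

     ierr = TaoCreate(PETSC_COMM_SELF,&tao);CHKERRQ(ierr);
     ierr = TaoSetType(tao,TAONLS);CHKERRQ(ierr);
     ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
     ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);CHKERRQ(ierr);
     ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,NULL);CHKERRQ(ierr);
     ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);  // picks up -tao_nls_* options
     ierr = TaoSolve(tao);CHKERRQ(ierr);

     ierr = TaoDestroy(&tao);CHKERRQ(ierr);
     ierr = VecDestroy(&x);CHKERRQ(ierr);
     ierr = MatDestroy(&H);CHKERRQ(ierr);
     ierr = PetscFinalize();
     return 0;
   }
*/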
1105: EXTERN_C_BEGIN
1108: PetscErrorCode TaoCreate_NLS(Tao tao)
1109: {
1110: TAO_NLS *nlsP;
1111: const char *morethuente_type = TAOLINESEARCHMT;
1115: PetscNewLog(tao,&nlsP);
1117: tao->ops->setup = TaoSetUp_NLS;
1118: tao->ops->solve = TaoSolve_NLS;
1119: tao->ops->view = TaoView_NLS;
1120: tao->ops->setfromoptions = TaoSetFromOptions_NLS;
1121: tao->ops->destroy = TaoDestroy_NLS;
1123: tao->max_it = 50;
1124: #if defined(PETSC_USE_REAL_SINGLE)
1125: tao->fatol = 1e-5;
1126: tao->frtol = 1e-5;
1127: #else
1128: tao->fatol = 1e-10;
1129: tao->frtol = 1e-10;
1130: #endif
1131: tao->data = (void*)nlsP;
1132: tao->trust0 = 100.0;
1134: nlsP->sval = 0.0;
1135: nlsP->imin = 1.0e-4;
1136: nlsP->imax = 1.0e+2;
1137: nlsP->imfac = 1.0e-1;
1139: nlsP->pmin = 1.0e-12;
1140: nlsP->pmax = 1.0e+2;
1141: nlsP->pgfac = 1.0e+1;
1142: nlsP->psfac = 4.0e-1;
1143: nlsP->pmgfac = 1.0e-1;
1144: nlsP->pmsfac = 1.0e-1;
1146: /* Default values for trust-region radius update based on steplength */
1147: nlsP->nu1 = 0.25;
1148: nlsP->nu2 = 0.50;
1149: nlsP->nu3 = 1.00;
1150: nlsP->nu4 = 1.25;
1152: nlsP->omega1 = 0.25;
1153: nlsP->omega2 = 0.50;
1154: nlsP->omega3 = 1.00;
1155: nlsP->omega4 = 2.00;
1156: nlsP->omega5 = 4.00;
1158: /* Default values for trust-region radius update based on reduction */
1159: nlsP->eta1 = 1.0e-4;
1160: nlsP->eta2 = 0.25;
1161: nlsP->eta3 = 0.50;
1162: nlsP->eta4 = 0.90;
1164: nlsP->alpha1 = 0.25;
1165: nlsP->alpha2 = 0.50;
1166: nlsP->alpha3 = 1.00;
1167: nlsP->alpha4 = 2.00;
1168: nlsP->alpha5 = 4.00;
1170: /* Default values for trust-region radius update based on interpolation */
1171: nlsP->mu1 = 0.10;
1172: nlsP->mu2 = 0.50;
1174: nlsP->gamma1 = 0.25;
1175: nlsP->gamma2 = 0.50;
1176: nlsP->gamma3 = 2.00;
1177: nlsP->gamma4 = 4.00;
1179: nlsP->theta = 0.05;
1181: /* Default values for trust region initialization based on interpolation */
1182: nlsP->mu1_i = 0.35;
1183: nlsP->mu2_i = 0.50;
1185: nlsP->gamma1_i = 0.0625;
1186: nlsP->gamma2_i = 0.5;
1187: nlsP->gamma3_i = 2.0;
1188: nlsP->gamma4_i = 5.0;
1190: nlsP->theta_i = 0.25;
1192: /* Remaining parameters */
1193: nlsP->min_radius = 1.0e-10;
1194: nlsP->max_radius = 1.0e10;
1195: nlsP->epsilon = 1.0e-6;
1197: nlsP->ksp_type = NLS_KSP_STCG;
1198: nlsP->pc_type = NLS_PC_BFGS;
1199: nlsP->bfgs_scale_type = BFGS_SCALE_PHESS;
1200: nlsP->init_type = NLS_INIT_INTERPOLATION;
1201: nlsP->update_type = NLS_UPDATE_STEP;
1203: TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
1204: TaoLineSearchSetType(tao->linesearch,morethuente_type);
1205: TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
1207: /* Set linear solver to default for symmetric matrices */
1208: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
1209: return(0);
1210: }
1211: EXTERN_C_END
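/* Apply routine for the PCSHELL preconditioner configured in TaoSolve_NLS():
   the shell context is the LMVM matrix, so applying the preconditioner means
   applying the inverse limited-memory BFGS approximation via MatLMVMSolve(). */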
1215: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
1216: {
1218: Mat M;
1224: PCShellGetContext(pc,(void**)&M);
1225: MatLMVMSolve(M, b, x);
1226: return(0);
1227: }