Actual source code: nls.c
petsc-3.9.4 2018-09-11
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/matrix/lmvmmat.h>
3: #include <../src/tao/unconstrained/impls/nls/nlsimpl.h>
5: #include <petscksp.h>
7: #define NLS_PC_NONE 0
8: #define NLS_PC_AHESS 1
9: #define NLS_PC_BFGS 2
10: #define NLS_PC_PETSC 3
11: #define NLS_PC_TYPES 4
13: #define BFGS_SCALE_AHESS 0
14: #define BFGS_SCALE_PHESS 1
15: #define BFGS_SCALE_BFGS 2
16: #define BFGS_SCALE_TYPES 3
18: #define NLS_INIT_CONSTANT 0
19: #define NLS_INIT_DIRECTION 1
20: #define NLS_INIT_INTERPOLATION 2
21: #define NLS_INIT_TYPES 3
23: #define NLS_UPDATE_STEP 0
24: #define NLS_UPDATE_REDUCTION 1
25: #define NLS_UPDATE_INTERPOLATION 2
26: #define NLS_UPDATE_TYPES 3
28: static const char *NLS_PC[64] = {"none", "ahess", "bfgs", "petsc"};
30: static const char *BFGS_SCALE[64] = {"ahess", "phess", "bfgs"};
32: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};
34: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};
36: /* Routine for BFGS preconditioner */
38: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
39: {
41: Mat M;
47: PCShellGetContext(pc,(void**)&M);
48: MatLMVMSolve(M, b, x);
49: return(0);
50: }
52: /*
53: Implements Newton's Method with a line search approach for solving
54: unconstrained minimization problems. A More'-Thuente line search
55: is used to guarantee that the bfgs preconditioner remains positive
56: definite.
58: The method can shift the Hessian matrix. The shifting procedure is
59: adapted from the PATH algorithm for solving complementarity
60: problems.
62: The linear system solve should be done with a conjugate gradient
63: method, although any method can be used.
64: */
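/*
   Informal sketch of one outer iteration (an annotation, simplified from the actual
   logic below; names follow the variables used in TaoSolve_NLS):

     H_k <- Hessian(x_k) + pert*I                  shifted Hessian, pert >= 0
     solve H_k d = g_k approximately with the KSP, then set d <- -d
     if d is not a descent direction, fall back to a BFGS or (scaled) gradient direction
     More'-Thuente line search along d to obtain x_{k+1} and the step length
     update pert and, for the CG trust-region solvers (nash, stcg, gltr), the radius
*/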
66: #define NLS_NEWTON 0
67: #define NLS_BFGS 1
68: #define NLS_SCALED_GRADIENT 2
69: #define NLS_GRADIENT 3
71: static PetscErrorCode TaoSolve_NLS(Tao tao)
72: {
73: PetscErrorCode ierr;
74: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
75: KSPType ksp_type;
76: PetscBool is_nash,is_stcg,is_gltr;
77: KSPConvergedReason ksp_reason;
78: PC pc;
79: TaoLineSearchConvergedReason ls_reason;
81: PetscReal fmin, ftrial, f_full, prered, actred, kappa, sigma;
82: PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
83: PetscReal f, fold, gdx, gnorm, pert;
84: PetscReal step = 1.0;
85: PetscReal delta;
86: PetscReal norm_d = 0.0, e_min;
88: PetscInt stepType;
89: PetscInt bfgsUpdates = 0;
90: PetscInt n,N,kspits;
91: PetscInt needH = 1;
93: PetscInt i_max = 5;
94: PetscInt j_max = 1;
95: PetscInt i, j;
98: if (tao->XL || tao->XU || tao->ops->computebounds) {
99: PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
100: }
102: /* Initialize variables */
103: pert = nlsP->sval;
105: /* Number of times ksp stopped because of these reasons */
106: nlsP->ksp_atol = 0;
107: nlsP->ksp_rtol = 0;
108: nlsP->ksp_dtol = 0;
109: nlsP->ksp_ctol = 0;
110: nlsP->ksp_negc = 0;
111: nlsP->ksp_iter = 0;
112: nlsP->ksp_othr = 0;
114: /* Initialize trust-region radius when using nash, stcg, or gltr
115: This setting is automatically ignored for other methods
116: Will be reset during the first iteration
117: */
118: KSPGetType(tao->ksp,&ksp_type);
119: PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);
120: PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);
121: PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);
123: KSPCGSetRadius(tao->ksp,nlsP->max_radius);
125: if (is_nash || is_stcg || is_gltr) {
126: if (tao->trust0 < 0.0) SETERRQ(PETSC_COMM_SELF,1,"Initial radius negative");
127: tao->trust = tao->trust0;
128: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
129: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
130: }
132: /* Get vectors we will need */
133: if (NLS_PC_BFGS == nlsP->pc_type && !nlsP->M) {
134: VecGetLocalSize(tao->solution,&n);
135: VecGetSize(tao->solution,&N);
136: MatCreateLMVM(((PetscObject)tao)->comm,n,N,&nlsP->M);
137: MatLMVMAllocateVectors(nlsP->M,tao->solution);
138: }
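/* The LMVM matrix is a limited-memory variable-metric (BFGS) approximation with the same
   layout as the solution vector; it is created here only when it will be used as the
   preconditioner for the Newton system. */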
140: /* Check convergence criteria */
141: TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
142: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
143: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
144:
145: tao->reason = TAO_CONTINUE_ITERATING;
146: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
147: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
148: (*tao->ops->convergencetest)(tao,tao->cnvP);
149: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
151: /* create vectors for the limited memory preconditioner */
152: if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_BFGS != nlsP->bfgs_scale_type)) {
153: if (!nlsP->Diag) {
154: VecDuplicate(tao->solution,&nlsP->Diag);
155: }
156: }
158: /* Modify the preconditioner to use the bfgs approximation */
159: KSPGetPC(tao->ksp, &pc);
160: switch(nlsP->pc_type) {
161: case NLS_PC_NONE:
162: PCSetType(pc, PCNONE);
163: PCSetFromOptions(pc);
164: break;
166: case NLS_PC_AHESS:
167: PCSetType(pc, PCJACOBI);
168: PCSetFromOptions(pc);
169: PCJacobiSetUseAbs(pc,PETSC_TRUE);
170: break;
172: case NLS_PC_BFGS:
173: PCSetType(pc, PCSHELL);
174: PCSetFromOptions(pc);
175: PCShellSetName(pc, "bfgs");
176: PCShellSetContext(pc, nlsP->M);
177: PCShellSetApply(pc, MatLMVMSolveShell);
178: break;
180: default:
181: /* Use the pc method set by pc_type */
182: break;
183: }
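/* Preconditioner choices selected above: "none" applies no preconditioning, "ahess" is
   Jacobi using the absolute values of the Hessian diagonal, "bfgs" is the LMVM shell
   defined in MatLMVMSolveShell(), and "petsc" (the default case) leaves whatever PC has
   been configured through the usual PETSc options. */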
185: /* Initialize trust-region radius. The initialization is only performed
186: when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
187: if (is_nash || is_stcg || is_gltr) {
188: switch(nlsP->init_type) {
189: case NLS_INIT_CONSTANT:
190: /* Use the initial radius specified */
191: break;
193: case NLS_INIT_INTERPOLATION:
194: /* Use interpolation to determine the initial radius */
195: max_radius = 0.0;
197: for (j = 0; j < j_max; ++j) {
198: fmin = f;
199: sigma = 0.0;
201: if (needH) {
202: TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
203: needH = 0;
204: }
206: for (i = 0; i < i_max; ++i) {
207: VecCopy(tao->solution,nlsP->W);
208: VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
209: TaoComputeObjective(tao, nlsP->W, &ftrial);
210: if (PetscIsInfOrNanReal(ftrial)) {
211: tau = nlsP->gamma1_i;
212: } else {
213: if (ftrial < fmin) {
214: fmin = ftrial;
215: sigma = -tao->trust / gnorm;
216: }
218: MatMult(tao->hessian, tao->gradient, nlsP->D);
219: VecDot(tao->gradient, nlsP->D, &prered);
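/* Quadratic-model decrease along the steepest-descent step of length tao->trust:
   prered = trust*||g|| - 0.5*trust^2*(g'Hg)/||g||^2, where prered currently holds g'Hg */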
221: prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
222: actred = f - ftrial;
223: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
224: kappa = 1.0;
225: } else {
226: kappa = actred / prered;
227: }
229: tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
230: tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
231: tau_min = PetscMin(tau_1, tau_2);
232: tau_max = PetscMax(tau_1, tau_2);
234: if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu1_i) {
235: /* Great agreement */
236: max_radius = PetscMax(max_radius, tao->trust);
238: if (tau_max < 1.0) {
239: tau = nlsP->gamma3_i;
240: } else if (tau_max > nlsP->gamma4_i) {
241: tau = nlsP->gamma4_i;
242: } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
243: tau = tau_1;
244: } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
245: tau = tau_2;
246: } else {
247: tau = tau_max;
248: }
249: } else if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu2_i) {
250: /* Good agreement */
251: max_radius = PetscMax(max_radius, tao->trust);
253: if (tau_max < nlsP->gamma2_i) {
254: tau = nlsP->gamma2_i;
255: } else if (tau_max > nlsP->gamma3_i) {
256: tau = nlsP->gamma3_i;
257: } else {
258: tau = tau_max;
259: }
260: } else {
261: /* Not good agreement */
262: if (tau_min > 1.0) {
263: tau = nlsP->gamma2_i;
264: } else if (tau_max < nlsP->gamma1_i) {
265: tau = nlsP->gamma1_i;
266: } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
267: tau = nlsP->gamma1_i;
268: } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
269: tau = tau_1;
270: } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
271: tau = tau_2;
272: } else {
273: tau = tau_max;
274: }
275: }
276: }
277: tao->trust = tau * tao->trust;
278: }
280: if (fmin < f) {
281: f = fmin;
282: VecAXPY(tao->solution,sigma,tao->gradient);
283: TaoComputeGradient(tao,tao->solution,tao->gradient);
285: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
286: if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute gradient generated Inf or NaN");
287: needH = 1;
289: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
290: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
291: (*tao->ops->convergencetest)(tao,tao->cnvP);
292: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
293: }
294: }
295: tao->trust = PetscMax(tao->trust, max_radius);
297: /* Modify the radius if it is too large or small */
298: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
299: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
300: break;
302: default:
303: /* Norm of the first direction will initialize radius */
304: tao->trust = 0.0;
305: break;
306: }
307: }
309: /* Set initial scaling for the BFGS preconditioner
310: This step is done after computing the initial trust-region radius
311: since the function value may have decreased */
312: if (NLS_PC_BFGS == nlsP->pc_type) {
313: if (f != 0.0) {
314: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
315: } else {
316: delta = 2.0 / (gnorm*gnorm);
317: }
318: MatLMVMSetDelta(nlsP->M,delta);
319: }
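/* Note on the scaling heuristic above: for a convex quadratic that vanishes at its
   minimizer, 2|f|/||g||^2 equals the Rayleigh quotient g'H^{-1}g/(g'g), so it serves as
   a rough estimate of the inverse curvature used to seed the LMVM approximation. */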
321: /* Set counters for gradient/reset steps */
322: nlsP->newt = 0;
323: nlsP->bfgs = 0;
324: nlsP->sgrad = 0;
325: nlsP->grad = 0;
327: /* Have not converged; continue with Newton method */
328: while (tao->reason == TAO_CONTINUE_ITERATING) {
329: ++tao->niter;
330: tao->ksp_its=0;
332: /* Compute the Hessian */
333: if (needH) {
334: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
335: }
337: if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_AHESS == nlsP->bfgs_scale_type)) {
338: /* Obtain diagonal for the bfgs preconditioner */
339: MatGetDiagonal(tao->hessian, nlsP->Diag);
340: VecAbs(nlsP->Diag);
341: VecReciprocal(nlsP->Diag);
342: MatLMVMSetScale(nlsP->M,nlsP->Diag);
343: }
345: /* Shift the Hessian matrix */
346: if (pert > 0) {
347: MatShift(tao->hessian, pert);
348: if (tao->hessian != tao->hessian_pre) {
349: MatShift(tao->hessian_pre, pert);
350: }
351: }
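/* MatShift() adds pert to every diagonal entry, i.e. H <- H + pert*I (and likewise for
   the preconditioning matrix), pushing the spectrum toward positive definiteness so that
   the CG-type solves below remain well defined. */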
353: if (NLS_PC_BFGS == nlsP->pc_type) {
354: if (BFGS_SCALE_PHESS == nlsP->bfgs_scale_type) {
355: /* Obtain diagonal for the bfgs preconditioner */
356: MatGetDiagonal(tao->hessian, nlsP->Diag);
357: VecAbs(nlsP->Diag);
358: VecReciprocal(nlsP->Diag);
359: MatLMVMSetScale(nlsP->M,nlsP->Diag);
360: }
361: /* Update the limited memory preconditioner */
362: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
363: ++bfgsUpdates;
364: }
366: /* Solve the Newton system of equations */
367: KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
368: if (is_nash || is_stcg || is_gltr) {
369: KSPCGSetRadius(tao->ksp,nlsP->max_radius);
370: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
371: KSPGetIterationNumber(tao->ksp,&kspits);
372: tao->ksp_its+=kspits;
373: tao->ksp_tot_its+=kspits;
374: KSPCGGetNormD(tao->ksp,&norm_d);
376: if (0.0 == tao->trust) {
377: /* Radius was uninitialized; use the norm of the direction */
378: if (norm_d > 0.0) {
379: tao->trust = norm_d;
381: /* Modify the radius if it is too large or small */
382: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
383: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
384: } else {
385: /* The direction was bad; set radius to default value and re-solve
386: the trust-region subproblem to get a direction */
387: tao->trust = tao->trust0;
389: /* Modify the radius if it is too large or small */
390: tao->trust = PetscMax(tao->trust, nlsP->min_radius);
391: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
393: KSPCGSetRadius(tao->ksp,nlsP->max_radius);
394: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
395: KSPGetIterationNumber(tao->ksp,&kspits);
396: tao->ksp_its+=kspits;
397: tao->ksp_tot_its+=kspits;
398: KSPCGGetNormD(tao->ksp,&norm_d);
400: if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
401: }
402: }
403: } else {
404: KSPSolve(tao->ksp, tao->gradient, nlsP->D);
405: KSPGetIterationNumber(tao->ksp, &kspits);
406: tao->ksp_its += kspits;
407: tao->ksp_tot_its+=kspits;
408: }
409: VecScale(nlsP->D, -1.0);
410: KSPGetConvergedReason(tao->ksp, &ksp_reason);
411: if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NLS_PC_BFGS == nlsP->pc_type) && (bfgsUpdates > 1)) {
412: /* Preconditioner is numerically indefinite; reset the
413: approximation if using BFGS preconditioning. */
415: if (f != 0.0) {
416: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
417: } else {
418: delta = 2.0 / (gnorm*gnorm);
419: }
420: MatLMVMSetDelta(nlsP->M,delta);
421: MatLMVMReset(nlsP->M);
422: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
423: bfgsUpdates = 1;
424: }
426: if (KSP_CONVERGED_ATOL == ksp_reason) {
427: ++nlsP->ksp_atol;
428: } else if (KSP_CONVERGED_RTOL == ksp_reason) {
429: ++nlsP->ksp_rtol;
430: } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
431: ++nlsP->ksp_ctol;
432: } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
433: ++nlsP->ksp_negc;
434: } else if (KSP_DIVERGED_DTOL == ksp_reason) {
435: ++nlsP->ksp_dtol;
436: } else if (KSP_DIVERGED_ITS == ksp_reason) {
437: ++nlsP->ksp_iter;
438: } else {
439: ++nlsP->ksp_othr;
440: }
442: /* Check for success (descent direction) */
443: VecDot(nlsP->D, tao->gradient, &gdx);
444: if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
445: /* Newton step is not a descent direction or produced Inf or NaN;
446: Update the perturbation for next time */
447: if (pert <= 0.0) {
448: /* Initialize the perturbation */
449: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
450: if (is_gltr) {
451: KSPCGGLTRGetMinEig(tao->ksp,&e_min);
452: pert = PetscMax(pert, -e_min);
453: }
454: } else {
455: /* Increase the perturbation */
456: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
457: }
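/* Perturbation schedule: pert starts at max(imin, imfac*||g||), capped at imax (and is
   raised to -lambda_min when GLTR supplies an eigenvalue estimate); on later failures it
   grows by max(pgfac*pert, pmgfac*||g||), capped at pmax.  It is shrunk, and eventually
   zeroed, whenever the Newton step computation succeeds (see the default case below). */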
459: if (NLS_PC_BFGS != nlsP->pc_type) {
460: /* We don't have the BFGS matrix around and updated;
461: must use the gradient direction in this case */
462: VecCopy(tao->gradient, nlsP->D);
463: VecScale(nlsP->D, -1.0);
464: ++nlsP->grad;
465: stepType = NLS_GRADIENT;
466: } else {
467: /* Attempt to use the BFGS direction */
468: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
469: VecScale(nlsP->D, -1.0);
471: /* Check for success (descent direction) */
472: VecDot(tao->gradient, nlsP->D, &gdx);
473: if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
474: /* BFGS direction is not a descent direction or produced Inf or NaN.
475: We can assert bfgsUpdates > 1 in this case because
476: the first solve produces the scaled gradient direction,
477: which is guaranteed to be descent */
479: /* Use steepest descent direction (scaled) */
481: if (f != 0.0) {
482: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
483: } else {
484: delta = 2.0 / (gnorm*gnorm);
485: }
486: MatLMVMSetDelta(nlsP->M, delta);
487: MatLMVMReset(nlsP->M);
488: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
489: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
490: VecScale(nlsP->D, -1.0);
492: bfgsUpdates = 1;
493: ++nlsP->sgrad;
494: stepType = NLS_SCALED_GRADIENT;
495: } else {
496: if (1 == bfgsUpdates) {
497: /* The first BFGS direction is always the scaled gradient */
498: ++nlsP->sgrad;
499: stepType = NLS_SCALED_GRADIENT;
500: } else {
501: ++nlsP->bfgs;
502: stepType = NLS_BFGS;
503: }
504: }
505: }
506: } else {
507: /* Computed Newton step is descent */
508: switch (ksp_reason) {
509: case KSP_DIVERGED_NANORINF:
510: case KSP_DIVERGED_BREAKDOWN:
511: case KSP_DIVERGED_INDEFINITE_MAT:
512: case KSP_DIVERGED_INDEFINITE_PC:
513: case KSP_CONVERGED_CG_NEG_CURVE:
514: /* Matrix or preconditioner is indefinite; increase perturbation */
515: if (pert <= 0.0) {
516: /* Initialize the perturbation */
517: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
518: if (is_gltr) {
519: KSPCGGLTRGetMinEig(tao->ksp, &e_min);
520: pert = PetscMax(pert, -e_min);
521: }
522: } else {
523: /* Increase the perturbation */
524: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
525: }
526: break;
528: default:
529: /* Newton step computation is good; decrease perturbation */
530: pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
531: if (pert < nlsP->pmin) {
532: pert = 0.0;
533: }
534: break;
535: }
537: ++nlsP->newt;
538: stepType = NLS_NEWTON;
539: }
541: /* Perform the linesearch */
542: fold = f;
543: VecCopy(tao->solution, nlsP->Xold);
544: VecCopy(tao->gradient, nlsP->Gold);
546: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
547: TaoAddLineSearchCounts(tao);
549: while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) { /* Linesearch failed */
550: f = fold;
551: VecCopy(nlsP->Xold, tao->solution);
552: VecCopy(nlsP->Gold, tao->gradient);
554: switch(stepType) {
555: case NLS_NEWTON:
556: /* Failed to obtain an acceptable iterate with the Newton step.
557: Update the perturbation for next time */
558: if (pert <= 0.0) {
559: /* Initialize the perturbation */
560: pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
561: if (is_gltr) {
562: KSPCGGLTRGetMinEig(tao->ksp,&e_min);
563: pert = PetscMax(pert, -e_min);
564: }
565: } else {
566: /* Increase the perturbation */
567: pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
568: }
570: if (NLS_PC_BFGS != nlsP->pc_type) {
571: /* We don't have the BFGS matrix around and being updated;
572: must use the gradient direction in this case */
573: VecCopy(tao->gradient, nlsP->D);
574: ++nlsP->grad;
575: stepType = NLS_GRADIENT;
576: } else {
577: /* Attempt to use the BFGS direction */
578: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
579: /* Check for success (descent direction) */
580: VecDot(tao->gradient, nlsP->D, &gdx);
581: if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
582: /* BFGS direction is not a descent direction or produced Inf or NaN.
583: We can assert bfgsUpdates > 1 in this case
584: Use steepest descent direction (scaled) */
586: if (f != 0.0) {
587: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
588: } else {
589: delta = 2.0 / (gnorm*gnorm);
590: }
591: MatLMVMSetDelta(nlsP->M, delta);
592: MatLMVMReset(nlsP->M);
593: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
594: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
596: bfgsUpdates = 1;
597: ++nlsP->sgrad;
598: stepType = NLS_SCALED_GRADIENT;
599: } else {
600: if (1 == bfgsUpdates) {
601: /* The first BFGS direction is always the scaled gradient */
602: ++nlsP->sgrad;
603: stepType = NLS_SCALED_GRADIENT;
604: } else {
605: ++nlsP->bfgs;
606: stepType = NLS_BFGS;
607: }
608: }
609: }
610: break;
612: case NLS_BFGS:
613: /* Can only enter if pc_type == NLS_PC_BFGS
614: Failed to obtain acceptable iterate with BFGS step
615: Attempt to use the scaled gradient direction */
617: if (f != 0.0) {
618: delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
619: } else {
620: delta = 2.0 / (gnorm*gnorm);
621: }
622: MatLMVMSetDelta(nlsP->M, delta);
623: MatLMVMReset(nlsP->M);
624: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
625: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
627: bfgsUpdates = 1;
628: ++nlsP->sgrad;
629: stepType = NLS_SCALED_GRADIENT;
630: break;
632: case NLS_SCALED_GRADIENT:
633: /* Can only enter if pc_type == NLS_PC_BFGS
634: The scaled gradient step did not produce a new iterate;
635: attempt to use the gradient direction.
636: Need to make sure we are not using a different diagonal scaling */
638: MatLMVMSetScale(nlsP->M,0);
639: MatLMVMSetDelta(nlsP->M,1.0);
640: MatLMVMReset(nlsP->M);
641: MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
642: MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
644: bfgsUpdates = 1;
645: ++nlsP->grad;
646: stepType = NLS_GRADIENT;
647: break;
648: }
649: VecScale(nlsP->D, -1.0);
651: TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
652: TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
653: TaoAddLineSearchCounts(tao);
654: }
656: if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
657: /* Failed to find an improving point */
658: f = fold;
659: VecCopy(nlsP->Xold, tao->solution);
660: VecCopy(nlsP->Gold, tao->gradient);
661: step = 0.0;
662: tao->reason = TAO_DIVERGED_LS_FAILURE;
663: break;
664: }
666: /* Update trust region radius */
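/* Three strategies, selected by -tao_nls_update_type: "step" resizes the radius based on
   the accepted line-search step length, "reduction" based on the ratio of actual to
   predicted reduction, and "interpolation" based on an interpolated version of that ratio. */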
667: if (is_nash || is_stcg || is_gltr) {
668: switch(nlsP->update_type) {
669: case NLS_UPDATE_STEP:
670: if (stepType == NLS_NEWTON) {
671: if (step < nlsP->nu1) {
672: /* Very bad step taken; reduce radius */
673: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
674: } else if (step < nlsP->nu2) {
675: /* Reasonably bad step taken; reduce radius */
676: tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
677: } else if (step < nlsP->nu3) {
678: /* Reasonable step was taken; leave radius alone */
679: if (nlsP->omega3 < 1.0) {
680: tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
681: } else if (nlsP->omega3 > 1.0) {
682: tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
683: }
684: } else if (step < nlsP->nu4) {
685: /* Full step taken; increase the radius */
686: tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
687: } else {
688: /* More than full step taken; increase the radius */
689: tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
690: }
691: } else {
692: /* Newton step was not good; reduce the radius */
693: tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
694: }
695: break;
697: case NLS_UPDATE_REDUCTION:
698: if (stepType == NLS_NEWTON) {
699: /* Get predicted reduction */
701: KSPCGGetObjFcn(tao->ksp,&prered);
702: if (prered >= 0.0) {
703: /* The predicted reduction has the wrong sign. This cannot */
704: /* happen in infinite precision arithmetic. Step should */
705: /* be rejected! */
706: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
707: } else {
708: if (PetscIsInfOrNanReal(f_full)) {
709: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
710: } else {
711: /* Compute the actual reduction */
712: actred = fold - f_full;
713: prered = -prered;
714: if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
715: (PetscAbsScalar(prered) <= nlsP->epsilon)) {
716: kappa = 1.0;
717: } else {
718: kappa = actred / prered;
719: }
721: /* Accept or reject the step and update the radius */
722: if (kappa < nlsP->eta1) {
723: /* Very bad step */
724: tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
725: } else if (kappa < nlsP->eta2) {
726: /* Marginally bad step */
727: tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
728: } else if (kappa < nlsP->eta3) {
729: /* Reasonable step */
730: if (nlsP->alpha3 < 1.0) {
731: tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
732: } else if (nlsP->alpha3 > 1.0) {
733: tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
734: }
735: } else if (kappa < nlsP->eta4) {
736: /* Good step */
737: tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
738: } else {
739: /* Very good step */
740: tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
741: }
742: }
743: }
744: } else {
745: /* Newton step was not good; reduce the radius */
746: tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
747: }
748: break;
750: default:
751: if (stepType == NLS_NEWTON) {
752: KSPCGGetObjFcn(tao->ksp,&prered);
753: if (prered >= 0.0) {
754: /* The predicted reduction has the wrong sign. This cannot */
755: /* happen in infinite precision arithmetic. Step should */
756: /* be rejected! */
757: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
758: } else {
759: if (PetscIsInfOrNanReal(f_full)) {
760: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
761: } else {
762: actred = fold - f_full;
763: prered = -prered;
764: if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
765: kappa = 1.0;
766: } else {
767: kappa = actred / prered;
768: }
770: tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
771: tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
772: tau_min = PetscMin(tau_1, tau_2);
773: tau_max = PetscMax(tau_1, tau_2);
775: if (kappa >= 1.0 - nlsP->mu1) {
776: /* Great agreement */
777: if (tau_max < 1.0) {
778: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
779: } else if (tau_max > nlsP->gamma4) {
780: tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
781: } else {
782: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
783: }
784: } else if (kappa >= 1.0 - nlsP->mu2) {
785: /* Good agreement */
787: if (tau_max < nlsP->gamma2) {
788: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
789: } else if (tau_max > nlsP->gamma3) {
790: tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
791: } else if (tau_max < 1.0) {
792: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
793: } else {
794: tao->trust = PetscMax(tao->trust, tau_max * norm_d);
795: }
796: } else {
797: /* Not good agreement */
798: if (tau_min > 1.0) {
799: tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
800: } else if (tau_max < nlsP->gamma1) {
801: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
802: } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
803: tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
804: } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
805: tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
806: } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
807: tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
808: } else {
809: tao->trust = tau_max * PetscMin(tao->trust, norm_d);
810: }
811: }
812: }
813: }
814: } else {
815: /* Newton step was not good; reduce the radius */
816: tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
817: }
818: break;
819: }
821: /* The radius may have been increased; modify if it is too large */
822: tao->trust = PetscMin(tao->trust, nlsP->max_radius);
823: }
825: /* Check for termination */
826: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
827: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Not-a-Number");
828: needH = 1;
829: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
830: TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
831: (*tao->ops->convergencetest)(tao,tao->cnvP);
832: }
833: return(0);
834: }
836: /* ---------------------------------------------------------- */
837: static PetscErrorCode TaoSetUp_NLS(Tao tao)
838: {
839: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
843: if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
844: if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
845: if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
846: if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
847: if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
848: if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
849: nlsP->Diag = 0;
850: nlsP->M = 0;
851: return(0);
852: }
854: /*------------------------------------------------------------*/
855: static PetscErrorCode TaoDestroy_NLS(Tao tao)
856: {
857: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
861: if (tao->setupcalled) {
862: VecDestroy(&nlsP->D);
863: VecDestroy(&nlsP->W);
864: VecDestroy(&nlsP->Xold);
865: VecDestroy(&nlsP->Gold);
866: }
867: VecDestroy(&nlsP->Diag);
868: MatDestroy(&nlsP->M);
869: PetscFree(tao->data);
870: return(0);
871: }
873: /*------------------------------------------------------------*/
874: static PetscErrorCode TaoSetFromOptions_NLS(PetscOptionItems *PetscOptionsObject,Tao tao)
875: {
876: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
880: PetscOptionsHead(PetscOptionsObject,"Newton line search method for unconstrained optimization");
881: PetscOptionsEList("-tao_nls_pc_type", "pc type", "", NLS_PC, NLS_PC_TYPES, NLS_PC[nlsP->pc_type], &nlsP->pc_type, 0);
882: PetscOptionsEList("-tao_nls_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[nlsP->bfgs_scale_type], &nlsP->bfgs_scale_type, 0);
883: PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
884: PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
885: PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval,NULL);
886: PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin,NULL);
887: PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax,NULL);
888: PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac,NULL);
889: PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin,NULL);
890: PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax,NULL);
891: PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac,NULL);
892: PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac,NULL);
893: PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac,NULL);
894: PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac,NULL);
895: PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1,NULL);
896: PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2,NULL);
897: PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3,NULL);
898: PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4,NULL);
899: PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1,NULL);
900: PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2,NULL);
901: PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3,NULL);
902: PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4,NULL);
903: PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5,NULL);
904: PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1,NULL);
905: PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2,NULL);
906: PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3,NULL);
907: PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4,NULL);
908: PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1,NULL);
909: PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2,NULL);
910: PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3,NULL);
911: PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4,NULL);
912: PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5,NULL);
913: PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i,NULL);
914: PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i,NULL);
915: PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i,NULL);
916: PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i,NULL);
917: PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i,NULL);
918: PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i,NULL);
919: PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i,NULL);
920: PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1,NULL);
921: PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2,NULL);
922: PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1,NULL);
923: PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2,NULL);
924: PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3,NULL);
925: PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4,NULL);
926: PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta,NULL);
927: PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius,NULL);
928: PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius,NULL);
929: PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon,NULL);
930: PetscOptionsTail();
931: TaoLineSearchSetFromOptions(tao->linesearch);
932: KSPSetFromOptions(tao->ksp);
933: return(0);
934: }
937: /*------------------------------------------------------------*/
938: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
939: {
940: TAO_NLS *nlsP = (TAO_NLS *)tao->data;
941: PetscInt nrejects;
942: PetscBool isascii;
946: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
947: if (isascii) {
948: PetscViewerASCIIPushTab(viewer);
949: if (NLS_PC_BFGS == nlsP->pc_type && nlsP->M) {
950: MatLMVMGetRejects(nlsP->M,&nrejects);
951: PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n",nrejects);
952: }
953: PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
954: PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
955: PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", nlsP->sgrad);
956: PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);
958: PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
959: PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
960: PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
961: PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
962: PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
963: PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
964: PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
965: PetscViewerASCIIPopTab(viewer);
966: }
967: return(0);
968: }
970: /* ---------------------------------------------------------- */
971: /*MC
972: TAONLS - Newton's method with linesearch for unconstrained minimization.
973: At each iteration, the Newton line search method solves the symmetric
974: system of equations to obtain the step direction dk:
975: Hk dk = -gk
976: A More'-Thuente line search is then applied along the direction dk to approximately
977: solve
978: min_t f(xk + t d_k)
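
   A typical driver (a minimal sketch, not taken from this file; FormFunctionGradient,
   FormHessian, the vector x, the matrix H, and the user context are all assumed to be
   provided by the application) looks like
.vb
      TaoCreate(PETSC_COMM_WORLD,&tao);
      TaoSetType(tao,TAONLS);
      TaoSetInitialVector(tao,x);
      TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);
      TaoSetHessianRoutine(tao,H,H,FormHessian,&user);
      TaoSetFromOptions(tao);
      TaoSolve(tao);
      TaoDestroy(&tao);
.ve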
980: Options Database Keys:
981: + -tao_nls_pc_type - "none","ahess","bfgs","petsc"
982: . -tao_nls_bfgs_scale_type - "ahess","phess","bfgs"
983: . -tao_nls_init_type - "constant","direction","interpolation"
984: . -tao_nls_update_type - "step","reduction","interpolation"
985: . -tao_nls_sval - perturbation starting value
986: . -tao_nls_imin - minimum initial perturbation
987: . -tao_nls_imax - maximum initial perturbation
988: . -tao_nls_pmin - minimum perturbation
989: . -tao_nls_pmax - maximum perturbation
990: . -tao_nls_pgfac - growth factor
991: . -tao_nls_psfac - shrink factor
992: . -tao_nls_imfac - initial merit factor
993: . -tao_nls_pmgfac - merit growth factor
994: . -tao_nls_pmsfac - merit shrink factor
995: . -tao_nls_eta1 - poor steplength; reduce radius
996: . -tao_nls_eta2 - reasonable steplength; leave radius alone
997: . -tao_nls_eta3 - good steplength; increase radius
998: . -tao_nls_eta4 - excellent steplength; greatly increase radius
999: . -tao_nls_alpha1 - alpha1 reduction
1000: . -tao_nls_alpha2 - alpha2 reduction
1001: . -tao_nls_alpha3 - alpha3 reduction
1002: . -tao_nls_alpha4 - alpha4 reduction
1003: . -tao_nls_alpha5 - alpha5 reduction
1004: . -tao_nls_mu1 - mu1 interpolation update
1005: . -tao_nls_mu2 - mu2 interpolation update
1006: . -tao_nls_gamma1 - gamma1 interpolation update
1007: . -tao_nls_gamma2 - gamma2 interpolation update
1008: . -tao_nls_gamma3 - gamma3 interpolation update
1009: . -tao_nls_gamma4 - gamma4 interpolation update
1010: . -tao_nls_theta - theta interpolation update
1011: . -tao_nls_omega1 - omega1 step update
1012: . -tao_nls_omega2 - omega2 step update
1013: . -tao_nls_omega3 - omega3 step update
1014: . -tao_nls_omega4 - omega4 step update
1015: . -tao_nls_omega5 - omega5 step update
1016: . -tao_nls_mu1_i - mu1 interpolation init factor
1017: . -tao_nls_mu2_i - mu2 interpolation init factor
1018: . -tao_nls_gamma1_i - gamma1 interpolation init factor
1019: . -tao_nls_gamma2_i - gamma2 interpolation init factor
1020: . -tao_nls_gamma3_i - gamma3 interpolation init factor
1021: . -tao_nls_gamma4_i - gamma4 interpolation init factor
1022: - -tao_nls_theta_i - theta interpolation init factor
1024: Level: beginner
1025: M*/
1027: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
1028: {
1029: TAO_NLS *nlsP;
1030: const char *morethuente_type = TAOLINESEARCHMT;
1034: PetscNewLog(tao,&nlsP);
1036: tao->ops->setup = TaoSetUp_NLS;
1037: tao->ops->solve = TaoSolve_NLS;
1038: tao->ops->view = TaoView_NLS;
1039: tao->ops->setfromoptions = TaoSetFromOptions_NLS;
1040: tao->ops->destroy = TaoDestroy_NLS;
1042: /* Override default settings (unless already changed) */
1043: if (!tao->max_it_changed) tao->max_it = 50;
1044: if (!tao->trust0_changed) tao->trust0 = 100.0;
1046: tao->data = (void*)nlsP;
1048: nlsP->sval = 0.0;
1049: nlsP->imin = 1.0e-4;
1050: nlsP->imax = 1.0e+2;
1051: nlsP->imfac = 1.0e-1;
1053: nlsP->pmin = 1.0e-12;
1054: nlsP->pmax = 1.0e+2;
1055: nlsP->pgfac = 1.0e+1;
1056: nlsP->psfac = 4.0e-1;
1057: nlsP->pmgfac = 1.0e-1;
1058: nlsP->pmsfac = 1.0e-1;
1060: /* Default values for trust-region radius update based on steplength */
1061: nlsP->nu1 = 0.25;
1062: nlsP->nu2 = 0.50;
1063: nlsP->nu3 = 1.00;
1064: nlsP->nu4 = 1.25;
1066: nlsP->omega1 = 0.25;
1067: nlsP->omega2 = 0.50;
1068: nlsP->omega3 = 1.00;
1069: nlsP->omega4 = 2.00;
1070: nlsP->omega5 = 4.00;
1072: /* Default values for trust-region radius update based on reduction */
1073: nlsP->eta1 = 1.0e-4;
1074: nlsP->eta2 = 0.25;
1075: nlsP->eta3 = 0.50;
1076: nlsP->eta4 = 0.90;
1078: nlsP->alpha1 = 0.25;
1079: nlsP->alpha2 = 0.50;
1080: nlsP->alpha3 = 1.00;
1081: nlsP->alpha4 = 2.00;
1082: nlsP->alpha5 = 4.00;
1084: /* Default values for trust-region radius update based on interpolation */
1085: nlsP->mu1 = 0.10;
1086: nlsP->mu2 = 0.50;
1088: nlsP->gamma1 = 0.25;
1089: nlsP->gamma2 = 0.50;
1090: nlsP->gamma3 = 2.00;
1091: nlsP->gamma4 = 4.00;
1093: nlsP->theta = 0.05;
1095: /* Default values for trust region initialization based on interpolation */
1096: nlsP->mu1_i = 0.35;
1097: nlsP->mu2_i = 0.50;
1099: nlsP->gamma1_i = 0.0625;
1100: nlsP->gamma2_i = 0.5;
1101: nlsP->gamma3_i = 2.0;
1102: nlsP->gamma4_i = 5.0;
1104: nlsP->theta_i = 0.25;
1106: /* Remaining parameters */
1107: nlsP->min_radius = 1.0e-10;
1108: nlsP->max_radius = 1.0e10;
1109: nlsP->epsilon = 1.0e-6;
1111: nlsP->pc_type = NLS_PC_BFGS;
1112: nlsP->bfgs_scale_type = BFGS_SCALE_PHESS;
1113: nlsP->init_type = NLS_INIT_INTERPOLATION;
1114: nlsP->update_type = NLS_UPDATE_STEP;
1116: TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
1117: PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
1118: TaoLineSearchSetType(tao->linesearch,morethuente_type);
1119: TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
1120: TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
1122: /* Set linear solver to default for symmetric matrices */
1123: KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
1124: PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1);
1125: KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
1126: KSPSetType(tao->ksp,KSPCGSTCG);
1127: return(0);
1128: }