/* Actual source code: bnk.c
   petsc-3.13.6 2020-09-29 */
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/bound/impls/bnk/bnk.h>
3: #include <petscksp.h>
/* Human-readable names for the BNK option enums, indexed by the corresponding
   enum value and used when registering/printing solver options.
   Arrays are oversized (64 slots) relative to the populated entries;
   the unpopulated slots are implicitly NULL. */
static const char *BNK_INIT[64] = {"constant", "direction", "interpolation"};
static const char *BNK_UPDATE[64] = {"step", "reduction", "interpolation"};
static const char *BNK_AS[64] = {"none", "bertsekas"};
9: /*------------------------------------------------------------*/
/* Routine for initializing the KSP solver, the BFGS preconditioner, and the initial trust radius estimation.

   Projects the starting point onto the bounds, evaluates the objective/gradient,
   runs the convergence test on the initial iterate, resets all per-solve counters,
   configures the LMVM or Jacobi preconditioner, and (for trust-region KSP types)
   initializes the trust radius according to initType.

   Parameters:
     tao      - the outer TAO solver context (data is a TAO_BNK*)
     initType - one of BNK_INIT_CONSTANT / BNK_INIT_INTERPOLATION / other
                (anything else defers radius init to the first direction norm)
     needH    - in/out flag; set TRUE when the Hessian must be (re)computed at
                the current iterate before the next Newton step */

PetscErrorCode TaoBNKInitialize(Tao tao, PetscInt initType, PetscBool *needH)
{
  PetscErrorCode ierr;
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  PC pc;
  PetscReal f_min, ftrial, prered, actred, kappa, sigma, resnorm;
  PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
  PetscBool is_bfgs, is_jacobi, is_symmetric, sym_set;
  PetscInt n, N, nDiff;
  PetscInt i_max = 5;   /* steepest-descent probes per interpolation pass */
  PetscInt j_max = 1;   /* number of interpolation passes */
  PetscInt i, j;

  /* Project the current point onto the feasible set */
  TaoComputeVariableBounds(tao);
  TaoSetVariableBounds(bnk->bncg, tao->XL, tao->XU);
  if (tao->bounded) {
    TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);
  }

  /* Project the initial point onto the feasible region */
  TaoBoundSolution(tao->solution, tao->XL,tao->XU, 0.0, &nDiff, tao->solution);

  /* Check convergence criteria: the working gradient is the unprojected
     gradient with the actively-bounded entries zeroed out */
  TaoComputeObjectiveAndGradient(tao, tao->solution, &bnk->f, bnk->unprojected_gradient);
  TaoBNKEstimateActiveSet(tao, bnk->as_type);
  VecCopy(bnk->unprojected_gradient, tao->gradient);
  VecISSet(tao->gradient, bnk->active_idx, 0.0);
  TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm);

  /* Test the initial point for convergence (Fischer-Burmeister residual
     measures bound-constrained optimality) */
  VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->W);
  VecNorm(bnk->W, NORM_2, &resnorm);
  if (PetscIsInfOrNanReal(bnk->f) || PetscIsInfOrNanReal(resnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
  TaoLogConvergenceHistory(tao,bnk->f,resnorm,0.0,tao->ksp_its);
  TaoMonitor(tao,tao->niter,bnk->f,resnorm,0.0,1.0);
  (*tao->ops->convergencetest)(tao,tao->cnvP);
  if (tao->reason != TAO_CONTINUE_ITERATING) return(0);

  /* Reset KSP stopping reason counters */
  bnk->ksp_atol = 0;
  bnk->ksp_rtol = 0;
  bnk->ksp_dtol = 0;
  bnk->ksp_ctol = 0;
  bnk->ksp_negc = 0;
  bnk->ksp_iter = 0;
  bnk->ksp_othr = 0;

  /* Reset accepted step type counters */
  bnk->tot_cg_its = 0;
  bnk->newt = 0;
  bnk->bfgs = 0;
  bnk->sgrad = 0;
  bnk->grad = 0;

  /* Initialize the Hessian perturbation */
  bnk->pert = bnk->sval;

  /* Reset initial steplength to zero (this helps BNCG reset its direction internally) */
  VecSet(tao->stepdirection, 0.0);

  /* Allocate the vectors needed for the BFGS approximation */
  KSPGetPC(tao->ksp, &pc);
  PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
  PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
  if (is_bfgs) {
    bnk->bfgs_pre = pc;
    PCLMVMGetMatLMVM(bnk->bfgs_pre, &bnk->M);
    VecGetLocalSize(tao->solution, &n);
    VecGetSize(tao->solution, &N);
    MatSetSizes(bnk->M, n, n, N, N);
    MatLMVMAllocate(bnk->M, tao->solution, bnk->unprojected_gradient);
    /* The LMVM approximation must be symmetric to act as a preconditioner here */
    MatIsSymmetricKnown(bnk->M, &sym_set, &is_symmetric);
    if (!sym_set || !is_symmetric) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
  } else if (is_jacobi) {
    /* Absolute values guard against negative diagonal entries of an indefinite Hessian */
    PCJacobiSetUseAbs(pc,PETSC_TRUE);
  }

  /* Prepare the min/max vectors for safeguarding diagonal scales */
  VecSet(bnk->Diag_min, bnk->dmin);
  VecSet(bnk->Diag_max, bnk->dmax);

  /* Initialize trust-region radius. The initialization is only performed
     when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
  *needH = PETSC_TRUE;
  if (bnk->is_nash || bnk->is_stcg || bnk->is_gltr) {
    switch(initType) {
    case BNK_INIT_CONSTANT:
      /* Use the initial radius specified */
      tao->trust = tao->trust0;
      break;

    case BNK_INIT_INTERPOLATION:
      /* Use interpolation based on the initial Hessian: probe steepest-descent
         trial points and adjust the radius by comparing actual vs. predicted
         reduction of the quadratic model */
      max_radius = 0.0;
      tao->trust = tao->trust0;
      for (j = 0; j < j_max; ++j) {
        f_min = bnk->f;
        sigma = 0.0;

        if (*needH) {
          /* Compute the Hessian at the new step, and extract the inactive subsystem */
          bnk->computehessian(tao);
          TaoBNKEstimateActiveSet(tao, BNK_AS_NONE);
          MatDestroy(&bnk->H_inactive);
          if (bnk->active_idx) {
            MatCreateSubMatrix(tao->hessian, bnk->inactive_idx, bnk->inactive_idx, MAT_INITIAL_MATRIX, &bnk->H_inactive);
          } else {
            MatDuplicate(tao->hessian, MAT_COPY_VALUES, &bnk->H_inactive);
          }
          *needH = PETSC_FALSE;
        }

        for (i = 0; i < i_max; ++i) {
          /* Take a steepest descent step and snap it to bounds */
          VecCopy(tao->solution, bnk->Xold);
          VecAXPY(tao->solution, -tao->trust/bnk->gnorm, tao->gradient);
          TaoBoundSolution(tao->solution, tao->XL,tao->XU, 0.0, &nDiff, tao->solution);
          /* Compute the step we actually accepted */
          VecCopy(tao->solution, bnk->W);
          VecAXPY(bnk->W, -1.0, bnk->Xold);
          /* Compute the objective at the trial */
          TaoComputeObjective(tao, tao->solution, &ftrial);
          /* NOTE(review): this tests bnk->f, which is unchanged since the check
             above; an Inf/NaN ftrial is instead handled gracefully below —
             presumably ftrial was intended here. Confirm against upstream. */
          if (PetscIsInfOrNanReal(bnk->f)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
          /* Restore the solution; this loop only probes, it does not step */
          VecCopy(bnk->Xold, tao->solution);
          if (PetscIsInfOrNanReal(ftrial)) {
            /* Trial objective blew up; shrink the radius */
            tau = bnk->gamma1_i;
          } else {
            if (ftrial < f_min) {
              /* Remember the best probe in case it beats the initial point */
              f_min = ftrial;
              sigma = -tao->trust / bnk->gnorm;
            }

            /* Compute the predicted and actual reduction */
            if (bnk->active_idx) {
              VecGetSubVector(bnk->W, bnk->inactive_idx, &bnk->X_inactive);
              VecGetSubVector(bnk->Xwork, bnk->inactive_idx, &bnk->inactive_work);
            } else {
              bnk->X_inactive = bnk->W;
              bnk->inactive_work = bnk->Xwork;
            }
            MatMult(bnk->H_inactive, bnk->X_inactive, bnk->inactive_work);
            VecDot(bnk->X_inactive, bnk->inactive_work, &prered);
            if (bnk->active_idx) {
              VecRestoreSubVector(bnk->W, bnk->inactive_idx, &bnk->X_inactive);
              VecRestoreSubVector(bnk->Xwork, bnk->inactive_idx, &bnk->inactive_work);
            }
            /* Quadratic-model reduction along the steepest-descent direction */
            prered = tao->trust * (bnk->gnorm - 0.5 * tao->trust * prered / (bnk->gnorm * bnk->gnorm));
            actred = bnk->f - ftrial;
            if ((PetscAbsScalar(actred) <= bnk->epsilon) && (PetscAbsScalar(prered) <= bnk->epsilon)) {
              /* Both reductions are negligible; treat agreement as perfect */
              kappa = 1.0;
            } else {
              kappa = actred / prered;
            }

            /* Interpolation parameters: candidate radius scaling factors */
            tau_1 = bnk->theta_i * bnk->gnorm * tao->trust / (bnk->theta_i * bnk->gnorm * tao->trust + (1.0 - bnk->theta_i) * prered - actred);
            tau_2 = bnk->theta_i * bnk->gnorm * tao->trust / (bnk->theta_i * bnk->gnorm * tao->trust - (1.0 + bnk->theta_i) * prered + actred);
            tau_min = PetscMin(tau_1, tau_2);
            tau_max = PetscMax(tau_1, tau_2);

            if (PetscAbsScalar(kappa - (PetscReal)1.0) <= bnk->mu1_i) {
              /* Great agreement */
              max_radius = PetscMax(max_radius, tao->trust);

              if (tau_max < 1.0) {
                tau = bnk->gamma3_i;
              } else if (tau_max > bnk->gamma4_i) {
                tau = bnk->gamma4_i;
              } else {
                tau = tau_max;
              }
            } else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= bnk->mu2_i) {
              /* Good agreement */
              max_radius = PetscMax(max_radius, tao->trust);

              if (tau_max < bnk->gamma2_i) {
                tau = bnk->gamma2_i;
              } else if (tau_max > bnk->gamma3_i) {
                tau = bnk->gamma3_i;
              } else {
                tau = tau_max;
              }
            } else {
              /* Not good agreement */
              if (tau_min > 1.0) {
                tau = bnk->gamma2_i;
              } else if (tau_max < bnk->gamma1_i) {
                tau = bnk->gamma1_i;
              } else if ((tau_min < bnk->gamma1_i) && (tau_max >= 1.0)) {
                tau = bnk->gamma1_i;
              } else if ((tau_1 >= bnk->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < bnk->gamma1_i) || (tau_2 >= 1.0))) {
                tau = tau_1;
              /* NOTE(review): the trailing `tau_2 >= 1.0` below is asymmetric
                 with the branch above (which tests tau_2 both times); `tau_1
                 >= 1.0` may have been intended. Same expression appears in
                 related TAO trust-region solvers — confirm before changing. */
              } else if ((tau_2 >= bnk->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < bnk->gamma1_i) || (tau_2 >= 1.0))) {
                tau = tau_2;
              } else {
                tau = tau_max;
              }
            }
          }
          tao->trust = tau * tao->trust;
        }

        if (f_min < bnk->f) {
          /* We accidentally found a solution better than the initial, so accept it */
          bnk->f = f_min;
          VecCopy(tao->solution, bnk->Xold);
          VecAXPY(tao->solution,sigma,tao->gradient);
          TaoBoundSolution(tao->solution, tao->XL,tao->XU, 0.0, &nDiff, tao->solution);
          VecCopy(tao->solution, tao->stepdirection);
          VecAXPY(tao->stepdirection, -1.0, bnk->Xold);
          TaoComputeGradient(tao,tao->solution,bnk->unprojected_gradient);
          TaoBNKEstimateActiveSet(tao, bnk->as_type);
          VecCopy(bnk->unprojected_gradient, tao->gradient);
          VecISSet(tao->gradient, bnk->active_idx, 0.0);
          /* Compute gradient at the new iterate and flip switch to compute the Hessian later */
          TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm);
          *needH = PETSC_TRUE;
          /* Test the new step for convergence */
          VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->W);
          VecNorm(bnk->W, NORM_2, &resnorm);
          if (PetscIsInfOrNanReal(resnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");
          TaoLogConvergenceHistory(tao,bnk->f,resnorm,0.0,tao->ksp_its);
          TaoMonitor(tao,tao->niter,bnk->f,resnorm,0.0,1.0);
          (*tao->ops->convergencetest)(tao,tao->cnvP);
          if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
          /* active BNCG recycling early because we have a stepdirection computed */
          TaoBNCGSetRecycleFlag(bnk->bncg, PETSC_TRUE);
        }
      }
      tao->trust = PetscMax(tao->trust, max_radius);

      /* Ensure that the trust radius is within the limits */
      tao->trust = PetscMax(tao->trust, bnk->min_radius);
      tao->trust = PetscMin(tao->trust, bnk->max_radius);
      break;

    default:
      /* Norm of the first direction will initialize radius */
      tao->trust = 0.0;
      break;
    }
  }
  return(0);
}
260: /*------------------------------------------------------------*/
262: /* Routine for computing the exact Hessian and preparing the preconditioner at the new iterate */
264: PetscErrorCode TaoBNKComputeHessian(Tao tao)
265: {
266: PetscErrorCode ierr;
267: TAO_BNK *bnk = (TAO_BNK *)tao->data;
270: /* Compute the Hessian */
271: TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
272: /* Add a correction to the BFGS preconditioner */
273: if (bnk->M) {
274: MatLMVMUpdate(bnk->M, tao->solution, bnk->unprojected_gradient);
275: }
276: /* Prepare the reduced sub-matrices for the inactive set */
277: if (bnk->Hpre_inactive) {
278: MatDestroy(&bnk->Hpre_inactive);
279: }
280: if (bnk->H_inactive) {
281: MatDestroy(&bnk->H_inactive);
282: }
283: if (bnk->active_idx) {
284: MatCreateSubMatrix(tao->hessian, bnk->inactive_idx, bnk->inactive_idx, MAT_INITIAL_MATRIX, &bnk->H_inactive);
285: if (tao->hessian == tao->hessian_pre) {
286: PetscObjectReference((PetscObject)bnk->H_inactive);
287: bnk->Hpre_inactive = bnk->H_inactive;
288: } else {
289: MatCreateSubMatrix(tao->hessian_pre, bnk->inactive_idx, bnk->inactive_idx, MAT_INITIAL_MATRIX, &bnk->Hpre_inactive);
290: }
291: if (bnk->bfgs_pre) {
292: PCLMVMSetIS(bnk->bfgs_pre, bnk->inactive_idx);
293: }
294: } else {
295: MatDuplicate(tao->hessian, MAT_COPY_VALUES, &bnk->H_inactive);
296: if (tao->hessian == tao->hessian_pre) {
297: PetscObjectReference((PetscObject)bnk->H_inactive);
298: bnk->Hpre_inactive = bnk->H_inactive;
299: } else {
300: MatDuplicate(tao->hessian_pre, MAT_COPY_VALUES, &bnk->Hpre_inactive);
301: }
302: if (bnk->bfgs_pre) {
303: PCLMVMClearIS(bnk->bfgs_pre);
304: }
305: }
306: return(0);
307: }
309: /*------------------------------------------------------------*/
/* Routine for estimating the active set.

   Fills bnk->active_idx / bnk->inactive_idx (and, for the Bertsekas method,
   the lower/upper/fixed split) based on the current solution, bounds, and
   unprojected gradient.

   Parameters:
     tao    - the TAO solver context (data is a TAO_BNK*)
     asType - BNK_AS_NONE for the simple gradient-based estimate,
              BNK_AS_BERTSEKAS for the trial-step-based estimate;
              any other value leaves the current estimate untouched */

PetscErrorCode TaoBNKEstimateActiveSet(Tao tao, PetscInt asType)
{
  PetscErrorCode ierr;
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  PetscBool hessComputed, diagExists;

  switch (asType) {
  case BNK_AS_NONE:
    /* Direct classification from bounds and gradient sign; destroy the old
       index sets before recreating them */
    ISDestroy(&bnk->inactive_idx);
    VecWhichInactive(tao->XL, tao->solution, bnk->unprojected_gradient, tao->XU, PETSC_TRUE, &bnk->inactive_idx);
    ISDestroy(&bnk->active_idx);
    ISComplementVec(bnk->inactive_idx, tao->solution, &bnk->active_idx);
    break;

  case BNK_AS_BERTSEKAS:
    /* Compute the trial step vector with which we will estimate the active set at the next iteration */
    if (bnk->M) {
      /* If the BFGS preconditioner matrix is available, we will construct a trial step with it */
      MatSolve(bnk->M, bnk->unprojected_gradient, bnk->W);
    } else {
      hessComputed = diagExists = PETSC_FALSE;
      if (tao->hessian) {
        MatAssembled(tao->hessian, &hessComputed);
      }
      if (hessComputed) {
        MatHasOperation(tao->hessian, MATOP_GET_DIAGONAL, &diagExists);
      }
      if (diagExists) {
        /* BFGS preconditioner doesn't exist so let's invert the absolute diagonal of the Hessian instead onto the gradient */
        MatGetDiagonal(tao->hessian, bnk->Xwork);
        VecAbs(bnk->Xwork);
        /* Clamp the diagonal into [Diag_min, Diag_max] before inverting */
        VecMedian(bnk->Diag_min, bnk->Xwork, bnk->Diag_max, bnk->Xwork);
        VecReciprocal(bnk->Xwork);
        VecPointwiseMult(bnk->W, bnk->Xwork, bnk->unprojected_gradient);
      } else {
        /* If the Hessian or its diagonal does not exist, we will simply use gradient step */
        VecCopy(bnk->unprojected_gradient, bnk->W);
      }
    }
    /* Negate so W points in the descent direction */
    VecScale(bnk->W, -1.0);
    TaoEstimateActiveBounds(tao->solution, tao->XL, tao->XU, bnk->unprojected_gradient, bnk->W, bnk->Xwork, bnk->as_step, &bnk->as_tol,
                            &bnk->active_lower, &bnk->active_upper, &bnk->active_fixed, &bnk->active_idx, &bnk->inactive_idx);
    break;

  default:
    break;
  }
  return(0);
}
364: /*------------------------------------------------------------*/
366: /* Routine for bounding the step direction */
368: PetscErrorCode TaoBNKBoundStep(Tao tao, PetscInt asType, Vec step)
369: {
370: PetscErrorCode ierr;
371: TAO_BNK *bnk = (TAO_BNK *)tao->data;
372:
374: switch (asType) {
375: case BNK_AS_NONE:
376: VecISSet(step, bnk->active_idx, 0.0);
377: break;
379: case BNK_AS_BERTSEKAS:
380: TaoBoundStep(tao->solution, tao->XL, tao->XU, bnk->active_lower, bnk->active_upper, bnk->active_fixed, 1.0, step);
381: break;
383: default:
384: break;
385: }
386: return(0);
387: }
389: /*------------------------------------------------------------*/
/* Routine for taking a finite number of BNCG iterations to
   accelerate Newton convergence.

   In practice, this approach simply trades off Hessian evaluations
   for more gradient evaluations.

   Parameters:
     tao       - the TAO solver context (data is a TAO_BNK*)
     terminate - out; set TRUE when the inner BNCG solve already satisfied a
                 convergence tolerance, so the outer solve can stop */

PetscErrorCode TaoBNKTakeCGSteps(Tao tao, PetscBool *terminate)
{
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  PetscErrorCode ierr;

  *terminate = PETSC_FALSE;
  if (bnk->max_cg_its > 0) {
    /* Copy the current function value (important vectors are already shared) */
    bnk->bncg_ctx->f = bnk->f;
    /* Take some small finite number of BNCG iterations */
    TaoSolve(bnk->bncg);
    /* Add the number of gradient and function evaluations to the total */
    tao->nfuncs += bnk->bncg->nfuncs;
    tao->nfuncgrads += bnk->bncg->nfuncgrads;
    tao->ngrads += bnk->bncg->ngrads;
    tao->nhess += bnk->bncg->nhess;
    bnk->tot_cg_its += bnk->bncg->niter;
    /* Extract the BNCG function value out and save it into BNK */
    bnk->f = bnk->bncg_ctx->f;
    if (bnk->bncg->reason == TAO_CONVERGED_GATOL || bnk->bncg->reason == TAO_CONVERGED_GRTOL || bnk->bncg->reason == TAO_CONVERGED_GTTOL || bnk->bncg->reason == TAO_CONVERGED_MINF) {
      /* BNCG converged on its own tolerances; the composite solve is done */
      *terminate = PETSC_TRUE;
    } else {
      /* Otherwise refresh the active-set estimate at the BNCG iterate */
      TaoBNKEstimateActiveSet(tao, bnk->as_type);
    }
  }
  return(0);
}
427: /*------------------------------------------------------------*/
/* Routine for computing the Newton step.

   Solves the (reduced, inactive-set) Newton system with tao->ksp, optionally
   shifting the Hessian by the current perturbation, and writes the negated
   solution into tao->stepdirection with actively bounded entries adjusted.
   Also maintains the trust radius when it was left uninitialized, tallies KSP
   convergence reasons, and resets an indefinite BFGS preconditioner.

   Parameters:
     tao        - the TAO solver context (data is a TAO_BNK*)
     shift      - whether to apply the diagonal perturbation bnk->pert
     ksp_reason - out; KSP convergence reason of the (last) solve
     step_type  - out; always set to BNK_NEWTON here */

PetscErrorCode TaoBNKComputeStep(Tao tao, PetscBool shift, KSPConvergedReason *ksp_reason, PetscInt *step_type)
{
  PetscErrorCode ierr;
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  PetscInt bfgsUpdates = 0;
  PetscInt kspits;
  PetscBool is_lmvm;

  /* If there are no inactive variables left, save some computation and return an adjusted zero step
     that has (l-x) and (u-x) for lower and upper bounded variables. */
  if (!bnk->inactive_idx) {
    VecSet(tao->stepdirection, 0.0);
    TaoBNKBoundStep(tao, bnk->as_type, tao->stepdirection);
    return(0);
  }

  /* Shift the reduced Hessian matrix */
  if ((shift) && (bnk->pert > 0)) {
    PetscObjectTypeCompare((PetscObject)tao->hessian, MATLMVM, &is_lmvm);
    if (is_lmvm) {
      /* LMVM Hessians are shifted whole; the reduced matrix shares its data */
      MatShift(tao->hessian, bnk->pert);
    } else {
      MatShift(bnk->H_inactive, bnk->pert);
      if (bnk->H_inactive != bnk->Hpre_inactive) {
        /* Only shift the preconditioning matrix when it is a distinct object */
        MatShift(bnk->Hpre_inactive, bnk->pert);
      }
    }
  }

  /* Solve the Newton system of equations */
  tao->ksp_its = 0;
  VecSet(tao->stepdirection, 0.0);
  /* Reset the KSP so stale operators/state from the previous reduced system
     (whose size may have changed with the active set) are discarded */
  KSPReset(tao->ksp);
  KSPResetFromOptions(tao->ksp);
  KSPSetOperators(tao->ksp,bnk->H_inactive,bnk->Hpre_inactive);
  VecCopy(bnk->unprojected_gradient, bnk->Gwork);
  if (bnk->active_idx) {
    /* Solve only on the inactive subsystem */
    VecGetSubVector(bnk->Gwork, bnk->inactive_idx, &bnk->G_inactive);
    VecGetSubVector(tao->stepdirection, bnk->inactive_idx, &bnk->X_inactive);
  } else {
    bnk->G_inactive = bnk->unprojected_gradient;
    bnk->X_inactive = tao->stepdirection;
  }
  if (bnk->is_nash || bnk->is_stcg || bnk->is_gltr) {
    /* Trust-region capable KSP: constrain the solve to the current radius */
    KSPCGSetRadius(tao->ksp,tao->trust);
    KSPSolve(tao->ksp, bnk->G_inactive, bnk->X_inactive);
    KSPGetIterationNumber(tao->ksp,&kspits);
    tao->ksp_its+=kspits;
    tao->ksp_tot_its+=kspits;
    KSPCGGetNormD(tao->ksp,&bnk->dnorm);

    if (0.0 == tao->trust) {
      /* Radius was uninitialized; use the norm of the direction */
      if (bnk->dnorm > 0.0) {
        tao->trust = bnk->dnorm;

        /* Modify the radius if it is too large or small */
        tao->trust = PetscMax(tao->trust, bnk->min_radius);
        tao->trust = PetscMin(tao->trust, bnk->max_radius);
      } else {
        /* The direction was bad; set radius to default value and re-solve
           the trust-region subproblem to get a direction */
        tao->trust = tao->trust0;

        /* Modify the radius if it is too large or small */
        tao->trust = PetscMax(tao->trust, bnk->min_radius);
        tao->trust = PetscMin(tao->trust, bnk->max_radius);

        KSPCGSetRadius(tao->ksp,tao->trust);
        KSPSolve(tao->ksp, bnk->G_inactive, bnk->X_inactive);
        KSPGetIterationNumber(tao->ksp,&kspits);
        tao->ksp_its+=kspits;
        tao->ksp_tot_its+=kspits;
        KSPCGGetNormD(tao->ksp,&bnk->dnorm);

        if (bnk->dnorm == 0.0) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_PLIB, "Initial direction zero");
      }
    }
  } else {
    /* Plain KSP solve without trust-region constraint */
    KSPSolve(tao->ksp, bnk->G_inactive, bnk->X_inactive);
    KSPGetIterationNumber(tao->ksp, &kspits);
    tao->ksp_its += kspits;
    tao->ksp_tot_its+=kspits;
  }
  /* Restore sub vectors back */
  if (bnk->active_idx) {
    VecRestoreSubVector(bnk->Gwork, bnk->inactive_idx, &bnk->G_inactive);
    VecRestoreSubVector(tao->stepdirection, bnk->inactive_idx, &bnk->X_inactive);
  }
  /* Make sure the safeguarded fall-back step is zero for actively bounded variables;
     the negation turns the solve result H*d = g into a descent candidate d = -H^{-1} g */
  VecScale(tao->stepdirection, -1.0);
  TaoBNKBoundStep(tao, bnk->as_type, tao->stepdirection);

  /* Record convergence reasons */
  KSPGetConvergedReason(tao->ksp, ksp_reason);
  if (KSP_CONVERGED_ATOL == *ksp_reason) {
    ++bnk->ksp_atol;
  } else if (KSP_CONVERGED_RTOL == *ksp_reason) {
    ++bnk->ksp_rtol;
  } else if (KSP_CONVERGED_CG_CONSTRAINED == *ksp_reason) {
    ++bnk->ksp_ctol;
  } else if (KSP_CONVERGED_CG_NEG_CURVE == *ksp_reason) {
    ++bnk->ksp_negc;
  } else if (KSP_DIVERGED_DTOL == *ksp_reason) {
    ++bnk->ksp_dtol;
  } else if (KSP_DIVERGED_ITS == *ksp_reason) {
    ++bnk->ksp_iter;
  } else {
    ++bnk->ksp_othr;
  }

  /* Make sure the BFGS preconditioner is healthy */
  if (bnk->M) {
    MatLMVMGetUpdateCount(bnk->M, &bfgsUpdates);
    if ((KSP_DIVERGED_INDEFINITE_PC == *ksp_reason) && (bfgsUpdates > 0)) {
      /* Preconditioner is numerically indefinite; reset the approximation. */
      MatLMVMReset(bnk->M, PETSC_FALSE);
      MatLMVMUpdate(bnk->M, tao->solution, bnk->unprojected_gradient);
    }
  }
  *step_type = BNK_NEWTON;
  return(0);
}
556: /*------------------------------------------------------------*/
/* Routine for recomputing the predicted reduction for a given step vector.

   Evaluates prered = g'd - 0.5 d'Hd on the inactive subsystem, using
   tao->stepdirection as d (the S argument is not read here — NOTE(review):
   parameter S appears unused; confirm whether callers rely on that).

   Parameters:
     tao    - the TAO solver context (data is a TAO_BNK*)
     S      - step vector (see note above)
     prered - out; predicted reduction of the quadratic model */

PetscErrorCode TaoBNKRecomputePred(Tao tao, Vec S, PetscReal *prered)
{
  PetscErrorCode ierr;
  TAO_BNK *bnk = (TAO_BNK *)tao->data;

  /* Extract subvectors associated with the inactive set */
  if (bnk->active_idx){
    VecGetSubVector(tao->stepdirection, bnk->inactive_idx, &bnk->X_inactive);
    VecGetSubVector(bnk->Xwork, bnk->inactive_idx, &bnk->inactive_work);
    VecGetSubVector(bnk->Gwork, bnk->inactive_idx, &bnk->G_inactive);
  } else {
    bnk->X_inactive = tao->stepdirection;
    bnk->inactive_work = bnk->Xwork;
    bnk->G_inactive = bnk->Gwork;
  }
  /* Recompute the predicted decrease based on the quadratic model:
     work = g - 0.5*H*d, then prered = work'd = g'd - 0.5 d'Hd */
  MatMult(bnk->H_inactive, bnk->X_inactive, bnk->inactive_work);
  VecAYPX(bnk->inactive_work, -0.5, bnk->G_inactive);
  VecDot(bnk->inactive_work, bnk->X_inactive, prered);
  /* Restore the sub vectors */
  if (bnk->active_idx){
    VecRestoreSubVector(tao->stepdirection, bnk->inactive_idx, &bnk->X_inactive);
    VecRestoreSubVector(bnk->Xwork, bnk->inactive_idx, &bnk->inactive_work);
    VecRestoreSubVector(bnk->Gwork, bnk->inactive_idx, &bnk->G_inactive);
  }
  return(0);
}
589: /*------------------------------------------------------------*/
/* Routine for ensuring that the Newton step is a descent direction.

   The step direction falls back onto BFGS, scaled gradient and gradient steps
   in the event that the Newton step fails the test.

   Parameters:
     tao        - the TAO solver context (data is a TAO_BNK*)
     ksp_reason - KSP convergence reason of the Newton solve, used to tune the
                  Hessian perturbation for the next iteration
     stepType   - in/out; on entry the type of step in tao->stepdirection,
                  on exit the type actually retained after safeguarding */

PetscErrorCode TaoBNKSafeguardStep(Tao tao, KSPConvergedReason ksp_reason, PetscInt *stepType)
{
  PetscErrorCode ierr;
  TAO_BNK *bnk = (TAO_BNK *)tao->data;

  PetscReal gdx, e_min;
  PetscInt bfgsUpdates;

  switch (*stepType) {
  case BNK_NEWTON:
    VecDot(tao->stepdirection, tao->gradient, &gdx);
    if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* Newton step is not descent or direction produced Inf or NaN
         Update the perturbation for next time */
      if (bnk->pert <= 0.0) {
        /* Initialize the perturbation */
        bnk->pert = PetscMin(bnk->imax, PetscMax(bnk->imin, bnk->imfac * bnk->gnorm));
        if (bnk->is_gltr) {
          /* GLTR exposes the minimum eigenvalue; perturb at least enough to
             push the spectrum nonnegative */
          KSPGLTRGetMinEig(tao->ksp,&e_min);
          bnk->pert = PetscMax(bnk->pert, -e_min);
        }
      } else {
        /* Increase the perturbation */
        bnk->pert = PetscMin(bnk->pmax, PetscMax(bnk->pgfac * bnk->pert, bnk->pmgfac * bnk->gnorm));
      }

      if (!bnk->M) {
        /* We don't have the bfgs matrix around and updated
           Must use gradient direction in this case */
        VecCopy(tao->gradient, tao->stepdirection);
        *stepType = BNK_GRADIENT;
      } else {
        /* Attempt to use the BFGS direction */
        MatSolve(bnk->M, bnk->unprojected_gradient, tao->stepdirection);

        /* Check for success (descent direction)
           NOTE: Negative gdx here means not a descent direction because
           the fall-back step is missing a negative sign. */
        VecDot(tao->gradient, tao->stepdirection, &gdx);
        if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
          /* BFGS direction is not descent or direction produced not a number
             We can assert bfgsUpdates > 1 in this case because
             the first solve produces the scaled gradient direction,
             which is guaranteed to be descent */

          /* Use steepest descent direction (scaled) */
          MatLMVMReset(bnk->M, PETSC_FALSE);
          MatLMVMUpdate(bnk->M, tao->solution, bnk->unprojected_gradient);
          MatSolve(bnk->M, bnk->unprojected_gradient, tao->stepdirection);

          *stepType = BNK_SCALED_GRADIENT;
        } else {
          MatLMVMGetUpdateCount(bnk->M, &bfgsUpdates);
          if (1 == bfgsUpdates) {
            /* The first BFGS direction is always the scaled gradient */
            *stepType = BNK_SCALED_GRADIENT;
          } else {
            *stepType = BNK_BFGS;
          }
        }
      }
      /* Make sure the safeguarded fall-back step is zero for actively bounded variables */
      VecScale(tao->stepdirection, -1.0);
      TaoBNKBoundStep(tao, bnk->as_type, tao->stepdirection);
    } else {
      /* Computed Newton step is descent */
      switch (ksp_reason) {
      case KSP_DIVERGED_NANORINF:
      case KSP_DIVERGED_BREAKDOWN:
      case KSP_DIVERGED_INDEFINITE_MAT:
      case KSP_DIVERGED_INDEFINITE_PC:
      case KSP_CONVERGED_CG_NEG_CURVE:
        /* Matrix or preconditioner is indefinite; increase perturbation */
        if (bnk->pert <= 0.0) {
          /* Initialize the perturbation */
          bnk->pert = PetscMin(bnk->imax, PetscMax(bnk->imin, bnk->imfac * bnk->gnorm));
          if (bnk->is_gltr) {
            KSPGLTRGetMinEig(tao->ksp, &e_min);
            bnk->pert = PetscMax(bnk->pert, -e_min);
          }
        } else {
          /* Increase the perturbation */
          bnk->pert = PetscMin(bnk->pmax, PetscMax(bnk->pgfac * bnk->pert, bnk->pmgfac * bnk->gnorm));
        }
        break;

      default:
        /* Newton step computation is good; decrease perturbation */
        bnk->pert = PetscMin(bnk->psfac * bnk->pert, bnk->pmsfac * bnk->gnorm);
        if (bnk->pert < bnk->pmin) {
          /* Below the minimum threshold the perturbation is dropped entirely */
          bnk->pert = 0.0;
        }
        break;
      }
      *stepType = BNK_NEWTON;
    }
    break;

  case BNK_BFGS:
    /* Check for success (descent direction) */
    VecDot(tao->stepdirection, tao->gradient, &gdx);
    if (gdx >= 0 || PetscIsInfOrNanReal(gdx)) {
      /* Step is not descent or solve was not successful
         Use steepest descent direction (scaled) */
      MatLMVMReset(bnk->M, PETSC_FALSE);
      MatLMVMUpdate(bnk->M, tao->solution, bnk->unprojected_gradient);
      MatSolve(bnk->M, tao->gradient, tao->stepdirection);
      VecScale(tao->stepdirection,-1.0);
      TaoBNKBoundStep(tao, bnk->as_type, tao->stepdirection);
      *stepType = BNK_SCALED_GRADIENT;
    } else {
      *stepType = BNK_BFGS;
    }
    break;

  case BNK_SCALED_GRADIENT:
    /* Scaled gradient is descent by construction; nothing to safeguard */
    break;

  default:
    break;
  }

  return(0);
}
723: /*------------------------------------------------------------*/
/* Routine for performing a bound-projected More-Thuente line search.

   Includes fallbacks to BFGS, scaled gradient, and unscaled gradient steps if the
   Newton step does not produce a valid step length.

   Parameters:
     tao      - the TAO solver context (data is a TAO_BNK*)
     stepType - in/out; step type being searched, downgraded on each fall-back
     steplen  - out; accepted step length from the line search
     reason   - out; line search convergence reason of the final attempt */

PetscErrorCode TaoBNKPerformLineSearch(Tao tao, PetscInt *stepType, PetscReal *steplen, TaoLineSearchConvergedReason *reason)
{
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  TaoLineSearchConvergedReason ls_reason;

  PetscReal e_min, gdx;
  PetscInt bfgsUpdates;

  /* Perform the linesearch */
  TaoLineSearchApply(tao->linesearch, tao->solution, &bnk->f, bnk->unprojected_gradient, tao->stepdirection, steplen, &ls_reason);
  TaoAddLineSearchCounts(tao);

  /* Loop guard excludes BNK_SCALED_GRADIENT and BNK_GRADIENT, so only
     BNK_NEWTON and BNK_BFGS can reach the switch below; the fall-back chain
     terminates because each pass downgrades *stepType */
  while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && *stepType != BNK_SCALED_GRADIENT && *stepType != BNK_GRADIENT) {
    /* Linesearch failed, revert solution */
    bnk->f = bnk->fold;
    VecCopy(bnk->Xold, tao->solution);
    VecCopy(bnk->unprojected_gradient_old, bnk->unprojected_gradient);

    switch(*stepType) {
    case BNK_NEWTON:
      /* Failed to obtain acceptable iterate with Newton step
         Update the perturbation for next time */
      if (bnk->pert <= 0.0) {
        /* Initialize the perturbation */
        bnk->pert = PetscMin(bnk->imax, PetscMax(bnk->imin, bnk->imfac * bnk->gnorm));
        if (bnk->is_gltr) {
          /* Perturb at least enough to counter GLTR's smallest eigenvalue */
          KSPGLTRGetMinEig(tao->ksp,&e_min);
          bnk->pert = PetscMax(bnk->pert, -e_min);
        }
      } else {
        /* Increase the perturbation */
        bnk->pert = PetscMin(bnk->pmax, PetscMax(bnk->pgfac * bnk->pert, bnk->pmgfac * bnk->gnorm));
      }

      if (!bnk->M) {
        /* We don't have the bfgs matrix around and being updated
           Must use gradient direction in this case */
        VecCopy(bnk->unprojected_gradient, tao->stepdirection);
        *stepType = BNK_GRADIENT;
      } else {
        /* Attempt to use the BFGS direction */
        MatSolve(bnk->M, bnk->unprojected_gradient, tao->stepdirection);
        /* Check for success (descent direction)
           NOTE: Negative gdx means not a descent direction because the step here is missing a negative sign. */
        VecDot(tao->gradient, tao->stepdirection, &gdx);
        if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
          /* BFGS direction is not descent or direction produced not a number
             We can assert bfgsUpdates > 1 in this case
             Use steepest descent direction (scaled) */
          MatLMVMReset(bnk->M, PETSC_FALSE);
          MatLMVMUpdate(bnk->M, tao->solution, bnk->unprojected_gradient);
          MatSolve(bnk->M, bnk->unprojected_gradient, tao->stepdirection);

          bfgsUpdates = 1;
          *stepType = BNK_SCALED_GRADIENT;
        } else {
          MatLMVMGetUpdateCount(bnk->M, &bfgsUpdates);
          if (1 == bfgsUpdates) {
            /* The first BFGS direction is always the scaled gradient */
            *stepType = BNK_SCALED_GRADIENT;
          } else {
            *stepType = BNK_BFGS;
          }
        }
      }
      break;

    case BNK_BFGS:
      /* Can only enter if pc_type == BNK_PC_BFGS
         Failed to obtain acceptable iterate with BFGS step
         Attempt to use the scaled gradient direction */
      MatLMVMReset(bnk->M, PETSC_FALSE);
      MatLMVMUpdate(bnk->M, tao->solution, bnk->unprojected_gradient);
      MatSolve(bnk->M, bnk->unprojected_gradient, tao->stepdirection);

      bfgsUpdates = 1;
      *stepType = BNK_SCALED_GRADIENT;
      break;
    }
    /* Make sure the safeguarded fall-back step is zero for actively bounded variables;
       the fall-back solves above are missing the negative sign, applied here */
    VecScale(tao->stepdirection, -1.0);
    TaoBNKBoundStep(tao, bnk->as_type, tao->stepdirection);

    /* Perform one last line search with the fall-back step */
    TaoLineSearchApply(tao->linesearch, tao->solution, &bnk->f, bnk->unprojected_gradient, tao->stepdirection, steplen, &ls_reason);
    TaoAddLineSearchCounts(tao);
  }
  *reason = ls_reason;
  return(0);
}
824: /*------------------------------------------------------------*/
826: /* Routine for updating the trust radius.
828: Function features three different update methods:
829: 1) Line-search step length based
830: 2) Predicted decrease on the CG quadratic model
831: 3) Interpolation
832: */
834: PetscErrorCode TaoBNKUpdateTrustRadius(Tao tao, PetscReal prered, PetscReal actred, PetscInt updateType, PetscInt stepType, PetscBool *accept)
835: {
836: TAO_BNK *bnk = (TAO_BNK *)tao->data;
838:
839: PetscReal step, kappa;
840: PetscReal gdx, tau_1, tau_2, tau_min, tau_max;
843: /* Update trust region radius */
844: *accept = PETSC_FALSE;
845: switch(updateType) {
846: case BNK_UPDATE_STEP:
847: *accept = PETSC_TRUE; /* always accept here because line search succeeded */
848: if (stepType == BNK_NEWTON) {
849: TaoLineSearchGetStepLength(tao->linesearch, &step);
850: if (step < bnk->nu1) {
851: /* Very bad step taken; reduce radius */
852: tao->trust = bnk->omega1 * PetscMin(bnk->dnorm, tao->trust);
853: } else if (step < bnk->nu2) {
854: /* Reasonably bad step taken; reduce radius */
855: tao->trust = bnk->omega2 * PetscMin(bnk->dnorm, tao->trust);
856: } else if (step < bnk->nu3) {
857: /* Reasonable step was taken; leave radius alone */
858: if (bnk->omega3 < 1.0) {
859: tao->trust = bnk->omega3 * PetscMin(bnk->dnorm, tao->trust);
860: } else if (bnk->omega3 > 1.0) {
861: tao->trust = PetscMax(bnk->omega3 * bnk->dnorm, tao->trust);
862: }
863: } else if (step < bnk->nu4) {
864: /* Full step taken; increase the radius */
865: tao->trust = PetscMax(bnk->omega4 * bnk->dnorm, tao->trust);
866: } else {
867: /* More than full step taken; increase the radius */
868: tao->trust = PetscMax(bnk->omega5 * bnk->dnorm, tao->trust);
869: }
870: } else {
871: /* Newton step was not good; reduce the radius */
872: tao->trust = bnk->omega1 * PetscMin(bnk->dnorm, tao->trust);
873: }
874: break;
876: case BNK_UPDATE_REDUCTION:
877: if (stepType == BNK_NEWTON) {
878: if ((prered < 0.0) || PetscIsInfOrNanReal(prered)) {
879: /* The predicted reduction has the wrong sign. This cannot
880: happen in infinite precision arithmetic. Step should
881: be rejected! */
882: tao->trust = bnk->alpha1 * PetscMin(tao->trust, bnk->dnorm);
883: } else {
884: if (PetscIsInfOrNanReal(actred)) {
885: tao->trust = bnk->alpha1 * PetscMin(tao->trust, bnk->dnorm);
886: } else {
887: if ((PetscAbsScalar(actred) <= PetscMax(1.0, PetscAbsScalar(bnk->f))*bnk->epsilon) && (PetscAbsScalar(prered) <= PetscMax(1.0, PetscAbsScalar(bnk->f))*bnk->epsilon)) {
888: kappa = 1.0;
889: } else {
890: kappa = actred / prered;
891: }
892: /* Accept or reject the step and update radius */
893: if (kappa < bnk->eta1) {
894: /* Reject the step */
895: tao->trust = bnk->alpha1 * PetscMin(tao->trust, bnk->dnorm);
896: } else {
897: /* Accept the step */
898: *accept = PETSC_TRUE;
899: /* Update the trust region radius only if the computed step is at the trust radius boundary */
900: if (bnk->dnorm == tao->trust) {
901: if (kappa < bnk->eta2) {
902: /* Marginal bad step */
903: tao->trust = bnk->alpha2 * tao->trust;
904: } else if (kappa < bnk->eta3) {
905: /* Reasonable step */
906: tao->trust = bnk->alpha3 * tao->trust;
907: } else if (kappa < bnk->eta4) {
908: /* Good step */
909: tao->trust = bnk->alpha4 * tao->trust;
910: } else {
911: /* Very good step */
912: tao->trust = bnk->alpha5 * tao->trust;
913: }
914: }
915: }
916: }
917: }
918: } else {
919: /* Newton step was not good; reduce the radius */
920: tao->trust = bnk->alpha1 * PetscMin(bnk->dnorm, tao->trust);
921: }
922: break;
924: default:
925: if (stepType == BNK_NEWTON) {
926: if (prered < 0.0) {
927: /* The predicted reduction has the wrong sign. This cannot */
928: /* happen in infinite precision arithmetic. Step should */
929: /* be rejected! */
930: tao->trust = bnk->gamma1 * PetscMin(tao->trust, bnk->dnorm);
931: } else {
932: if (PetscIsInfOrNanReal(actred)) {
933: tao->trust = bnk->gamma1 * PetscMin(tao->trust, bnk->dnorm);
934: } else {
935: if ((PetscAbsScalar(actred) <= bnk->epsilon) && (PetscAbsScalar(prered) <= bnk->epsilon)) {
936: kappa = 1.0;
937: } else {
938: kappa = actred / prered;
939: }
940:
941: VecDot(tao->gradient, tao->stepdirection, &gdx);
942: tau_1 = bnk->theta * gdx / (bnk->theta * gdx - (1.0 - bnk->theta) * prered + actred);
943: tau_2 = bnk->theta * gdx / (bnk->theta * gdx + (1.0 + bnk->theta) * prered - actred);
944: tau_min = PetscMin(tau_1, tau_2);
945: tau_max = PetscMax(tau_1, tau_2);
947: if (kappa >= 1.0 - bnk->mu1) {
948: /* Great agreement */
949: *accept = PETSC_TRUE;
950: if (tau_max < 1.0) {
951: tao->trust = PetscMax(tao->trust, bnk->gamma3 * bnk->dnorm);
952: } else if (tau_max > bnk->gamma4) {
953: tao->trust = PetscMax(tao->trust, bnk->gamma4 * bnk->dnorm);
954: } else {
955: tao->trust = PetscMax(tao->trust, tau_max * bnk->dnorm);
956: }
957: } else if (kappa >= 1.0 - bnk->mu2) {
958: /* Good agreement */
959: *accept = PETSC_TRUE;
960: if (tau_max < bnk->gamma2) {
961: tao->trust = bnk->gamma2 * PetscMin(tao->trust, bnk->dnorm);
962: } else if (tau_max > bnk->gamma3) {
963: tao->trust = PetscMax(tao->trust, bnk->gamma3 * bnk->dnorm);
964: } else if (tau_max < 1.0) {
965: tao->trust = tau_max * PetscMin(tao->trust, bnk->dnorm);
966: } else {
967: tao->trust = PetscMax(tao->trust, tau_max * bnk->dnorm);
968: }
969: } else {
970: /* Not good agreement */
971: if (tau_min > 1.0) {
972: tao->trust = bnk->gamma2 * PetscMin(tao->trust, bnk->dnorm);
973: } else if (tau_max < bnk->gamma1) {
974: tao->trust = bnk->gamma1 * PetscMin(tao->trust, bnk->dnorm);
975: } else if ((tau_min < bnk->gamma1) && (tau_max >= 1.0)) {
976: tao->trust = bnk->gamma1 * PetscMin(tao->trust, bnk->dnorm);
977: } else if ((tau_1 >= bnk->gamma1) && (tau_1 < 1.0) && ((tau_2 < bnk->gamma1) || (tau_2 >= 1.0))) {
978: tao->trust = tau_1 * PetscMin(tao->trust, bnk->dnorm);
979: } else if ((tau_2 >= bnk->gamma1) && (tau_2 < 1.0) && ((tau_1 < bnk->gamma1) || (tau_2 >= 1.0))) {
980: tao->trust = tau_2 * PetscMin(tao->trust, bnk->dnorm);
981: } else {
982: tao->trust = tau_max * PetscMin(tao->trust, bnk->dnorm);
983: }
984: }
985: }
986: }
987: } else {
988: /* Newton step was not good; reduce the radius */
989: tao->trust = bnk->gamma1 * PetscMin(bnk->dnorm, tao->trust);
990: }
991: break;
992: }
993: /* Make sure the radius does not violate min and max settings */
994: tao->trust = PetscMin(tao->trust, bnk->max_radius);
995: tao->trust = PetscMax(tao->trust, bnk->min_radius);
996: return(0);
997: }
999: /* ---------------------------------------------------------- */
1001: PetscErrorCode TaoBNKAddStepCounts(Tao tao, PetscInt stepType)
1002: {
1003: TAO_BNK *bnk = (TAO_BNK *)tao->data;
1004:
1006: switch (stepType) {
1007: case BNK_NEWTON:
1008: ++bnk->newt;
1009: break;
1010: case BNK_BFGS:
1011: ++bnk->bfgs;
1012: break;
1013: case BNK_SCALED_GRADIENT:
1014: ++bnk->sgrad;
1015: break;
1016: case BNK_GRADIENT:
1017: ++bnk->grad;
1018: break;
1019: default:
1020: break;
1021: }
1022: return(0);
1023: }
1025: /* ---------------------------------------------------------- */
/*
  TaoSetUp_BNK - Allocates the work vectors needed by the solver and, when the
  embedded BNCG solver will be used (max_cg_its > 0), wires BNCG to share the
  outer solver's vectors and user callbacks. The active-set index sets and
  Hessian submatrices are reset to NULL here; they are created during the solve.
*/
PetscErrorCode TaoSetUp_BNK(Tao tao)
{
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  PetscInt i;

  /* Lazily create each work vector with the same layout as the solution */
  if (!tao->gradient) {
    VecDuplicate(tao->solution,&tao->gradient);
  }
  if (!tao->stepdirection) {
    VecDuplicate(tao->solution,&tao->stepdirection);
  }
  if (!bnk->W) {
    VecDuplicate(tao->solution,&bnk->W);
  }
  if (!bnk->Xold) {
    VecDuplicate(tao->solution,&bnk->Xold);
  }
  if (!bnk->Gold) {
    VecDuplicate(tao->solution,&bnk->Gold);
  }
  if (!bnk->Xwork) {
    VecDuplicate(tao->solution,&bnk->Xwork);
  }
  if (!bnk->Gwork) {
    VecDuplicate(tao->solution,&bnk->Gwork);
  }
  if (!bnk->unprojected_gradient) {
    VecDuplicate(tao->solution,&bnk->unprojected_gradient);
  }
  if (!bnk->unprojected_gradient_old) {
    VecDuplicate(tao->solution,&bnk->unprojected_gradient_old);
  }
  if (!bnk->Diag_min) {
    VecDuplicate(tao->solution,&bnk->Diag_min);
  }
  if (!bnk->Diag_max) {
    VecDuplicate(tao->solution,&bnk->Diag_max);
  }
  if (bnk->max_cg_its > 0) {
    /* Ensure that the important common vectors are shared between BNK and embedded BNCG */
    /* Pattern for each shared vector: take a reference first so the vector
       survives the destroy of BNCG's own copy, then alias it into BNCG */
    bnk->bncg_ctx = (TAO_BNCG *)bnk->bncg->data;
    PetscObjectReference((PetscObject)(bnk->unprojected_gradient_old));
    VecDestroy(&bnk->bncg_ctx->unprojected_gradient_old);
    bnk->bncg_ctx->unprojected_gradient_old = bnk->unprojected_gradient_old;
    PetscObjectReference((PetscObject)(bnk->unprojected_gradient));
    VecDestroy(&bnk->bncg_ctx->unprojected_gradient);
    bnk->bncg_ctx->unprojected_gradient = bnk->unprojected_gradient;
    PetscObjectReference((PetscObject)(bnk->Gold));
    VecDestroy(&bnk->bncg_ctx->G_old);
    bnk->bncg_ctx->G_old = bnk->Gold;
    PetscObjectReference((PetscObject)(tao->gradient));
    VecDestroy(&bnk->bncg->gradient);
    bnk->bncg->gradient = tao->gradient;
    PetscObjectReference((PetscObject)(tao->stepdirection));
    VecDestroy(&bnk->bncg->stepdirection);
    bnk->bncg->stepdirection = tao->stepdirection;
    TaoSetInitialVector(bnk->bncg, tao->solution);
    /* Copy over some settings from BNK into BNCG */
    TaoSetMaximumIterations(bnk->bncg, bnk->max_cg_its);
    TaoSetTolerances(bnk->bncg, tao->gatol, tao->grtol, tao->gttol);
    TaoSetFunctionLowerBound(bnk->bncg, tao->fmin);
    TaoSetConvergenceTest(bnk->bncg, tao->ops->convergencetest, tao->cnvP);
    TaoSetObjectiveRoutine(bnk->bncg, tao->ops->computeobjective, tao->user_objP);
    TaoSetGradientRoutine(bnk->bncg, tao->ops->computegradient, tao->user_gradP);
    TaoSetObjectiveAndGradientRoutine(bnk->bncg, tao->ops->computeobjectiveandgradient, tao->user_objgradP);
    PetscObjectCopyFortranFunctionPointers((PetscObject)tao, (PetscObject)(bnk->bncg));
    /* Forward the outer solver's monitors; reference each context so it is
       not destroyed while BNCG still holds it */
    for (i=0; i<tao->numbermonitors; ++i) {
      TaoSetMonitor(bnk->bncg, tao->monitor[i], tao->monitorcontext[i], tao->monitordestroy[i]);
      PetscObjectReference((PetscObject)(tao->monitorcontext[i]));
    }
  }
  /* Active-set bookkeeping objects are created later, during the solve */
  bnk->X_inactive = 0;
  bnk->G_inactive = 0;
  bnk->inactive_work = 0;
  bnk->active_work = 0;
  bnk->inactive_idx = 0;
  bnk->active_idx = 0;
  bnk->active_lower = 0;
  bnk->active_upper = 0;
  bnk->active_fixed = 0;
  bnk->M = 0;
  bnk->H_inactive = 0;
  bnk->Hpre_inactive = 0;
  return(0);
}
1115: /*------------------------------------------------------------*/
/*
  TaoDestroy_BNK - Releases all resources owned by the BNK solver: work vectors
  (only if TaoSetUp was called), active-set index sets, reduced Hessian
  matrices, the embedded BNCG solver, and the context struct itself.
*/
PetscErrorCode TaoDestroy_BNK(Tao tao)
{
  TAO_BNK *bnk = (TAO_BNK *)tao->data;

  /* Work vectors exist only after setup */
  if (tao->setupcalled) {
    VecDestroy(&bnk->W);
    VecDestroy(&bnk->Xold);
    VecDestroy(&bnk->Gold);
    VecDestroy(&bnk->Xwork);
    VecDestroy(&bnk->Gwork);
    VecDestroy(&bnk->unprojected_gradient);
    VecDestroy(&bnk->unprojected_gradient_old);
    VecDestroy(&bnk->Diag_min);
    VecDestroy(&bnk->Diag_max);
  }
  /* These destroy routines are safe on NULL handles */
  ISDestroy(&bnk->active_lower);
  ISDestroy(&bnk->active_upper);
  ISDestroy(&bnk->active_fixed);
  ISDestroy(&bnk->active_idx);
  ISDestroy(&bnk->inactive_idx);
  MatDestroy(&bnk->Hpre_inactive);
  MatDestroy(&bnk->H_inactive);
  TaoDestroy(&bnk->bncg);
  PetscFree(tao->data);
  return(0);
}
1146: /*------------------------------------------------------------*/
/*
  TaoSetFromOptions_BNK - Processes command-line/database options for the BNK
  solver, then propagates option processing to the embedded BNCG solver, the
  line search, and the KSP. Finally the KSP type is inspected to record whether
  a trust-region-capable Krylov method (NASH/STCG/GLTR) is in use.
*/
PetscErrorCode TaoSetFromOptions_BNK(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_BNK *bnk = (TAO_BNK *)tao->data;
  KSPType ksp_type;

  PetscOptionsHead(PetscOptionsObject,"Newton-Krylov method for bound constrained optimization");
  PetscOptionsEList("-tao_bnk_init_type", "radius initialization type", "", BNK_INIT, BNK_INIT_TYPES, BNK_INIT[bnk->init_type], &bnk->init_type, 0);
  PetscOptionsEList("-tao_bnk_update_type", "radius update type", "", BNK_UPDATE, BNK_UPDATE_TYPES, BNK_UPDATE[bnk->update_type], &bnk->update_type, 0);
  PetscOptionsEList("-tao_bnk_as_type", "active set estimation method", "", BNK_AS, BNK_AS_TYPES, BNK_AS[bnk->as_type], &bnk->as_type, 0);
  PetscOptionsReal("-tao_bnk_sval", "(developer) Hessian perturbation starting value", "", bnk->sval, &bnk->sval,NULL);
  PetscOptionsReal("-tao_bnk_imin", "(developer) minimum initial Hessian perturbation", "", bnk->imin, &bnk->imin,NULL);
  PetscOptionsReal("-tao_bnk_imax", "(developer) maximum initial Hessian perturbation", "", bnk->imax, &bnk->imax,NULL);
  PetscOptionsReal("-tao_bnk_imfac", "(developer) initial merit factor for Hessian perturbation", "", bnk->imfac, &bnk->imfac,NULL);
  PetscOptionsReal("-tao_bnk_pmin", "(developer) minimum Hessian perturbation", "", bnk->pmin, &bnk->pmin,NULL);
  PetscOptionsReal("-tao_bnk_pmax", "(developer) maximum Hessian perturbation", "", bnk->pmax, &bnk->pmax,NULL);
  PetscOptionsReal("-tao_bnk_pgfac", "(developer) Hessian perturbation growth factor", "", bnk->pgfac, &bnk->pgfac,NULL);
  PetscOptionsReal("-tao_bnk_psfac", "(developer) Hessian perturbation shrink factor", "", bnk->psfac, &bnk->psfac,NULL);
  PetscOptionsReal("-tao_bnk_pmgfac", "(developer) merit growth factor for Hessian perturbation", "", bnk->pmgfac, &bnk->pmgfac,NULL);
  PetscOptionsReal("-tao_bnk_pmsfac", "(developer) merit shrink factor for Hessian perturbation", "", bnk->pmsfac, &bnk->pmsfac,NULL);
  PetscOptionsReal("-tao_bnk_eta1", "(developer) threshold for rejecting step (-tao_bnk_update_type reduction)", "", bnk->eta1, &bnk->eta1,NULL);
  PetscOptionsReal("-tao_bnk_eta2", "(developer) threshold for accepting marginal step (-tao_bnk_update_type reduction)", "", bnk->eta2, &bnk->eta2,NULL);
  PetscOptionsReal("-tao_bnk_eta3", "(developer) threshold for accepting reasonable step (-tao_bnk_update_type reduction)", "", bnk->eta3, &bnk->eta3,NULL);
  PetscOptionsReal("-tao_bnk_eta4", "(developer) threshold for accepting good step (-tao_bnk_update_type reduction)", "", bnk->eta4, &bnk->eta4,NULL);
  PetscOptionsReal("-tao_bnk_alpha1", "(developer) radius reduction factor for rejected step (-tao_bnk_update_type reduction)", "", bnk->alpha1, &bnk->alpha1,NULL);
  PetscOptionsReal("-tao_bnk_alpha2", "(developer) radius reduction factor for marginally accepted bad step (-tao_bnk_update_type reduction)", "", bnk->alpha2, &bnk->alpha2,NULL);
  PetscOptionsReal("-tao_bnk_alpha3", "(developer) radius increase factor for reasonable accepted step (-tao_bnk_update_type reduction)", "", bnk->alpha3, &bnk->alpha3,NULL);
  PetscOptionsReal("-tao_bnk_alpha4", "(developer) radius increase factor for good accepted step (-tao_bnk_update_type reduction)", "", bnk->alpha4, &bnk->alpha4,NULL);
  PetscOptionsReal("-tao_bnk_alpha5", "(developer) radius increase factor for very good accepted step (-tao_bnk_update_type reduction)", "", bnk->alpha5, &bnk->alpha5,NULL);
  PetscOptionsReal("-tao_bnk_nu1", "(developer) threshold for small line-search step length (-tao_bnk_update_type step)", "", bnk->nu1, &bnk->nu1,NULL);
  PetscOptionsReal("-tao_bnk_nu2", "(developer) threshold for reasonable line-search step length (-tao_bnk_update_type step)", "", bnk->nu2, &bnk->nu2,NULL);
  PetscOptionsReal("-tao_bnk_nu3", "(developer) threshold for large line-search step length (-tao_bnk_update_type step)", "", bnk->nu3, &bnk->nu3,NULL);
  PetscOptionsReal("-tao_bnk_nu4", "(developer) threshold for very large line-search step length (-tao_bnk_update_type step)", "", bnk->nu4, &bnk->nu4,NULL);
  PetscOptionsReal("-tao_bnk_omega1", "(developer) radius reduction factor for very small line-search step length (-tao_bnk_update_type step)", "", bnk->omega1, &bnk->omega1,NULL);
  PetscOptionsReal("-tao_bnk_omega2", "(developer) radius reduction factor for small line-search step length (-tao_bnk_update_type step)", "", bnk->omega2, &bnk->omega2,NULL);
  PetscOptionsReal("-tao_bnk_omega3", "(developer) radius factor for decent line-search step length (-tao_bnk_update_type step)", "", bnk->omega3, &bnk->omega3,NULL);
  PetscOptionsReal("-tao_bnk_omega4", "(developer) radius increase factor for large line-search step length (-tao_bnk_update_type step)", "", bnk->omega4, &bnk->omega4,NULL);
  PetscOptionsReal("-tao_bnk_omega5", "(developer) radius increase factor for very large line-search step length (-tao_bnk_update_type step)", "", bnk->omega5, &bnk->omega5,NULL);
  PetscOptionsReal("-tao_bnk_mu1_i", "(developer) threshold for accepting very good step (-tao_bnk_init_type interpolation)", "", bnk->mu1_i, &bnk->mu1_i,NULL);
  PetscOptionsReal("-tao_bnk_mu2_i", "(developer) threshold for accepting good step (-tao_bnk_init_type interpolation)", "", bnk->mu2_i, &bnk->mu2_i,NULL);
  PetscOptionsReal("-tao_bnk_gamma1_i", "(developer) radius reduction factor for rejected very bad step (-tao_bnk_init_type interpolation)", "", bnk->gamma1_i, &bnk->gamma1_i,NULL);
  PetscOptionsReal("-tao_bnk_gamma2_i", "(developer) radius reduction factor for rejected bad step (-tao_bnk_init_type interpolation)", "", bnk->gamma2_i, &bnk->gamma2_i,NULL);
  PetscOptionsReal("-tao_bnk_gamma3_i", "(developer) radius increase factor for accepted good step (-tao_bnk_init_type interpolation)", "", bnk->gamma3_i, &bnk->gamma3_i,NULL);
  PetscOptionsReal("-tao_bnk_gamma4_i", "(developer) radius increase factor for accepted very good step (-tao_bnk_init_type interpolation)", "", bnk->gamma4_i, &bnk->gamma4_i,NULL);
  PetscOptionsReal("-tao_bnk_theta_i", "(developer) trust region interpolation factor (-tao_bnk_init_type interpolation)", "", bnk->theta_i, &bnk->theta_i,NULL);
  PetscOptionsReal("-tao_bnk_mu1", "(developer) threshold for accepting very good step (-tao_bnk_update_type interpolation)", "", bnk->mu1, &bnk->mu1,NULL);
  PetscOptionsReal("-tao_bnk_mu2", "(developer) threshold for accepting good step (-tao_bnk_update_type interpolation)", "", bnk->mu2, &bnk->mu2,NULL);
  PetscOptionsReal("-tao_bnk_gamma1", "(developer) radius reduction factor for rejected very bad step (-tao_bnk_update_type interpolation)", "", bnk->gamma1, &bnk->gamma1,NULL);
  PetscOptionsReal("-tao_bnk_gamma2", "(developer) radius reduction factor for rejected bad step (-tao_bnk_update_type interpolation)", "", bnk->gamma2, &bnk->gamma2,NULL);
  PetscOptionsReal("-tao_bnk_gamma3", "(developer) radius increase factor for accepted good step (-tao_bnk_update_type interpolation)", "", bnk->gamma3, &bnk->gamma3,NULL);
  PetscOptionsReal("-tao_bnk_gamma4", "(developer) radius increase factor for accepted very good step (-tao_bnk_update_type interpolation)", "", bnk->gamma4, &bnk->gamma4,NULL);
  PetscOptionsReal("-tao_bnk_theta", "(developer) trust region interpolation factor (-tao_bnk_update_type interpolation)", "", bnk->theta, &bnk->theta,NULL);
  PetscOptionsReal("-tao_bnk_min_radius", "(developer) lower bound on initial radius", "", bnk->min_radius, &bnk->min_radius,NULL);
  PetscOptionsReal("-tao_bnk_max_radius", "(developer) upper bound on radius", "", bnk->max_radius, &bnk->max_radius,NULL);
  PetscOptionsReal("-tao_bnk_epsilon", "(developer) tolerance used when computing actual and predicted reduction", "", bnk->epsilon, &bnk->epsilon,NULL);
  PetscOptionsReal("-tao_bnk_as_tol", "(developer) initial tolerance used when estimating actively bounded variables", "", bnk->as_tol, &bnk->as_tol,NULL);
  PetscOptionsReal("-tao_bnk_as_step", "(developer) step length used when estimating actively bounded variables", "", bnk->as_step, &bnk->as_step,NULL);
  PetscOptionsInt("-tao_bnk_max_cg_its", "number of BNCG iterations to take for each Newton step", "", bnk->max_cg_its, &bnk->max_cg_its,NULL);
  PetscOptionsTail();
  /* Let the sub-solvers pick up their own options */
  TaoSetFromOptions(bnk->bncg);
  TaoLineSearchSetFromOptions(tao->linesearch);
  KSPSetFromOptions(tao->ksp);
  /* Record which (if any) trust-region KSP was selected */
  KSPGetType(tao->ksp,&ksp_type);
  PetscStrcmp(ksp_type,KSPNASH,&bnk->is_nash);
  PetscStrcmp(ksp_type,KSPSTCG,&bnk->is_stcg);
  PetscStrcmp(ksp_type,KSPGLTR,&bnk->is_gltr);
  return(0);
}
1218: /*------------------------------------------------------------*/
1220: PetscErrorCode TaoView_BNK(Tao tao, PetscViewer viewer)
1221: {
1222: TAO_BNK *bnk = (TAO_BNK *)tao->data;
1223: PetscInt nrejects;
1224: PetscBool isascii;
1228: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
1229: if (isascii) {
1230: PetscViewerASCIIPushTab(viewer);
1231: if (bnk->M) {
1232: MatLMVMGetRejectCount(bnk->M,&nrejects);
1233: PetscViewerASCIIPrintf(viewer, "Rejected BFGS updates: %D\n",nrejects);
1234: }
1235: PetscViewerASCIIPrintf(viewer, "CG steps: %D\n", bnk->tot_cg_its);
1236: PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", bnk->newt);
1237: if (bnk->M) {
1238: PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", bnk->bfgs);
1239: }
1240: PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", bnk->sgrad);
1241: PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", bnk->grad);
1242: PetscViewerASCIIPrintf(viewer, "KSP termination reasons:\n");
1243: PetscViewerASCIIPrintf(viewer, " atol: %D\n", bnk->ksp_atol);
1244: PetscViewerASCIIPrintf(viewer, " rtol: %D\n", bnk->ksp_rtol);
1245: PetscViewerASCIIPrintf(viewer, " ctol: %D\n", bnk->ksp_ctol);
1246: PetscViewerASCIIPrintf(viewer, " negc: %D\n", bnk->ksp_negc);
1247: PetscViewerASCIIPrintf(viewer, " dtol: %D\n", bnk->ksp_dtol);
1248: PetscViewerASCIIPrintf(viewer, " iter: %D\n", bnk->ksp_iter);
1249: PetscViewerASCIIPrintf(viewer, " othr: %D\n", bnk->ksp_othr);
1250: PetscViewerASCIIPopTab(viewer);
1251: }
1252: return(0);
1253: }
1255: /* ---------------------------------------------------------- */
1257: /*MC
1258: TAOBNK - Shared base-type for Bounded Newton-Krylov type algorithms.
1259: At each iteration, the BNK methods solve the symmetric
1260:    system of equations to obtain the step direction dk:
1261: Hk dk = -gk
1262: for free variables only. The step can be globalized either through
1263: trust-region methods, or a line search, or a heuristic mixture of both.
1265: Options Database Keys:
1266: + -max_cg_its - maximum number of bounded conjugate-gradient iterations taken in each Newton loop
1267: . -init_type - trust radius initialization method ("constant", "direction", "interpolation")
1268: . -update_type - trust radius update method ("step", "reduction", "interpolation")
1269: . -as_type - active-set estimation method ("none", "bertsekas")
1270: . -as_tol - (developer) initial tolerance used in estimating bounded active variables (-as_type bertsekas)
1271: . -as_step - (developer) trial step length used in estimating bounded active variables (-as_type bertsekas)
1272: . -sval - (developer) Hessian perturbation starting value
1273: . -imin - (developer) minimum initial Hessian perturbation
1274: . -imax - (developer) maximum initial Hessian perturbation
1275: . -pmin - (developer) minimum Hessian perturbation
1276: . -pmax - (developer) maximum Hessian perturbation
1277: . -pgfac - (developer) Hessian perturbation growth factor
1278: . -psfac - (developer) Hessian perturbation shrink factor
1279: . -imfac - (developer) initial merit factor for Hessian perturbation
1280: . -pmgfac - (developer) merit growth factor for Hessian perturbation
1281: . -pmsfac - (developer) merit shrink factor for Hessian perturbation
1282: . -eta1 - (developer) threshold for rejecting step (-update_type reduction)
1283: . -eta2 - (developer) threshold for accepting marginal step (-update_type reduction)
1284: . -eta3 - (developer) threshold for accepting reasonable step (-update_type reduction)
1285: . -eta4 - (developer) threshold for accepting good step (-update_type reduction)
1286: . -alpha1 - (developer) radius reduction factor for rejected step (-update_type reduction)
1287: . -alpha2 - (developer) radius reduction factor for marginally accepted bad step (-update_type reduction)
1288: . -alpha3 - (developer) radius increase factor for reasonable accepted step (-update_type reduction)
1289: . -alpha4 - (developer) radius increase factor for good accepted step (-update_type reduction)
1290: . -alpha5 - (developer) radius increase factor for very good accepted step (-update_type reduction)
1291: . -epsilon - (developer) tolerance for small pred/actual ratios that trigger automatic step acceptance (-update_type reduction)
1292: . -mu1 - (developer) threshold for accepting very good step (-update_type interpolation)
1293: . -mu2 - (developer) threshold for accepting good step (-update_type interpolation)
1294: . -gamma1 - (developer) radius reduction factor for rejected very bad step (-update_type interpolation)
1295: . -gamma2 - (developer) radius reduction factor for rejected bad step (-update_type interpolation)
1296: . -gamma3 - (developer) radius increase factor for accepted good step (-update_type interpolation)
1297: . -gamma4 - (developer) radius increase factor for accepted very good step (-update_type interpolation)
1298: . -theta - (developer) trust region interpolation factor (-update_type interpolation)
1299: . -nu1 - (developer) threshold for small line-search step length (-update_type step)
1300: . -nu2 - (developer) threshold for reasonable line-search step length (-update_type step)
1301: . -nu3 - (developer) threshold for large line-search step length (-update_type step)
1302: . -nu4 - (developer) threshold for very large line-search step length (-update_type step)
1303: . -omega1 - (developer) radius reduction factor for very small line-search step length (-update_type step)
1304: . -omega2 - (developer) radius reduction factor for small line-search step length (-update_type step)
1305: . -omega3 - (developer) radius factor for decent line-search step length (-update_type step)
1306: . -omega4 - (developer) radius increase factor for large line-search step length (-update_type step)
1307: . -omega5 - (developer) radius increase factor for very large line-search step length (-update_type step)
1308: . -mu1_i - (developer) threshold for accepting very good step (-init_type interpolation)
1309: . -mu2_i - (developer) threshold for accepting good step (-init_type interpolation)
1310: . -gamma1_i - (developer) radius reduction factor for rejected very bad step (-init_type interpolation)
1311: . -gamma2_i - (developer) radius reduction factor for rejected bad step (-init_type interpolation)
1312: . -gamma3_i - (developer) radius increase factor for accepted good step (-init_type interpolation)
1313: . -gamma4_i - (developer) radius increase factor for accepted very good step (-init_type interpolation)
1314: - -theta_i - (developer) trust region interpolation factor (-init_type interpolation)
1316: Level: beginner
1317: M*/
/*
  TaoCreate_BNK - Constructor for the shared BNK base type: allocates the
  context struct, installs the ops table, sets all algorithmic defaults, and
  creates the three sub-solvers (embedded BNCG, More-Thuente line search, and
  an STCG KSP with an LMVM preconditioner).
*/
PetscErrorCode TaoCreate_BNK(Tao tao)
{
  TAO_BNK *bnk;
  const char *morethuente_type = TAOLINESEARCHMT;
  PC pc;

  PetscNewLog(tao,&bnk);

  tao->ops->setup = TaoSetUp_BNK;
  tao->ops->view = TaoView_BNK;
  tao->ops->setfromoptions = TaoSetFromOptions_BNK;
  tao->ops->destroy = TaoDestroy_BNK;

  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 50;
  if (!tao->trust0_changed) tao->trust0 = 100.0;

  tao->data = (void*)bnk;

  /* Hessian shifting parameters */
  bnk->computehessian = TaoBNKComputeHessian;
  bnk->computestep = TaoBNKComputeStep;

  bnk->sval   = 0.0;
  bnk->imin   = 1.0e-4;
  bnk->imax   = 1.0e+2;
  bnk->imfac  = 1.0e-1;

  bnk->pmin   = 1.0e-12;
  bnk->pmax   = 1.0e+2;
  bnk->pgfac  = 1.0e+1;
  bnk->psfac  = 4.0e-1;
  bnk->pmgfac = 1.0e-1;
  bnk->pmsfac = 1.0e-1;

  /* Default values for trust-region radius update based on steplength */
  bnk->nu1 = 0.25;
  bnk->nu2 = 0.50;
  bnk->nu3 = 1.00;
  bnk->nu4 = 1.25;

  bnk->omega1 = 0.25;
  bnk->omega2 = 0.50;
  bnk->omega3 = 1.00;
  bnk->omega4 = 2.00;
  bnk->omega5 = 4.00;

  /* Default values for trust-region radius update based on reduction */
  bnk->eta1 = 1.0e-4;
  bnk->eta2 = 0.25;
  bnk->eta3 = 0.50;
  bnk->eta4 = 0.90;

  bnk->alpha1 = 0.25;
  bnk->alpha2 = 0.50;
  bnk->alpha3 = 1.00;
  bnk->alpha4 = 2.00;
  bnk->alpha5 = 4.00;

  /* Default values for trust-region radius update based on interpolation */
  bnk->mu1 = 0.10;
  bnk->mu2 = 0.50;

  bnk->gamma1 = 0.25;
  bnk->gamma2 = 0.50;
  bnk->gamma3 = 2.00;
  bnk->gamma4 = 4.00;

  bnk->theta = 0.05;

  /* Default values for trust region initialization based on interpolation */
  bnk->mu1_i = 0.35;
  bnk->mu2_i = 0.50;

  bnk->gamma1_i = 0.0625;
  bnk->gamma2_i = 0.5;
  bnk->gamma3_i = 2.0;
  bnk->gamma4_i = 5.0;

  bnk->theta_i = 0.25;

  /* Remaining parameters */
  bnk->max_cg_its = 0;
  bnk->min_radius = 1.0e-10;
  bnk->max_radius = 1.0e10;
  bnk->epsilon = PetscPowReal(PETSC_MACHINE_EPSILON, 2.0/3.0);
  bnk->as_tol = 1.0e-3;
  bnk->as_step = 1.0e-3;
  bnk->dmin = 1.0e-6;
  bnk->dmax = 1.0e6;

  bnk->M = 0;
  bnk->bfgs_pre = 0;
  bnk->init_type = BNK_INIT_INTERPOLATION;
  bnk->update_type = BNK_UPDATE_REDUCTION;
  bnk->as_type = BNK_AS_BERTSEKAS;

  /* Create the embedded BNCG solver */
  TaoCreate(PetscObjectComm((PetscObject)tao), &bnk->bncg);
  PetscObjectIncrementTabLevel((PetscObject)bnk->bncg, (PetscObject)tao, 1);
  TaoSetOptionsPrefix(bnk->bncg, "tao_bnk_");
  TaoSetType(bnk->bncg, TAOBNCG);

  /* Create the line search */
  TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
  PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
  TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
  TaoLineSearchSetType(tao->linesearch,morethuente_type);
  TaoLineSearchUseTaoRoutines(tao->linesearch,tao);

  /* Set linear solver to default for symmetric matrices */
  KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
  PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1);
  KSPSetOptionsPrefix(tao->ksp,"tao_bnk_");
  KSPSetType(tao->ksp,KSPSTCG);
  KSPGetPC(tao->ksp, &pc);
  PCSetType(pc, PCLMVM);
  return(0);
}