Actual source code: ntl.c

petsc-3.6.4 2016-04-12
  1: #include <../src/tao/matrix/lmvmmat.h>
  2: #include <../src/tao/unconstrained/impls/ntl/ntl.h>

  4: #include <petscksp.h>
  5: #include <petscpc.h>
  6: #include <petsc/private/kspimpl.h>
  7: #include <petsc/private/pcimpl.h>

  9: #define NTL_KSP_NASH    0
 10: #define NTL_KSP_STCG    1
 11: #define NTL_KSP_GLTR    2
 12: #define NTL_KSP_TYPES   3

 14: #define NTL_PC_NONE     0
 15: #define NTL_PC_AHESS    1
 16: #define NTL_PC_BFGS     2
 17: #define NTL_PC_PETSC    3
 18: #define NTL_PC_TYPES    4

 20: #define BFGS_SCALE_AHESS        0
 21: #define BFGS_SCALE_BFGS         1
 22: #define BFGS_SCALE_TYPES        2

 24: #define NTL_INIT_CONSTANT         0
 25: #define NTL_INIT_DIRECTION        1
 26: #define NTL_INIT_INTERPOLATION    2
 27: #define NTL_INIT_TYPES            3

 29: #define NTL_UPDATE_REDUCTION      0
 30: #define NTL_UPDATE_INTERPOLATION  1
 31: #define NTL_UPDATE_TYPES          2

 33: static const char *NTL_KSP[64] = {"nash", "stcg", "gltr"};

 35: static const char *NTL_PC[64] = {"none", "ahess", "bfgs", "petsc"};

 37: static const char *BFGS_SCALE[64] = {"ahess", "bfgs"};

 39: static const char *NTL_INIT[64] = {"constant", "direction", "interpolation"};

 41: static const char *NTL_UPDATE[64] = {"reduction", "interpolation"};

 43: /* Routine for BFGS preconditioner */
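/* Installed below (in TaoSolve_NTL) as the apply routine of a PCSHELL
   preconditioner: the LMVM matrix is stored as the shell context, so applying
   the preconditioner to a vector b reduces to MatLMVMSolve(M, b, x), i.e.
   multiplication by the inverse BFGS approximation. */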

 47: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
 48: {
 50:   Mat            M;

 56:   PCShellGetContext(pc,(void**)&M);
 57:   MatLMVMSolve(M, b, x);
 58:   return(0);
 59: }

 61: /* Implements Newton's Method with a trust-region, line-search approach for
 62:    solving unconstrained minimization problems.  A Moré-Thuente line search
 63:    is used to guarantee that the BFGS preconditioner remains positive
 64:    definite. */
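/* Step types used when the trust-region step is rejected: the solver falls
   back from the Newton direction to a BFGS direction, then to a scaled
   gradient direction, and finally to the plain (negative) gradient. */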

 66: #define NTL_NEWTON              0
 67: #define NTL_BFGS                1
 68: #define NTL_SCALED_GRADIENT     2
 69: #define NTL_GRADIENT            3

 73: static PetscErrorCode TaoSolve_NTL(Tao tao)
 74: {
 75:   TAO_NTL                      *tl = (TAO_NTL *)tao->data;
 76:   PC                           pc;
 77:   KSPConvergedReason           ksp_reason;
 78:   TaoConvergedReason           reason;
 79:   TaoLineSearchConvergedReason ls_reason;

 81:   PetscReal                    fmin, ftrial, prered, actred, kappa, sigma;
 82:   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 83:   PetscReal                    f, fold, gdx, gnorm;
 84:   PetscReal                    step = 1.0;

 86:   PetscReal                    delta;
 87:   PetscReal                    norm_d = 0.0;
 88:   PetscErrorCode               ierr;
 89:   PetscInt                     stepType;
 90:   PetscInt                     its;

 92:   PetscInt                     bfgsUpdates = 0;
 93:   PetscInt                     needH;

 95:   PetscInt                     i_max = 5;
 96:   PetscInt                     j_max = 1;
 97:   PetscInt                     i, j, n, N;

 99:   PetscInt                     tr_reject;

102:   if (tao->XL || tao->XU || tao->ops->computebounds) {
 103:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by the NTL algorithm\n");
104:   }

106:   /* Initialize trust-region radius */
107:   tao->trust = tao->trust0;

109:   /* Modify the radius if it is too large or small */
110:   tao->trust = PetscMax(tao->trust, tl->min_radius);
111:   tao->trust = PetscMin(tao->trust, tl->max_radius);

113:   if (NTL_PC_BFGS == tl->pc_type && !tl->M) {
114:     VecGetLocalSize(tao->solution,&n);
115:     VecGetSize(tao->solution,&N);
116:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tl->M);
117:     MatLMVMAllocateVectors(tl->M,tao->solution);
118:   }

120:   /* Check convergence criteria */
121:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
122:   VecNorm(tao->gradient, NORM_2, &gnorm);
123:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
124:   needH = 1;

126:   TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
127:   if (reason != TAO_CONTINUE_ITERATING) return(0);

129:   /* Create vectors for the limited memory preconditioner */
130:   if ((NTL_PC_BFGS == tl->pc_type) && (BFGS_SCALE_BFGS != tl->bfgs_scale_type)) {
131:     if (!tl->Diag) {
132:       VecDuplicate(tao->solution, &tl->Diag);
133:     }
134:   }

136:   /* Modify the linear solver to a conjugate gradient method */
137:   switch(tl->ksp_type) {
138:   case NTL_KSP_NASH:
139:     KSPSetType(tao->ksp, KSPNASH);
140:     KSPSetFromOptions(tao->ksp);
141:     break;

143:   case NTL_KSP_STCG:
144:     KSPSetType(tao->ksp, KSPSTCG);
145:     KSPSetFromOptions(tao->ksp);
146:     break;

148:   default:
149:     KSPSetType(tao->ksp, KSPGLTR);
150:     KSPSetFromOptions(tao->ksp);
151:     break;
152:   }

154:   /* Modify the preconditioner to use the bfgs approximation */
155:   KSPGetPC(tao->ksp, &pc);
156:   switch(tl->pc_type) {
157:   case NTL_PC_NONE:
158:     PCSetType(pc, PCNONE);
159:     PCSetFromOptions(pc);
160:     break;

162:   case NTL_PC_AHESS:
163:     PCSetType(pc, PCJACOBI);
164:     PCSetFromOptions(pc);
165:     PCJacobiSetUseAbs(pc,PETSC_TRUE);
166:     break;

168:   case NTL_PC_BFGS:
169:     PCSetType(pc, PCSHELL);
170:     PCSetFromOptions(pc);
171:     PCShellSetName(pc, "bfgs");
172:     PCShellSetContext(pc, tl->M);
173:     PCShellSetApply(pc, MatLMVMSolveShell);
174:     break;

176:   default:
177:     /* Use the pc method set by pc_type */
178:     break;
179:   }

181:   /* Initialize trust-region radius.  The initialization is only performed
182:      when we are using Steihaug-Toint or the Generalized Lanczos method. */
183:   switch(tl->init_type) {
184:   case NTL_INIT_CONSTANT:
185:     /* Use the initial radius specified */
186:     break;

188:   case NTL_INIT_INTERPOLATION:
 189:     /* Use interpolation to determine the initial radius */
190:     max_radius = 0.0;

192:     for (j = 0; j < j_max; ++j) {
193:       fmin = f;
194:       sigma = 0.0;

196:       if (needH) {
197:         TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
198:         needH = 0;
199:       }

201:       for (i = 0; i < i_max; ++i) {
202:         VecCopy(tao->solution, tl->W);
203:         VecAXPY(tl->W, -tao->trust/gnorm, tao->gradient);

205:         TaoComputeObjective(tao, tl->W, &ftrial);
206:         if (PetscIsInfOrNanReal(ftrial)) {
207:           tau = tl->gamma1_i;
208:         } else {
209:           if (ftrial < fmin) {
210:             fmin = ftrial;
211:             sigma = -tao->trust / gnorm;
212:           }

214:           MatMult(tao->hessian, tao->gradient, tao->stepdirection);
215:           VecDot(tao->gradient, tao->stepdirection, &prered);
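          /* prered currently holds g^T H g; the next statement converts it to
             the predicted reduction of the quadratic model along a steepest
             descent step of length trust:
               trust*||g|| - 0.5*trust^2*(g^T H g)/||g||^2 */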

217:           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
218:           actred = f - ftrial;
219:           if ((PetscAbsScalar(actred) <= tl->epsilon) && (PetscAbsScalar(prered) <= tl->epsilon)) {
220:             kappa = 1.0;
221:           } else {
222:             kappa = actred / prered;
223:           }

225:           tau_1 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust + (1.0 - tl->theta_i) * prered - actred);
226:           tau_2 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust - (1.0 + tl->theta_i) * prered + actred);
227:           tau_min = PetscMin(tau_1, tau_2);
228:           tau_max = PetscMax(tau_1, tau_2);

230:           if (PetscAbsScalar(kappa - 1.0) <= tl->mu1_i) {
231:             /* Great agreement */
232:             max_radius = PetscMax(max_radius, tao->trust);

234:             if (tau_max < 1.0) {
235:               tau = tl->gamma3_i;
236:             } else if (tau_max > tl->gamma4_i) {
237:               tau = tl->gamma4_i;
238:             } else if (tau_1 >= 1.0 && tau_1 <= tl->gamma4_i && tau_2 < 1.0) {
239:               tau = tau_1;
240:             } else if (tau_2 >= 1.0 && tau_2 <= tl->gamma4_i && tau_1 < 1.0) {
241:               tau = tau_2;
242:             } else {
243:               tau = tau_max;
244:             }
245:           } else if (PetscAbsScalar(kappa - 1.0) <= tl->mu2_i) {
246:             /* Good agreement */
247:             max_radius = PetscMax(max_radius, tao->trust);

249:             if (tau_max < tl->gamma2_i) {
250:               tau = tl->gamma2_i;
251:             } else if (tau_max > tl->gamma3_i) {
252:               tau = tl->gamma3_i;
253:             } else {
254:               tau = tau_max;
255:             }
256:           } else {
257:             /* Not good agreement */
258:             if (tau_min > 1.0) {
259:               tau = tl->gamma2_i;
260:             } else if (tau_max < tl->gamma1_i) {
261:               tau = tl->gamma1_i;
262:             } else if ((tau_min < tl->gamma1_i) && (tau_max >= 1.0)) {
263:               tau = tl->gamma1_i;
264:             } else if ((tau_1 >= tl->gamma1_i) && (tau_1 < 1.0) &&  ((tau_2 < tl->gamma1_i) || (tau_2 >= 1.0))) {
265:               tau = tau_1;
 266:             } else if ((tau_2 >= tl->gamma1_i) && (tau_2 < 1.0) &&  ((tau_1 < tl->gamma1_i) || (tau_1 >= 1.0))) {
267:               tau = tau_2;
268:             } else {
269:               tau = tau_max;
270:             }
271:           }
272:         }
273:         tao->trust = tau * tao->trust;
274:       }

276:       if (fmin < f) {
277:         f = fmin;
278:         VecAXPY(tao->solution, sigma, tao->gradient);
279:         TaoComputeGradient(tao, tao->solution, tao->gradient);

281:         VecNorm(tao->gradient, NORM_2, &gnorm);
282:         if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
283:         needH = 1;

285:         TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
286:         if (reason != TAO_CONTINUE_ITERATING) return(0);
287:       }
288:     }
289:     tao->trust = PetscMax(tao->trust, max_radius);

291:     /* Modify the radius if it is too large or small */
292:     tao->trust = PetscMax(tao->trust, tl->min_radius);
293:     tao->trust = PetscMin(tao->trust, tl->max_radius);
294:     break;

296:   default:
297:     /* Norm of the first direction will initialize radius */
298:     tao->trust = 0.0;
299:     break;
300:   }

302:   /* Set initial scaling for the BFGS preconditioner
303:      This step is done after computing the initial trust-region radius
304:      since the function value may have decreased */
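  /* delta = 2|f|/||g||^2: if f behaved like a quadratic with minimum value
     zero, then f = 0.5*g^T H^{-1} g, so this quantity estimates the scale of
     the inverse Hessian along the gradient; the same heuristic is used to
     (re)initialize the LMVM scaling elsewhere in this solver. */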
305:   if (NTL_PC_BFGS == tl->pc_type) {
306:     if (f != 0.0) {
307:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
308:     } else {
309:       delta = 2.0 / (gnorm*gnorm);
310:     }
311:     MatLMVMSetDelta(tl->M, delta);
312:   }

314:   /* Set counter for gradient/reset steps */
315:   tl->ntrust = 0;
316:   tl->newt = 0;
317:   tl->bfgs = 0;
318:   tl->sgrad = 0;
319:   tl->grad = 0;

321:   /* Have not converged; continue with Newton method */
322:   while (reason == TAO_CONTINUE_ITERATING) {
323:     ++tao->niter;
324:     tao->ksp_its=0;
325:     /* Compute the Hessian */
326:     if (needH) {
327:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
328:       needH = 0;
329:     }

331:     if (NTL_PC_BFGS == tl->pc_type) {
332:       if (BFGS_SCALE_AHESS == tl->bfgs_scale_type) {
333:         /* Obtain diagonal for the bfgs preconditioner */
334:         MatGetDiagonal(tao->hessian, tl->Diag);
335:         VecAbs(tl->Diag);
336:         VecReciprocal(tl->Diag);
337:         MatLMVMSetScale(tl->M, tl->Diag);
338:       }

340:       /* Update the limited memory preconditioner */
341:       MatLMVMUpdate(tl->M,tao->solution, tao->gradient);
342:       ++bfgsUpdates;
343:     }
344:     KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
345:     /* Solve the Newton system of equations */
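    /* The KSP trust-region radius is set to tl->max_radius rather than
       tao->trust: the computed step is tested against the actual trust region
       in the update logic below, with a line search applied if it is rejected. */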
346:     if (NTL_KSP_NASH == tl->ksp_type) {
347:       KSPNASHSetRadius(tao->ksp,tl->max_radius);
348:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
349:       KSPGetIterationNumber(tao->ksp,&its);
350:       tao->ksp_its+=its;
351:       tao->ksp_tot_its+=its;
352:       KSPNASHGetNormD(tao->ksp, &norm_d);
353:     } else if (NTL_KSP_STCG == tl->ksp_type) {
354:       KSPSTCGSetRadius(tao->ksp,tl->max_radius);
355:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
356:       KSPGetIterationNumber(tao->ksp,&its);
357:       tao->ksp_its+=its;
358:       tao->ksp_tot_its+=its;
359:       KSPSTCGGetNormD(tao->ksp, &norm_d);
360:     } else { /* NTL_KSP_GLTR */
361:       KSPGLTRSetRadius(tao->ksp,tl->max_radius);
362:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
363:       KSPGetIterationNumber(tao->ksp,&its);
364:       tao->ksp_its+=its;
365:       tao->ksp_tot_its+=its;
366:       KSPGLTRGetNormD(tao->ksp, &norm_d);
367:     }

369:     if (0.0 == tao->trust) {
370:       /* Radius was uninitialized; use the norm of the direction */
371:       if (norm_d > 0.0) {
372:         tao->trust = norm_d;

374:         /* Modify the radius if it is too large or small */
375:         tao->trust = PetscMax(tao->trust, tl->min_radius);
376:         tao->trust = PetscMin(tao->trust, tl->max_radius);
377:       } else {
378:         /* The direction was bad; set radius to default value and re-solve
379:            the trust-region subproblem to get a direction */
380:         tao->trust = tao->trust0;

382:         /* Modify the radius if it is too large or small */
383:         tao->trust = PetscMax(tao->trust, tl->min_radius);
384:         tao->trust = PetscMin(tao->trust, tl->max_radius);

386:         if (NTL_KSP_NASH == tl->ksp_type) {
387:           KSPNASHSetRadius(tao->ksp,tl->max_radius);
388:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
389:           KSPGetIterationNumber(tao->ksp,&its);
390:           tao->ksp_its+=its;
391:           tao->ksp_tot_its+=its;
392:           KSPNASHGetNormD(tao->ksp, &norm_d);
393:         } else if (NTL_KSP_STCG == tl->ksp_type) {
394:           KSPSTCGSetRadius(tao->ksp,tl->max_radius);
395:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
396:           KSPGetIterationNumber(tao->ksp,&its);
397:           tao->ksp_its+=its;
398:           tao->ksp_tot_its+=its;
399:           KSPSTCGGetNormD(tao->ksp, &norm_d);
400:         } else { /* NTL_KSP_GLTR */
401:           KSPGLTRSetRadius(tao->ksp,tl->max_radius);
402:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
403:           KSPGetIterationNumber(tao->ksp,&its);
404:           tao->ksp_its+=its;
405:           tao->ksp_tot_its+=its;
406:           KSPGLTRGetNormD(tao->ksp, &norm_d);
407:         }


410:         if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
411:       }
412:     }

414:     VecScale(tao->stepdirection, -1.0);
415:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
416:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NTL_PC_BFGS == tl->pc_type) && (bfgsUpdates > 1)) {
417:       /* Preconditioner is numerically indefinite; reset the
 418:          approximation if using BFGS preconditioning. */

420:       if (f != 0.0) {
421:         delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
422:       } else {
423:         delta = 2.0 / (gnorm*gnorm);
424:       }
425:       MatLMVMSetDelta(tl->M, delta);
426:       MatLMVMReset(tl->M);
427:       MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
428:       bfgsUpdates = 1;
429:     }

431:     /* Check trust-region reduction conditions */
432:     tr_reject = 0;
433:     if (NTL_UPDATE_REDUCTION == tl->update_type) {
434:       /* Get predicted reduction */
435:       if (NTL_KSP_NASH == tl->ksp_type) {
436:         KSPNASHGetObjFcn(tao->ksp,&prered);
437:       } else if (NTL_KSP_STCG == tl->ksp_type) {
438:         KSPSTCGGetObjFcn(tao->ksp,&prered);
439:       } else { /* gltr */
440:         KSPGLTRGetObjFcn(tao->ksp,&prered);
441:       }

443:       if (prered >= 0.0) {
444:         /* The predicted reduction has the wrong sign.  This cannot
445:            happen in infinite precision arithmetic.  Step should
446:            be rejected! */
447:         tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
448:         tr_reject = 1;
449:       } else {
450:         /* Compute trial step and function value */
451:         VecCopy(tao->solution, tl->W);
452:         VecAXPY(tl->W, 1.0, tao->stepdirection);
453:         TaoComputeObjective(tao, tl->W, &ftrial);

455:         if (PetscIsInfOrNanReal(ftrial)) {
456:           tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
457:           tr_reject = 1;
458:         } else {
 459:           /* Compute the actual reduction */
460:           actred = f - ftrial;
461:           prered = -prered;
462:           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
463:               (PetscAbsScalar(prered) <= tl->epsilon)) {
464:             kappa = 1.0;
465:           } else {
466:             kappa = actred / prered;
467:           }

 469:           /* Accept or reject the step and update radius */
470:           if (kappa < tl->eta1) {
471:             /* Reject the step */
472:             tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
473:             tr_reject = 1;
474:           } else {
475:             /* Accept the step */
476:             if (kappa < tl->eta2) {
 477:               /* Marginally bad step */
478:               tao->trust = tl->alpha2 * PetscMin(tao->trust, norm_d);
479:             } else if (kappa < tl->eta3) {
480:               /* Reasonable step */
481:               tao->trust = tl->alpha3 * tao->trust;
482:             } else if (kappa < tl->eta4) {
483:               /* Good step */
484:               tao->trust = PetscMax(tl->alpha4 * norm_d, tao->trust);
485:             } else {
486:               /* Very good step */
487:               tao->trust = PetscMax(tl->alpha5 * norm_d, tao->trust);
488:             }
489:           }
490:         }
491:       }
492:     } else {
493:       /* Get predicted reduction */
494:       if (NTL_KSP_NASH == tl->ksp_type) {
495:         KSPNASHGetObjFcn(tao->ksp,&prered);
496:       } else if (NTL_KSP_STCG == tl->ksp_type) {
497:         KSPSTCGGetObjFcn(tao->ksp,&prered);
498:       } else { /* gltr */
499:         KSPGLTRGetObjFcn(tao->ksp,&prered);
500:       }

502:       if (prered >= 0.0) {
503:         /* The predicted reduction has the wrong sign.  This cannot
504:            happen in infinite precision arithmetic.  Step should
505:            be rejected! */
506:         tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
507:         tr_reject = 1;
508:       } else {
509:         VecCopy(tao->solution, tl->W);
510:         VecAXPY(tl->W, 1.0, tao->stepdirection);
511:         TaoComputeObjective(tao, tl->W, &ftrial);
512:         if (PetscIsInfOrNanReal(ftrial)) {
513:           tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
514:           tr_reject = 1;
515:         } else {
516:           VecDot(tao->gradient, tao->stepdirection, &gdx);

518:           actred = f - ftrial;
519:           prered = -prered;
520:           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
521:               (PetscAbsScalar(prered) <= tl->epsilon)) {
522:             kappa = 1.0;
523:           } else {
524:             kappa = actred / prered;
525:           }

527:           tau_1 = tl->theta * gdx / (tl->theta * gdx - (1.0 - tl->theta) * prered + actred);
528:           tau_2 = tl->theta * gdx / (tl->theta * gdx + (1.0 + tl->theta) * prered - actred);
529:           tau_min = PetscMin(tau_1, tau_2);
530:           tau_max = PetscMax(tau_1, tau_2);

532:           if (kappa >= 1.0 - tl->mu1) {
533:             /* Great agreement; accept step and update radius */
534:             if (tau_max < 1.0) {
535:               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
536:             } else if (tau_max > tl->gamma4) {
537:               tao->trust = PetscMax(tao->trust, tl->gamma4 * norm_d);
538:             } else {
539:               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
540:             }
541:           } else if (kappa >= 1.0 - tl->mu2) {
542:             /* Good agreement */

544:             if (tau_max < tl->gamma2) {
545:               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
546:             } else if (tau_max > tl->gamma3) {
547:               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
548:             } else if (tau_max < 1.0) {
549:               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
550:             } else {
551:               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
552:             }
553:           } else {
554:             /* Not good agreement */
555:             if (tau_min > 1.0) {
556:               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
557:             } else if (tau_max < tl->gamma1) {
558:               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
559:             } else if ((tau_min < tl->gamma1) && (tau_max >= 1.0)) {
560:               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
561:             } else if ((tau_1 >= tl->gamma1) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1) || (tau_2 >= 1.0))) {
562:               tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
 563:             } else if ((tau_2 >= tl->gamma1) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1) || (tau_1 >= 1.0))) {
564:               tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
565:             } else {
566:               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
567:             }
568:             tr_reject = 1;
569:           }
570:         }
571:       }
572:     }

574:     if (tr_reject) {
575:       /* The trust-region constraints rejected the step.  Apply a linesearch.
576:          Check for descent direction. */
577:       VecDot(tao->stepdirection, tao->gradient, &gdx);
578:       if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
579:         /* Newton step is not descent or direction produced Inf or NaN */

581:         if (NTL_PC_BFGS != tl->pc_type) {
 582:           /* The BFGS matrix is not available (BFGS preconditioning not in use);
 583:              must use the gradient direction in this case */
584:           VecCopy(tao->gradient, tao->stepdirection);
585:           VecScale(tao->stepdirection, -1.0);
586:           ++tl->grad;
587:           stepType = NTL_GRADIENT;
588:         } else {
589:           /* Attempt to use the BFGS direction */
590:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);
591:           VecScale(tao->stepdirection, -1.0);

593:           /* Check for success (descent direction) */
594:           VecDot(tao->stepdirection, tao->gradient, &gdx);
595:           if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
 596:             /* BFGS direction is not a descent direction or produced Inf/NaN.
597:                We can assert bfgsUpdates > 1 in this case because
598:                the first solve produces the scaled gradient direction,
599:                which is guaranteed to be descent */

601:             /* Use steepest descent direction (scaled) */
602:             if (f != 0.0) {
603:               delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
604:             } else {
605:               delta = 2.0 / (gnorm*gnorm);
606:             }
607:             MatLMVMSetDelta(tl->M, delta);
608:             MatLMVMReset(tl->M);
609:             MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
610:             MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);
611:             VecScale(tao->stepdirection, -1.0);

613:             bfgsUpdates = 1;
614:             ++tl->sgrad;
615:             stepType = NTL_SCALED_GRADIENT;
616:           } else {
617:             if (1 == bfgsUpdates) {
618:               /* The first BFGS direction is always the scaled gradient */
619:               ++tl->sgrad;
620:               stepType = NTL_SCALED_GRADIENT;
621:             } else {
622:               ++tl->bfgs;
623:               stepType = NTL_BFGS;
624:             }
625:           }
626:         }
627:       } else {
628:         /* Computed Newton step is descent */
629:         ++tl->newt;
630:         stepType = NTL_NEWTON;
631:       }

633:       /* Perform the linesearch */
634:       fold = f;
635:       VecCopy(tao->solution, tl->Xold);
636:       VecCopy(tao->gradient, tl->Gold);

638:       step = 1.0;
639:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
640:       TaoAddLineSearchCounts(tao);

 642:       while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NTL_GRADIENT) {
643:         /* Linesearch failed */
644:         f = fold;
645:         VecCopy(tl->Xold, tao->solution);
646:         VecCopy(tl->Gold, tao->gradient);

648:         switch(stepType) {
649:         case NTL_NEWTON:
650:           /* Failed to obtain acceptable iterate with Newton step */

652:           if (NTL_PC_BFGS != tl->pc_type) {
 653:             /* The BFGS matrix is not available (BFGS preconditioning not in use);
 654:                must use the gradient direction in this case */
655:             VecCopy(tao->gradient, tao->stepdirection);
656:             ++tl->grad;
657:             stepType = NTL_GRADIENT;
658:           } else {
659:             /* Attempt to use the BFGS direction */
660:             MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);


663:             /* Check for success (descent direction) */
664:             VecDot(tao->stepdirection, tao->gradient, &gdx);
665:             if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
 666:               /* BFGS direction is not a descent direction or produced
 667:                  Inf/NaN.  We can assert bfgsUpdates > 1 in this case.
668:                  Use steepest descent direction (scaled) */

670:               if (f != 0.0) {
671:                 delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
672:               } else {
673:                 delta = 2.0 / (gnorm*gnorm);
674:               }
675:               MatLMVMSetDelta(tl->M, delta);
676:               MatLMVMReset(tl->M);
677:               MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
678:               MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

680:               bfgsUpdates = 1;
681:               ++tl->sgrad;
682:               stepType = NTL_SCALED_GRADIENT;
683:             } else {
684:               if (1 == bfgsUpdates) {
685:                 /* The first BFGS direction is always the scaled gradient */
686:                 ++tl->sgrad;
687:                 stepType = NTL_SCALED_GRADIENT;
688:               } else {
689:                 ++tl->bfgs;
690:                 stepType = NTL_BFGS;
691:               }
692:             }
693:           }
694:           break;

696:         case NTL_BFGS:
697:           /* Can only enter if pc_type == NTL_PC_BFGS
698:              Failed to obtain acceptable iterate with BFGS step
699:              Attempt to use the scaled gradient direction */

701:           if (f != 0.0) {
702:             delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
703:           } else {
704:             delta = 2.0 / (gnorm*gnorm);
705:           }
706:           MatLMVMSetDelta(tl->M, delta);
707:           MatLMVMReset(tl->M);
708:           MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
709:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

711:           bfgsUpdates = 1;
712:           ++tl->sgrad;
713:           stepType = NTL_SCALED_GRADIENT;
714:           break;

716:         case NTL_SCALED_GRADIENT:
717:           /* Can only enter if pc_type == NTL_PC_BFGS
718:              The scaled gradient step did not produce a new iterate;
 719:              attempt to use the gradient direction.
720:              Need to make sure we are not using a different diagonal scaling */
721:           MatLMVMSetScale(tl->M, tl->Diag);
722:           MatLMVMSetDelta(tl->M, 1.0);
723:           MatLMVMReset(tl->M);
724:           MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
725:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

727:           bfgsUpdates = 1;
728:           ++tl->grad;
729:           stepType = NTL_GRADIENT;
730:           break;
731:         }
732:         VecScale(tao->stepdirection, -1.0);

734:         /* This may be incorrect; linesearch has values for stepmax and stepmin
735:            that should be reset. */
736:         step = 1.0;
737:         TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
738:         TaoAddLineSearchCounts(tao);
739:       }

741:       if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
742:         /* Failed to find an improving point */
743:         f = fold;
744:         VecCopy(tl->Xold, tao->solution);
745:         VecCopy(tl->Gold, tao->gradient);
746:         tao->trust = 0.0;
747:         step = 0.0;
748:         reason = TAO_DIVERGED_LS_FAILURE;
749:         tao->reason = TAO_DIVERGED_LS_FAILURE;
750:         break;
751:       } else if (stepType == NTL_NEWTON) {
752:         if (step < tl->nu1) {
753:           /* Very bad step taken; reduce radius */
754:           tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
755:         } else if (step < tl->nu2) {
756:           /* Reasonably bad step taken; reduce radius */
757:           tao->trust = tl->omega2 * PetscMin(norm_d, tao->trust);
758:         } else if (step < tl->nu3) {
759:           /* Reasonable step was taken; leave radius alone */
760:           if (tl->omega3 < 1.0) {
761:             tao->trust = tl->omega3 * PetscMin(norm_d, tao->trust);
762:           } else if (tl->omega3 > 1.0) {
763:             tao->trust = PetscMax(tl->omega3 * norm_d, tao->trust);
764:           }
765:         } else if (step < tl->nu4) {
766:           /* Full step taken; increase the radius */
767:           tao->trust = PetscMax(tl->omega4 * norm_d, tao->trust);
768:         } else {
769:           /* More than full step taken; increase the radius */
770:           tao->trust = PetscMax(tl->omega5 * norm_d, tao->trust);
771:         }
772:       } else {
773:         /* Newton step was not good; reduce the radius */
774:         tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
775:       }
776:     } else {
777:       /* Trust-region step is accepted */
778:       VecCopy(tl->W, tao->solution);
779:       f = ftrial;
780:       TaoComputeGradient(tao, tao->solution, tao->gradient);
781:       ++tl->ntrust;
782:     }

784:     /* The radius may have been increased; modify if it is too large */
785:     tao->trust = PetscMin(tao->trust, tl->max_radius);

 787:     /* Check for convergence */
788:     VecNorm(tao->gradient, NORM_2, &gnorm);
 789:     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
790:     needH = 1;

792:     TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust, &reason);
793:   }
794:   return(0);
795: }

797: /* ---------------------------------------------------------- */
800: static PetscErrorCode TaoSetUp_NTL(Tao tao)
801: {
802:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

806:   if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient); }
807:   if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
808:   if (!tl->W) { VecDuplicate(tao->solution, &tl->W);}
809:   if (!tl->Xold) { VecDuplicate(tao->solution, &tl->Xold);}
810:   if (!tl->Gold) { VecDuplicate(tao->solution, &tl->Gold);}
811:   tl->Diag = 0;
812:   tl->M = 0;
813:   return(0);
814: }

816: /*------------------------------------------------------------*/
819: static PetscErrorCode TaoDestroy_NTL(Tao tao)
820: {
821:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

825:   if (tao->setupcalled) {
826:     VecDestroy(&tl->W);
827:     VecDestroy(&tl->Xold);
828:     VecDestroy(&tl->Gold);
829:   }
830:   VecDestroy(&tl->Diag);
831:   MatDestroy(&tl->M);
832:   PetscFree(tao->data);
833:   return(0);
834: }

836: /*------------------------------------------------------------*/
839: static PetscErrorCode TaoSetFromOptions_NTL(PetscOptions *PetscOptionsObject,Tao tao)
840: {
841:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

845:   PetscOptionsHead(PetscOptionsObject,"Newton trust region with line search method for unconstrained optimization");
846:   PetscOptionsEList("-tao_ntl_ksp_type", "ksp type", "", NTL_KSP, NTL_KSP_TYPES, NTL_KSP[tl->ksp_type], &tl->ksp_type,NULL);
847:   PetscOptionsEList("-tao_ntl_pc_type", "pc type", "", NTL_PC, NTL_PC_TYPES, NTL_PC[tl->pc_type], &tl->pc_type,NULL);
848:   PetscOptionsEList("-tao_ntl_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tl->bfgs_scale_type], &tl->bfgs_scale_type,NULL);
849:   PetscOptionsEList("-tao_ntl_init_type", "radius initialization type", "", NTL_INIT, NTL_INIT_TYPES, NTL_INIT[tl->init_type], &tl->init_type,NULL);
850:   PetscOptionsEList("-tao_ntl_update_type", "radius update type", "", NTL_UPDATE, NTL_UPDATE_TYPES, NTL_UPDATE[tl->update_type], &tl->update_type,NULL);
 851:   PetscOptionsReal("-tao_ntl_eta1", "poor reduction ratio; reduce radius", "", tl->eta1, &tl->eta1,NULL);
 852:   PetscOptionsReal("-tao_ntl_eta2", "reasonable reduction ratio; leave radius alone", "", tl->eta2, &tl->eta2,NULL);
 853:   PetscOptionsReal("-tao_ntl_eta3", "good reduction ratio; increase radius", "", tl->eta3, &tl->eta3,NULL);
 854:   PetscOptionsReal("-tao_ntl_eta4", "excellent reduction ratio; greatly increase radius", "", tl->eta4, &tl->eta4,NULL);
855:   PetscOptionsReal("-tao_ntl_alpha1", "", "", tl->alpha1, &tl->alpha1,NULL);
856:   PetscOptionsReal("-tao_ntl_alpha2", "", "", tl->alpha2, &tl->alpha2,NULL);
857:   PetscOptionsReal("-tao_ntl_alpha3", "", "", tl->alpha3, &tl->alpha3,NULL);
858:   PetscOptionsReal("-tao_ntl_alpha4", "", "", tl->alpha4, &tl->alpha4,NULL);
859:   PetscOptionsReal("-tao_ntl_alpha5", "", "", tl->alpha5, &tl->alpha5,NULL);
860:   PetscOptionsReal("-tao_ntl_nu1", "poor steplength; reduce radius", "", tl->nu1, &tl->nu1,NULL);
861:   PetscOptionsReal("-tao_ntl_nu2", "reasonable steplength; leave radius alone", "", tl->nu2, &tl->nu2,NULL);
862:   PetscOptionsReal("-tao_ntl_nu3", "good steplength; increase radius", "", tl->nu3, &tl->nu3,NULL);
863:   PetscOptionsReal("-tao_ntl_nu4", "excellent steplength; greatly increase radius", "", tl->nu4, &tl->nu4,NULL);
864:   PetscOptionsReal("-tao_ntl_omega1", "", "", tl->omega1, &tl->omega1,NULL);
865:   PetscOptionsReal("-tao_ntl_omega2", "", "", tl->omega2, &tl->omega2,NULL);
866:   PetscOptionsReal("-tao_ntl_omega3", "", "", tl->omega3, &tl->omega3,NULL);
867:   PetscOptionsReal("-tao_ntl_omega4", "", "", tl->omega4, &tl->omega4,NULL);
868:   PetscOptionsReal("-tao_ntl_omega5", "", "", tl->omega5, &tl->omega5,NULL);
869:   PetscOptionsReal("-tao_ntl_mu1_i", "", "", tl->mu1_i, &tl->mu1_i,NULL);
870:   PetscOptionsReal("-tao_ntl_mu2_i", "", "", tl->mu2_i, &tl->mu2_i,NULL);
871:   PetscOptionsReal("-tao_ntl_gamma1_i", "", "", tl->gamma1_i, &tl->gamma1_i,NULL);
872:   PetscOptionsReal("-tao_ntl_gamma2_i", "", "", tl->gamma2_i, &tl->gamma2_i,NULL);
873:   PetscOptionsReal("-tao_ntl_gamma3_i", "", "", tl->gamma3_i, &tl->gamma3_i,NULL);
874:   PetscOptionsReal("-tao_ntl_gamma4_i", "", "", tl->gamma4_i, &tl->gamma4_i,NULL);
875:   PetscOptionsReal("-tao_ntl_theta_i", "", "", tl->theta_i, &tl->theta_i,NULL);
876:   PetscOptionsReal("-tao_ntl_mu1", "", "", tl->mu1, &tl->mu1,NULL);
877:   PetscOptionsReal("-tao_ntl_mu2", "", "", tl->mu2, &tl->mu2,NULL);
878:   PetscOptionsReal("-tao_ntl_gamma1", "", "", tl->gamma1, &tl->gamma1,NULL);
879:   PetscOptionsReal("-tao_ntl_gamma2", "", "", tl->gamma2, &tl->gamma2,NULL);
880:   PetscOptionsReal("-tao_ntl_gamma3", "", "", tl->gamma3, &tl->gamma3,NULL);
881:   PetscOptionsReal("-tao_ntl_gamma4", "", "", tl->gamma4, &tl->gamma4,NULL);
882:   PetscOptionsReal("-tao_ntl_theta", "", "", tl->theta, &tl->theta,NULL);
883:   PetscOptionsReal("-tao_ntl_min_radius", "lower bound on initial radius", "", tl->min_radius, &tl->min_radius,NULL);
884:   PetscOptionsReal("-tao_ntl_max_radius", "upper bound on radius", "", tl->max_radius, &tl->max_radius,NULL);
885:   PetscOptionsReal("-tao_ntl_epsilon", "tolerance used when computing actual and predicted reduction", "", tl->epsilon, &tl->epsilon,NULL);
886:   PetscOptionsTail();
887:   TaoLineSearchSetFromOptions(tao->linesearch);
888:   KSPSetFromOptions(tao->ksp);
889:   return(0);
890: }

892: /*------------------------------------------------------------*/
895: static PetscErrorCode TaoView_NTL(Tao tao, PetscViewer viewer)
896: {
897:   TAO_NTL        *tl = (TAO_NTL *)tao->data;
898:   PetscInt       nrejects;
899:   PetscBool      isascii;

903:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
904:   if (isascii) {
905:     PetscViewerASCIIPushTab(viewer);
906:     if (NTL_PC_BFGS == tl->pc_type && tl->M) {
907:       MatLMVMGetRejects(tl->M, &nrejects);
908:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects);
909:     }
910:     PetscViewerASCIIPrintf(viewer, "Trust-region steps: %D\n", tl->ntrust);
911:     PetscViewerASCIIPrintf(viewer, "Newton search steps: %D\n", tl->newt);
912:     PetscViewerASCIIPrintf(viewer, "BFGS search steps: %D\n", tl->bfgs);
913:     PetscViewerASCIIPrintf(viewer, "Scaled gradient search steps: %D\n", tl->sgrad);
914:     PetscViewerASCIIPrintf(viewer, "Gradient search steps: %D\n", tl->grad);
915:     PetscViewerASCIIPopTab(viewer);
916:   }
917:   return(0);
918: }

920: /* ---------------------------------------------------------- */
921: /*MC
 922:   TAONTL - Newton's method with trust region and line search
923:   for unconstrained minimization.
 924:   At each iteration, the method approximately solves the trust-region subproblem below for the step d
 925:   and applies a line search along d when the trust-region step is rejected:

 927:             min_d  0.5 d^T H_k d + g_k^T d,  subject to  ||d|| <= Delta_k

929:   Options Database Keys:
930: + -tao_ntl_ksp_type - "nash","stcg","gltr"
931: . -tao_ntl_pc_type - "none","ahess","bfgs","petsc"
932: . -tao_ntl_bfgs_scale_type - type of scaling with bfgs pc, "ahess" or "bfgs"
933: . -tao_ntl_init_type - "constant","direction","interpolation"
934: . -tao_ntl_update_type - "reduction","interpolation"
935: . -tao_ntl_min_radius - lower bound on trust region radius
936: . -tao_ntl_max_radius - upper bound on trust region radius
937: . -tao_ntl_epsilon - tolerance for accepting actual / predicted reduction
938: . -tao_ntl_mu1_i - mu1 interpolation init factor
939: . -tao_ntl_mu2_i - mu2 interpolation init factor
940: . -tao_ntl_gamma1_i - gamma1 interpolation init factor
941: . -tao_ntl_gamma2_i - gamma2 interpolation init factor
942: . -tao_ntl_gamma3_i - gamma3 interpolation init factor
943: . -tao_ntl_gamma4_i - gamma4 interpolation init factor
 944: . -tao_ntl_theta_i - theta interpolation init factor
945: . -tao_ntl_eta1 - eta1 reduction update factor
946: . -tao_ntl_eta2 - eta2 reduction update factor
947: . -tao_ntl_eta3 - eta3 reduction update factor
948: . -tao_ntl_eta4 - eta4 reduction update factor
949: . -tao_ntl_alpha1 - alpha1 reduction update factor
950: . -tao_ntl_alpha2 - alpha2 reduction update factor
951: . -tao_ntl_alpha3 - alpha3 reduction update factor
952: . -tao_ntl_alpha4 - alpha4 reduction update factor
 953: . -tao_ntl_alpha5 - alpha5 reduction update factor
954: . -tao_ntl_mu1 - mu1 interpolation update
955: . -tao_ntl_mu2 - mu2 interpolation update
 956: . -tao_ntl_gamma1 - gamma1 interpolation update
 957: . -tao_ntl_gamma2 - gamma2 interpolation update
 958: . -tao_ntl_gamma3 - gamma3 interpolation update
 959: . -tao_ntl_gamma4 - gamma4 interpolation update
 960: - -tao_ntl_theta - theta interpolation update

962:   Level: beginner
963: M*/
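/* A minimal usage sketch (assuming the standard PETSc 3.6 Tao interface;
   FormFunctionGradient, FormHessian, x, H, and user are hypothetical
   user-provided callbacks and data, not part of this file):

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD, &tao);
     TaoSetType(tao, TAONTL);
     TaoSetInitialVector(tao, x);
     TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
     TaoSetHessianRoutine(tao, H, H, FormHessian, &user);
     TaoSetFromOptions(tao);      picks up the -tao_ntl_... options listed above
     TaoSolve(tao);
     TaoDestroy(&tao);
*/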

967: PETSC_EXTERN PetscErrorCode TaoCreate_NTL(Tao tao)
968: {
969:   TAO_NTL        *tl;
971:   const char     *morethuente_type = TAOLINESEARCHMT;

974:   PetscNewLog(tao,&tl);
975:   tao->ops->setup = TaoSetUp_NTL;
976:   tao->ops->solve = TaoSolve_NTL;
977:   tao->ops->view = TaoView_NTL;
978:   tao->ops->setfromoptions = TaoSetFromOptions_NTL;
979:   tao->ops->destroy = TaoDestroy_NTL;

981:   /* Override default settings (unless already changed) */
982:   if (!tao->max_it_changed) tao->max_it = 50;
983:   if (!tao->trust0_changed) tao->trust0 = 100.0;
984: #if defined(PETSC_USE_REAL_SINGLE)
985:   if (!tao->fatol_changed) tao->fatol = 1.0e-5;
986:   if (!tao->frtol_changed) tao->frtol = 1.0e-5;
987: #else
988:   if (!tao->fatol_changed) tao->fatol = 1.0e-10;
989:   if (!tao->frtol_changed) tao->frtol = 1.0e-10;
990: #endif

992:   tao->data = (void*)tl;

994:   /* Default values for trust-region radius update based on steplength */
995:   tl->nu1 = 0.25;
996:   tl->nu2 = 0.50;
997:   tl->nu3 = 1.00;
998:   tl->nu4 = 1.25;

1000:   tl->omega1 = 0.25;
1001:   tl->omega2 = 0.50;
1002:   tl->omega3 = 1.00;
1003:   tl->omega4 = 2.00;
1004:   tl->omega5 = 4.00;

1006:   /* Default values for trust-region radius update based on reduction */
1007:   tl->eta1 = 1.0e-4;
1008:   tl->eta2 = 0.25;
1009:   tl->eta3 = 0.50;
1010:   tl->eta4 = 0.90;

1012:   tl->alpha1 = 0.25;
1013:   tl->alpha2 = 0.50;
1014:   tl->alpha3 = 1.00;
1015:   tl->alpha4 = 2.00;
1016:   tl->alpha5 = 4.00;

1018:   /* Default values for trust-region radius update based on interpolation */
1019:   tl->mu1 = 0.10;
1020:   tl->mu2 = 0.50;

1022:   tl->gamma1 = 0.25;
1023:   tl->gamma2 = 0.50;
1024:   tl->gamma3 = 2.00;
1025:   tl->gamma4 = 4.00;

1027:   tl->theta = 0.05;

1029:   /* Default values for trust region initialization based on interpolation */
1030:   tl->mu1_i = 0.35;
1031:   tl->mu2_i = 0.50;

1033:   tl->gamma1_i = 0.0625;
1034:   tl->gamma2_i = 0.5;
1035:   tl->gamma3_i = 2.0;
1036:   tl->gamma4_i = 5.0;

1038:   tl->theta_i = 0.25;

1040:   /* Remaining parameters */
1041:   tl->min_radius = 1.0e-10;
1042:   tl->max_radius = 1.0e10;
1043:   tl->epsilon = 1.0e-6;

1045:   tl->ksp_type        = NTL_KSP_STCG;
1046:   tl->pc_type         = NTL_PC_BFGS;
1047:   tl->bfgs_scale_type = BFGS_SCALE_AHESS;
1048:   tl->init_type       = NTL_INIT_INTERPOLATION;
1049:   tl->update_type     = NTL_UPDATE_REDUCTION;

1051:   TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
1052:   TaoLineSearchSetType(tao->linesearch, morethuente_type);
1053:   TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
1054:   TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
1055:   KSPCreate(((PetscObject)tao)->comm, &tao->ksp);
1056:   KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix);
1057:   return(0);
1058: }