Actual source code: nls.c

petsc-3.10.5 2019-03-28
  1:  #include <petsctaolinesearch.h>
  2:  #include <../src/tao/unconstrained/impls/nls/nlsimpl.h>

  4:  #include <petscksp.h>

  6: #define NLS_INIT_CONSTANT         0
  7: #define NLS_INIT_DIRECTION        1
  8: #define NLS_INIT_INTERPOLATION    2
  9: #define NLS_INIT_TYPES            3

 11: #define NLS_UPDATE_STEP           0
 12: #define NLS_UPDATE_REDUCTION      1
 13: #define NLS_UPDATE_INTERPOLATION  2
 14: #define NLS_UPDATE_TYPES          3

 16: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};

 18: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};

 20: /*
 21:  Implements Newton's method with a line search approach for solving
 22:  unconstrained minimization problems.  A Moré-Thuente line search
 23:  is used to guarantee that the BFGS preconditioner remains positive
 24:  definite.

 26:  The method can shift the Hessian matrix.  The shifting procedure is
 27:  adapted from the PATH algorithm for solving complementarity
 28:  problems.

 30:  The linear system solve should be done with a conjugate gradient
 31:  method, although any method can be used.
 32: */
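For orientation, here is a minimal driver sketch (not part of nls.c) showing how an application typically selects this solver through the public TAO interface.  It minimizes a small quadratic with TAONLS; the callback names FormFunctionGradient and FormHessian are local to the sketch and the problem data is illustrative only.

#include <petsctao.h>

/* Sketch only: minimize f(x) = 0.5*(x0^2 + 10*x1^2) with TAONLS. */
static PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f,Vec G,void *ctx)
{
  const PetscScalar *x;
  PetscScalar       *g;
  PetscErrorCode    ierr;

  ierr = VecGetArrayRead(X,&x);CHKERRQ(ierr);
  ierr = VecGetArray(G,&g);CHKERRQ(ierr);
  *f   = 0.5*PetscRealPart(x[0]*x[0] + 10.0*x[1]*x[1]);
  g[0] = x[0];
  g[1] = 10.0*x[1];
  ierr = VecRestoreArrayRead(X,&x);CHKERRQ(ierr);
  ierr = VecRestoreArray(G,&g);CHKERRQ(ierr);
  return 0;
}

static PetscErrorCode FormHessian(Tao tao,Vec X,Mat H,Mat Hpre,void *ctx)
{
  PetscErrorCode ierr;

  /* The Hessian of this quadratic is constant and diagonal */
  ierr = MatSetValue(H,0,0,1.0,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatSetValue(H,1,1,10.0,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}

int main(int argc,char **argv)
{
  Tao            tao;
  Vec            x;
  Mat            H;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = VecCreateSeq(PETSC_COMM_SELF,2,&x);CHKERRQ(ierr);
  ierr = VecSet(x,1.0);CHKERRQ(ierr);                      /* initial guess */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,2,2,NULL,&H);CHKERRQ(ierr);

  ierr = TaoCreate(PETSC_COMM_SELF,&tao);CHKERRQ(ierr);
  ierr = TaoSetType(tao,TAONLS);CHKERRQ(ierr);
  ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
  ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);CHKERRQ(ierr);
  ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,NULL);CHKERRQ(ierr);
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);             /* picks up -tao_nls_* options */
  ierr = TaoSolve(tao);CHKERRQ(ierr);

  ierr = TaoDestroy(&tao);CHKERRQ(ierr);
  ierr = MatDestroy(&H);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}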

 34: #define NLS_NEWTON              0
 35: #define NLS_BFGS                1
 36: #define NLS_GRADIENT            2

 38: static PetscErrorCode TaoSolve_NLS(Tao tao)
 39: {
 40:   PetscErrorCode               ierr;
 41:   TAO_NLS                      *nlsP = (TAO_NLS *)tao->data;
 42:   KSPType                      ksp_type;
 43:   PetscBool                    is_nash,is_stcg,is_gltr,is_bfgs,is_jacobi,is_symmetric,sym_set;
 44:   KSPConvergedReason           ksp_reason;
 45:   PC                           pc;
 46:   TaoLineSearchConvergedReason ls_reason;

 48:   PetscReal                    fmin, ftrial, f_full, prered, actred, kappa, sigma;
 49:   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 50:   PetscReal                    f, fold, gdx, gnorm, pert;
 51:   PetscReal                    step = 1.0;
 52:   PetscReal                    norm_d = 0.0, e_min;

 54:   PetscInt                     stepType;
 55:   PetscInt                     bfgsUpdates = 0;
 56:   PetscInt                     n,N,kspits;
 57:   PetscInt                     needH = 1;

 59:   PetscInt                     i_max = 5;
 60:   PetscInt                     j_max = 1;
 61:   PetscInt                     i, j;

 64:   if (tao->XL || tao->XU || tao->ops->computebounds) {
 65:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
 66:   }

 68:   /* Initialize variables */
 69:   pert = nlsP->sval;

 71:   /* Number of times ksp stopped because of these reasons */
 72:   nlsP->ksp_atol = 0;
 73:   nlsP->ksp_rtol = 0;
 74:   nlsP->ksp_dtol = 0;
 75:   nlsP->ksp_ctol = 0;
 76:   nlsP->ksp_negc = 0;
 77:   nlsP->ksp_iter = 0;
 78:   nlsP->ksp_othr = 0;

 80:   /* Initialize trust-region radius when using nash, stcg, or gltr
 81:      The radius setting is ignored for other methods
 82:      Will be reset during the first iteration
 83:   */
 84:   KSPGetType(tao->ksp,&ksp_type);
 85:   PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);
 86:   PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);
 87:   PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);

 89:   KSPCGSetRadius(tao->ksp,nlsP->max_radius);

 91:   if (is_nash || is_stcg || is_gltr) {
 92:     if (tao->trust0 < 0.0) SETERRQ(PETSC_COMM_SELF,1,"Initial radius negative");
 93:     tao->trust = tao->trust0;
 94:     tao->trust = PetscMax(tao->trust, nlsP->min_radius);
 95:     tao->trust = PetscMin(tao->trust, nlsP->max_radius);
 96:   }

 98:   /* Check convergence criteria */
 99:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
100:   TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
101:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
102: 
103:   tao->reason = TAO_CONTINUE_ITERATING;
104:   TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
105:   TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
106:   (*tao->ops->convergencetest)(tao,tao->cnvP);
107:   if (tao->reason != TAO_CONTINUE_ITERATING) return(0);

109:   /* Allocate the vectors needed for the BFGS approximation */
110:   KSPGetPC(tao->ksp, &pc);
111:   PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
112:   PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
113:   if (is_bfgs) {
114:     nlsP->bfgs_pre = pc;
115:     PCLMVMGetMatLMVM(nlsP->bfgs_pre, &nlsP->M);
116:     VecGetLocalSize(tao->solution, &n);
117:     VecGetSize(tao->solution, &N);
118:     MatSetSizes(nlsP->M, n, n, N, N);
119:     MatLMVMAllocate(nlsP->M, tao->solution, tao->gradient);
120:     MatIsSymmetricKnown(nlsP->M, &sym_set, &is_symmetric);
121:     if (!sym_set || !is_symmetric) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix in the LMVM preconditioner must be symmetric.");
122:   } else if (is_jacobi) {
123:     PCJacobiSetUseAbs(pc,PETSC_TRUE);
124:   }

126:   /* Initialize trust-region radius.  The initialization is only performed
127:      when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
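       /* The three -tao_nls_init_type strategies handled below are
            constant      -- keep the (clamped) user-supplied radius tao->trust0;
            interpolation -- probe the objective along the steepest-descent
                             direction and adjust the radius by interpolation;
            direction     -- leave the radius at zero so that the norm of the
                             first computed direction initializes it inside the
                             main loop. */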
128:   if (is_nash || is_stcg || is_gltr) {
129:     switch(nlsP->init_type) {
130:     case NLS_INIT_CONSTANT:
131:       /* Use the initial radius specified */
132:       break;

134:     case NLS_INIT_INTERPOLATION:
135:       /* Use interpolation to determine the initial radius */
136:       max_radius = 0.0;

138:       for (j = 0; j < j_max; ++j) {
139:         fmin = f;
140:         sigma = 0.0;

142:         if (needH) {
143:           TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
144:           needH = 0;
145:         }

147:         for (i = 0; i < i_max; ++i) {
148:           VecCopy(tao->solution,nlsP->W);
149:           VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
150:           TaoComputeObjective(tao, nlsP->W, &ftrial);
151:           if (PetscIsInfOrNanReal(ftrial)) {
152:             tau = nlsP->gamma1_i;
153:           } else {
154:             if (ftrial < fmin) {
155:               fmin = ftrial;
156:               sigma = -tao->trust / gnorm;
157:             }

159:             MatMult(tao->hessian, tao->gradient, nlsP->D);
160:             VecDot(tao->gradient, nlsP->D, &prered);

162:             prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
163:             actred = f - ftrial;
164:             if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
165:               kappa = 1.0;
166:             } else {
167:               kappa = actred / prered;
168:             }

170:             tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
171:             tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
172:             tau_min = PetscMin(tau_1, tau_2);
173:             tau_max = PetscMax(tau_1, tau_2);

175:             if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu1_i) {
176:               /* Great agreement */
177:               max_radius = PetscMax(max_radius, tao->trust);

179:               if (tau_max < 1.0) {
180:                 tau = nlsP->gamma3_i;
181:               } else if (tau_max > nlsP->gamma4_i) {
182:                 tau = nlsP->gamma4_i;
183:               } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
184:                 tau = tau_1;
185:               } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
186:                 tau = tau_2;
187:               } else {
188:                 tau = tau_max;
189:               }
190:             } else if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu2_i) {
191:               /* Good agreement */
192:               max_radius = PetscMax(max_radius, tao->trust);

194:               if (tau_max < nlsP->gamma2_i) {
195:                 tau = nlsP->gamma2_i;
196:               } else if (tau_max > nlsP->gamma3_i) {
197:                 tau = nlsP->gamma3_i;
198:               } else {
199:                 tau = tau_max;
200:               }
201:             } else {
202:               /* Not good agreement */
203:               if (tau_min > 1.0) {
204:                 tau = nlsP->gamma2_i;
205:               } else if (tau_max < nlsP->gamma1_i) {
206:                 tau = nlsP->gamma1_i;
207:               } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
208:                 tau = nlsP->gamma1_i;
209:               } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
210:                 tau = tau_1;
211:             } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
212:                 tau = tau_2;
213:               } else {
214:                 tau = tau_max;
215:               }
216:             }
217:           }
218:           tao->trust = tau * tao->trust;
219:         }

221:         if (fmin < f) {
222:           f = fmin;
223:           VecAXPY(tao->solution,sigma,tao->gradient);
224:           TaoComputeGradient(tao,tao->solution,tao->gradient);

226:           TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
227:           if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute gradient generated Inf or NaN");
228:           needH = 1;

230:           TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
231:           TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
232:           (*tao->ops->convergencetest)(tao,tao->cnvP);
233:           if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
234:         }
235:       }
236:       tao->trust = PetscMax(tao->trust, max_radius);

238:       /* Modify the radius if it is too large or small */
239:       tao->trust = PetscMax(tao->trust, nlsP->min_radius);
240:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
241:       break;

243:     default:
244:       /* Norm of the first direction will initialize radius */
245:       tao->trust = 0.0;
246:       break;
247:     }
248:   }

250:   /* Set counters for the Newton, BFGS, and gradient steps */
251:   nlsP->newt = 0;
252:   nlsP->bfgs = 0;
253:   nlsP->grad = 0;

255:   /* Have not converged; continue with Newton method */
256:   while (tao->reason == TAO_CONTINUE_ITERATING) {
257:     ++tao->niter;
258:     tao->ksp_its=0;

260:     /* Compute the Hessian */
261:     if (needH) {
262:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
263:     }

265:     /* Shift the Hessian matrix */
266:     if (pert > 0) {
267:       MatShift(tao->hessian, pert);
268:       if (tao->hessian != tao->hessian_pre) {
269:         MatShift(tao->hessian_pre, pert);
270:       }
271:     }
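         /* With a positive perturbation the linear solve below targets the
            shifted Newton system
                (H(x_k) + pert*I) d = -g(x_k),
            which makes it more likely that the Krylov solver (typically a
            conjugate gradient method) sees a positive-definite operator; the
            same shift is applied to the separate preconditioning matrix when
            one is used. */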

273:     if (nlsP->bfgs_pre) {
274:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
275:       ++bfgsUpdates;
276:     }

278:     /* Solve the Newton system of equations */
279:     KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
280:     if (is_nash || is_stcg || is_gltr) {
281:       KSPCGSetRadius(tao->ksp,nlsP->max_radius);
282:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
283:       KSPGetIterationNumber(tao->ksp,&kspits);
284:       tao->ksp_its+=kspits;
285:       tao->ksp_tot_its+=kspits;
286:       KSPCGGetNormD(tao->ksp,&norm_d);

288:       if (0.0 == tao->trust) {
289:         /* Radius was uninitialized; use the norm of the direction */
290:         if (norm_d > 0.0) {
291:           tao->trust = norm_d;

293:           /* Modify the radius if it is too large or small */
294:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
295:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);
296:         } else {
297:           /* The direction was bad; set radius to default value and re-solve
298:              the trust-region subproblem to get a direction */
299:           tao->trust = tao->trust0;

301:           /* Modify the radius if it is too large or small */
302:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
303:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);

305:           KSPCGSetRadius(tao->ksp,nlsP->max_radius);
306:           KSPSolve(tao->ksp, tao->gradient, nlsP->D);
307:           KSPGetIterationNumber(tao->ksp,&kspits);
308:           tao->ksp_its+=kspits;
309:           tao->ksp_tot_its+=kspits;
310:           KSPCGGetNormD(tao->ksp,&norm_d);

312:           if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
313:         }
314:       }
315:     } else {
316:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
317:       KSPGetIterationNumber(tao->ksp, &kspits);
318:       tao->ksp_its += kspits;
319:       tao->ksp_tot_its+=kspits;
320:     }
321:     VecScale(nlsP->D, -1.0);
322:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
323:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&  (nlsP->bfgs_pre)) {
324:       /* Preconditioner is numerically indefinite; reset the
325:          approximation if using BFGS preconditioning. */
326:       MatLMVMReset(nlsP->M, PETSC_FALSE);
327:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
328:       bfgsUpdates = 1;
329:     }

331:     if (KSP_CONVERGED_ATOL == ksp_reason) {
332:       ++nlsP->ksp_atol;
333:     } else if (KSP_CONVERGED_RTOL == ksp_reason) {
334:       ++nlsP->ksp_rtol;
335:     } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
336:       ++nlsP->ksp_ctol;
337:     } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
338:       ++nlsP->ksp_negc;
339:     } else if (KSP_DIVERGED_DTOL == ksp_reason) {
340:       ++nlsP->ksp_dtol;
341:     } else if (KSP_DIVERGED_ITS == ksp_reason) {
342:       ++nlsP->ksp_iter;
343:     } else {
344:       ++nlsP->ksp_othr;
345:     }
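         /* Direction safeguards: if the (possibly shifted) Newton direction
            fails the descent test below, fall back to the BFGS direction from
            the LMVM preconditioner when it is available, and otherwise to the
            scaled steepest-descent direction; the Hessian perturbation is also
            enlarged for the next iteration. */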

347:     /* Check for success (descent direction) */
348:     VecDot(nlsP->D, tao->gradient, &gdx);
349:     if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
350:       /* Newton step is not a descent direction or produced Inf or NaN;
351:          update the perturbation for next time */
352:       if (pert <= 0.0) {
353:         /* Initialize the perturbation */
354:         pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
355:         if (is_gltr) {
356:           KSPCGGLTRGetMinEig(tao->ksp,&e_min);
357:           pert = PetscMax(pert, -e_min);
358:         }
359:       } else {
360:         /* Increase the perturbation */
361:         pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
362:       }

364:       if (!nlsP->bfgs_pre) {
365:         /* The BFGS matrix is not available to fall back on;
366:            must use the gradient direction in this case */
367:         VecCopy(tao->gradient, nlsP->D);
368:         VecScale(nlsP->D, -1.0);
369:         ++nlsP->grad;
370:         stepType = NLS_GRADIENT;
371:       } else {
372:         /* Attempt to use the BFGS direction */
373:         MatSolve(nlsP->M, tao->gradient, nlsP->D);
374:         VecScale(nlsP->D, -1.0);

376:         /* Check for success (descent direction) */
377:         VecDot(tao->gradient, nlsP->D, &gdx);
378:         if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
379:           /* BFGS direction is not a descent direction or produced Inf or NaN.
380:              We can assert bfgsUpdates > 1 in this case because
381:              the first solve produces the scaled gradient direction,
382:              which is guaranteed to be descent */

384:           /* Use steepest descent direction (scaled) */
385:           MatLMVMReset(nlsP->M, PETSC_FALSE);
386:           MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
387:           MatSolve(nlsP->M, tao->gradient, nlsP->D);
388:           VecScale(nlsP->D, -1.0);

390:           bfgsUpdates = 1;
391:           ++nlsP->grad;
392:           stepType = NLS_GRADIENT;
393:         } else {
394:           MatLMVMGetUpdateCount(nlsP->M, &bfgsUpdates);
395:           if (1 == bfgsUpdates) {
396:             /* The first BFGS direction is always the scaled gradient */
397:             ++nlsP->grad;
398:             stepType = NLS_GRADIENT;
399:           } else {
400:             ++nlsP->bfgs;
401:             stepType = NLS_BFGS;
402:           }
403:         }
404:       }
405:     } else {
406:       /* Computed Newton step is descent */
407:       switch (ksp_reason) {
408:       case KSP_DIVERGED_NANORINF:
409:       case KSP_DIVERGED_BREAKDOWN:
410:       case KSP_DIVERGED_INDEFINITE_MAT:
411:       case KSP_DIVERGED_INDEFINITE_PC:
412:       case KSP_CONVERGED_CG_NEG_CURVE:
413:         /* Matrix or preconditioner is indefinite; increase perturbation */
414:         if (pert <= 0.0) {
415:           /* Initialize the perturbation */
416:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
417:           if (is_gltr) {
418:             KSPCGGLTRGetMinEig(tao->ksp, &e_min);
419:             pert = PetscMax(pert, -e_min);
420:           }
421:         } else {
422:           /* Increase the perturbation */
423:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
424:         }
425:         break;

427:       default:
428:         /* Newton step computation is good; decrease perturbation */
429:         pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
430:         if (pert < nlsP->pmin) {
431:           pert = 0.0;
432:         }
433:         break;
434:       }

436:       ++nlsP->newt;
437:       stepType = NLS_NEWTON;
438:     }

440:     /* Perform the linesearch */
441:     fold = f;
442:     VecCopy(tao->solution, nlsP->Xold);
443:     VecCopy(tao->gradient, nlsP->Gold);

445:     TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
446:     TaoAddLineSearchCounts(tao);

448:     while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) {      /* Linesearch failed */
449:       f = fold;
450:       VecCopy(nlsP->Xold, tao->solution);
451:       VecCopy(nlsP->Gold, tao->gradient);

453:       switch(stepType) {
454:       case NLS_NEWTON:
455:         /* Failed to obtain acceptable iterate with the Newton step;
456:            update the perturbation for next time */
457:         if (pert <= 0.0) {
458:           /* Initialize the perturbation */
459:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
460:           if (is_gltr) {
461:             KSPCGGLTRGetMinEig(tao->ksp,&e_min);
462:             pert = PetscMax(pert, -e_min);
463:           }
464:         } else {
465:           /* Increase the perturbation */
466:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
467:         }

469:         if (!nlsP->bfgs_pre) {
470:           /* The BFGS matrix is not available to fall back on;
471:              must use the gradient direction in this case */
472:           VecCopy(tao->gradient, nlsP->D);
473:           ++nlsP->grad;
474:           stepType = NLS_GRADIENT;
475:         } else {
476:           /* Attempt to use the BFGS direction */
477:           MatSolve(nlsP->M, tao->gradient, nlsP->D);
478:           /* Check for success (descent direction) */
479:           VecDot(tao->gradient, nlsP->D, &gdx);
480:           if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
481:             /* BFGS direction is not a descent direction or produced Inf or NaN.
482:                We can assert bfgsUpdates > 1 in this case
483:                Use steepest descent direction (scaled) */
484:             MatLMVMReset(nlsP->M, PETSC_FALSE);
485:             MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
486:             MatSolve(nlsP->M, tao->gradient, nlsP->D);

488:             bfgsUpdates = 1;
489:             ++nlsP->grad;
490:             stepType = NLS_GRADIENT;
491:           } else {
492:             if (1 == bfgsUpdates) {
493:               /* The first BFGS direction is always the scaled gradient */
494:               ++nlsP->grad;
495:               stepType = NLS_GRADIENT;
496:             } else {
497:               ++nlsP->bfgs;
498:               stepType = NLS_BFGS;
499:             }
500:           }
501:         }
502:         break;

504:       case NLS_BFGS:
505:         /* Can only enter if the BFGS (LMVM) preconditioner is in use.
506:            Failed to obtain acceptable iterate with BFGS step
507:            Attempt to use the scaled gradient direction */
508:         MatLMVMReset(nlsP->M, PETSC_FALSE);
509:         MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
510:         MatSolve(nlsP->M, tao->gradient, nlsP->D);

512:         bfgsUpdates = 1;
513:         ++nlsP->grad;
514:         stepType = NLS_GRADIENT;
515:         break;
516:       }
517:       VecScale(nlsP->D, -1.0);

519:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
520:       TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
521:       TaoAddLineSearchCounts(tao);
522:     }

524:     if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
525:       /* Failed to find an improving point */
526:       f = fold;
527:       VecCopy(nlsP->Xold, tao->solution);
528:       VecCopy(nlsP->Gold, tao->gradient);
529:       step = 0.0;
530:       tao->reason = TAO_DIVERGED_LS_FAILURE;
531:       break;
532:     }

534:     /* Update trust region radius */
535:     if (is_nash || is_stcg || is_gltr) {
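           /* The -tao_nls_update_type strategies handled below are
                step          -- scale the radius by how much of the Newton step
                                 the line search accepted (nu/omega parameters);
                reduction     -- compare the actual reduction with the reduction
                                 predicted by the CG subproblem (eta/alpha
                                 parameters);
                interpolation -- choose a new radius by interpolating the model
                                 agreement (mu/gamma/theta parameters). */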
536:       switch(nlsP->update_type) {
537:       case NLS_UPDATE_STEP:
538:         if (stepType == NLS_NEWTON) {
539:           if (step < nlsP->nu1) {
540:             /* Very bad step taken; reduce radius */
541:             tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
542:           } else if (step < nlsP->nu2) {
543:             /* Reasonably bad step taken; reduce radius */
544:             tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
545:           } else if (step < nlsP->nu3) {
546:             /*  Reasonable step was taken; leave radius alone */
547:             if (nlsP->omega3 < 1.0) {
548:               tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
549:             } else if (nlsP->omega3 > 1.0) {
550:               tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
551:             }
552:           } else if (step < nlsP->nu4) {
553:             /*  Full step taken; increase the radius */
554:             tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
555:           } else {
556:             /*  More than full step taken; increase the radius */
557:             tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
558:           }
559:         } else {
560:           /*  Newton step was not good; reduce the radius */
561:           tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
562:         }
563:         break;

565:       case NLS_UPDATE_REDUCTION:
566:         if (stepType == NLS_NEWTON) {
567:           /*  Get predicted reduction */

569:           KSPCGGetObjFcn(tao->ksp,&prered);
570:           if (prered >= 0.0) {
571:             /*  The predicted reduction has the wrong sign.  This cannot */
572:             /*  happen in infinite precision arithmetic.  Step should */
573:             /*  be rejected! */
574:             tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
575:           } else {
576:             if (PetscIsInfOrNanReal(f_full)) {
577:               tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
578:             } else {
579:               /*  Compute the actual reduction */
580:               actred = fold - f_full;
581:               prered = -prered;
582:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
583:                   (PetscAbsScalar(prered) <= nlsP->epsilon)) {
584:                 kappa = 1.0;
585:               } else {
586:                 kappa = actred / prered;
587:               }

589:               /*  Accept or reject the step and update radius */
590:               if (kappa < nlsP->eta1) {
591:                 /*  Very bad step */
592:                 tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
593:               } else if (kappa < nlsP->eta2) {
594:                 /*  Marginally bad step */
595:                 tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
596:               } else if (kappa < nlsP->eta3) {
597:                 /*  Reasonable step */
598:                 if (nlsP->alpha3 < 1.0) {
599:                   tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
600:                 } else if (nlsP->alpha3 > 1.0) {
601:                   tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
602:                 }
603:               } else if (kappa < nlsP->eta4) {
604:                 /*  Good step */
605:                 tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
606:               } else {
607:                 /*  Very good step */
608:                 tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
609:               }
610:             }
611:           }
612:         } else {
613:           /*  Newton step was not good; reduce the radius */
614:           tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
615:         }
616:         break;

618:       default:
619:         if (stepType == NLS_NEWTON) {
620:           KSPCGGetObjFcn(tao->ksp,&prered);
621:           if (prered >= 0.0) {
622:             /*  The predicted reduction has the wrong sign.  This cannot */
623:             /*  happen in infinite precision arithmetic.  Step should */
624:             /*  be rejected! */
625:             tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
626:           } else {
627:             if (PetscIsInfOrNanReal(f_full)) {
628:               tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
629:             } else {
630:               actred = fold - f_full;
631:               prered = -prered;
632:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
633:                 kappa = 1.0;
634:               } else {
635:                 kappa = actred / prered;
636:               }

638:               tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
639:               tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
640:               tau_min = PetscMin(tau_1, tau_2);
641:               tau_max = PetscMax(tau_1, tau_2);

643:               if (kappa >= 1.0 - nlsP->mu1) {
644:                 /*  Great agreement */
645:                 if (tau_max < 1.0) {
646:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
647:                 } else if (tau_max > nlsP->gamma4) {
648:                   tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
649:                 } else {
650:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
651:                 }
652:               } else if (kappa >= 1.0 - nlsP->mu2) {
653:                 /*  Good agreement */

655:                 if (tau_max < nlsP->gamma2) {
656:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
657:                 } else if (tau_max > nlsP->gamma3) {
658:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
659:                 } else if (tau_max < 1.0) {
660:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
661:                 } else {
662:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
663:                 }
664:               } else {
665:                 /*  Not good agreement */
666:                 if (tau_min > 1.0) {
667:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
668:                 } else if (tau_max < nlsP->gamma1) {
669:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
670:                 } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
671:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
672:                 } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
673:                   tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
674:                 } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
675:                   tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
676:                 } else {
677:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
678:                 }
679:               }
680:             }
681:           }
682:         } else {
683:           /*  Newton step was not good; reduce the radius */
684:           tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
685:         }
686:         break;
687:       }

689:       /*  The radius may have been increased; modify if it is too large */
690:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
691:     }

693:     /*  Check for termination */
694:     TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
695:     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Not-a-Number");
696:     needH = 1;
697:     TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
698:     TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
699:     (*tao->ops->convergencetest)(tao,tao->cnvP);
700:   }
701:   return(0);
702: }

704: /* ---------------------------------------------------------- */
705: static PetscErrorCode TaoSetUp_NLS(Tao tao)
706: {
707:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

711:   if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
712:   if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
713:   if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
714:   if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
715:   if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
716:   if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
717:   nlsP->M = 0;
718:   nlsP->bfgs_pre = 0;
719:   return(0);
720: }

722: /*------------------------------------------------------------*/
723: static PetscErrorCode TaoDestroy_NLS(Tao tao)
724: {
725:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

729:   if (tao->setupcalled) {
730:     VecDestroy(&nlsP->D);
731:     VecDestroy(&nlsP->W);
732:     VecDestroy(&nlsP->Xold);
733:     VecDestroy(&nlsP->Gold);
734:   }
735:   PetscFree(tao->data);
736:   return(0);
737: }

739: /*------------------------------------------------------------*/
740: static PetscErrorCode TaoSetFromOptions_NLS(PetscOptionItems *PetscOptionsObject,Tao tao)
741: {
742:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

746:   PetscOptionsHead(PetscOptionsObject,"Newton line search method for unconstrained optimization");
747:   PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
748:   PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
749:   PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval,NULL);
750:   PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin,NULL);
751:   PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax,NULL);
752:   PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac,NULL);
753:   PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin,NULL);
754:   PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax,NULL);
755:   PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac,NULL);
756:   PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac,NULL);
757:   PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac,NULL);
758:   PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac,NULL);
759:   PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1,NULL);
760:   PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2,NULL);
761:   PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3,NULL);
762:   PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4,NULL);
763:   PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1,NULL);
764:   PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2,NULL);
765:   PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3,NULL);
766:   PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4,NULL);
767:   PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5,NULL);
768:   PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1,NULL);
769:   PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2,NULL);
770:   PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3,NULL);
771:   PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4,NULL);
772:   PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1,NULL);
773:   PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2,NULL);
774:   PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3,NULL);
775:   PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4,NULL);
776:   PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5,NULL);
777:   PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i,NULL);
778:   PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i,NULL);
779:   PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i,NULL);
780:   PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i,NULL);
781:   PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i,NULL);
782:   PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i,NULL);
783:   PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i,NULL);
784:   PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1,NULL);
785:   PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2,NULL);
786:   PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1,NULL);
787:   PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2,NULL);
788:   PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3,NULL);
789:   PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4,NULL);
790:   PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta,NULL);
791:   PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius,NULL);
792:   PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius,NULL);
793:   PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon,NULL);
794:   PetscOptionsTail();
795:   TaoLineSearchSetFromOptions(tao->linesearch);
796:   KSPSetFromOptions(tao->ksp);
797:   return(0);
798: }


801: /*------------------------------------------------------------*/
802: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
803: {
804:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;
805:   PetscBool      isascii;

809:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
810:   if (isascii) {
811:     PetscViewerASCIIPushTab(viewer);
812:     PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
813:     PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
814:     PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);

816:     PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
817:     PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
818:     PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
819:     PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
820:     PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
821:     PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
822:     PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
823:     PetscViewerASCIIPopTab(viewer);
824:   }
825:   return(0);
826: }

828: /* ---------------------------------------------------------- */
829: /*MC
830:   TAONLS - Newton's method with linesearch for unconstrained minimization.
831:   At each iteration, the Newton line search method solves the symmetric
 832:   system of equations to obtain the step direction dk:
 833:               Hk dk = -gk
 834:   A Moré-Thuente line search is then applied along the direction dk to
 835:   approximately solve
 836:               min_t f(xk + t dk)

838:     Options Database Keys:
839: + -tao_nls_init_type - "constant","direction","interpolation"
 840: . -tao_nls_update_type - "step","reduction","interpolation"
841: . -tao_nls_sval - perturbation starting value
842: . -tao_nls_imin - minimum initial perturbation
843: . -tao_nls_imax - maximum initial perturbation
844: . -tao_nls_pmin - minimum perturbation
845: . -tao_nls_pmax - maximum perturbation
846: . -tao_nls_pgfac - growth factor
847: . -tao_nls_psfac - shrink factor
848: . -tao_nls_imfac - initial merit factor
849: . -tao_nls_pmgfac - merit growth factor
850: . -tao_nls_pmsfac - merit shrink factor
851: . -tao_nls_eta1 - poor steplength; reduce radius
 852: . -tao_nls_eta2 - reasonable steplength; leave radius alone
 853: . -tao_nls_eta3 - good steplength; increase radius
854: . -tao_nls_eta4 - excellent steplength; greatly increase radius
855: . -tao_nls_alpha1 - alpha1 reduction
856: . -tao_nls_alpha2 - alpha2 reduction
857: . -tao_nls_alpha3 - alpha3 reduction
858: . -tao_nls_alpha4 - alpha4 reduction
 859: . -tao_nls_alpha5 - alpha5 reduction
860: . -tao_nls_mu1 - mu1 interpolation update
861: . -tao_nls_mu2 - mu2 interpolation update
862: . -tao_nls_gamma1 - gamma1 interpolation update
863: . -tao_nls_gamma2 - gamma2 interpolation update
864: . -tao_nls_gamma3 - gamma3 interpolation update
865: . -tao_nls_gamma4 - gamma4 interpolation update
866: . -tao_nls_theta - theta interpolation update
867: . -tao_nls_omega1 - omega1 step update
868: . -tao_nls_omega2 - omega2 step update
869: . -tao_nls_omega3 - omega3 step update
870: . -tao_nls_omega4 - omega4 step update
871: . -tao_nls_omega5 - omega5 step update
872: . -tao_nls_mu1_i -  mu1 interpolation init factor
873: . -tao_nls_mu2_i -  mu2 interpolation init factor
874: . -tao_nls_gamma1_i -  gamma1 interpolation init factor
875: . -tao_nls_gamma2_i -  gamma2 interpolation init factor
876: . -tao_nls_gamma3_i -  gamma3 interpolation init factor
877: . -tao_nls_gamma4_i -  gamma4 interpolation init factor
878: - -tao_nls_theta_i -  theta interpolation init factor

880:   Level: beginner
881: M*/
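As a usage illustration (a sketch, not part of nls.c), these options can be given on the command line, e.g. -tao_nls_init_type interpolation -tao_nls_update_type reduction, or pushed into the options database programmatically before TaoSetFromOptions() is called; the helper name SetNLSOptionsExample is hypothetical.

#include <petscsys.h>

/* Sketch: place a few -tao_nls_* options in the global options database.
   Call this before TaoSetFromOptions(tao).  The KSP created by TAONLS
   appends the "tao_nls_" prefix, so -tao_nls_ksp_* options reach it too. */
static PetscErrorCode SetNLSOptionsExample(void)
{
  PetscErrorCode ierr;

  ierr = PetscOptionsSetValue(NULL,"-tao_nls_init_type","interpolation");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-tao_nls_update_type","reduction");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-tao_nls_max_radius","1.0e5");CHKERRQ(ierr);
  return 0;
}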

883: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
884: {
885:   TAO_NLS        *nlsP;
886:   const char     *morethuente_type = TAOLINESEARCHMT;

890:   PetscNewLog(tao,&nlsP);

892:   tao->ops->setup = TaoSetUp_NLS;
893:   tao->ops->solve = TaoSolve_NLS;
894:   tao->ops->view = TaoView_NLS;
895:   tao->ops->setfromoptions = TaoSetFromOptions_NLS;
896:   tao->ops->destroy = TaoDestroy_NLS;

898:   /* Override default settings (unless already changed) */
899:   if (!tao->max_it_changed) tao->max_it = 50;
900:   if (!tao->trust0_changed) tao->trust0 = 100.0;

902:   tao->data = (void*)nlsP;

904:   nlsP->sval   = 0.0;
905:   nlsP->imin   = 1.0e-4;
906:   nlsP->imax   = 1.0e+2;
907:   nlsP->imfac  = 1.0e-1;

909:   nlsP->pmin   = 1.0e-12;
910:   nlsP->pmax   = 1.0e+2;
911:   nlsP->pgfac  = 1.0e+1;
912:   nlsP->psfac  = 4.0e-1;
913:   nlsP->pmgfac = 1.0e-1;
914:   nlsP->pmsfac = 1.0e-1;

916:   /*  Default values for trust-region radius update based on steplength */
917:   nlsP->nu1 = 0.25;
918:   nlsP->nu2 = 0.50;
919:   nlsP->nu3 = 1.00;
920:   nlsP->nu4 = 1.25;

922:   nlsP->omega1 = 0.25;
923:   nlsP->omega2 = 0.50;
924:   nlsP->omega3 = 1.00;
925:   nlsP->omega4 = 2.00;
926:   nlsP->omega5 = 4.00;

928:   /*  Default values for trust-region radius update based on reduction */
929:   nlsP->eta1 = 1.0e-4;
930:   nlsP->eta2 = 0.25;
931:   nlsP->eta3 = 0.50;
932:   nlsP->eta4 = 0.90;

934:   nlsP->alpha1 = 0.25;
935:   nlsP->alpha2 = 0.50;
936:   nlsP->alpha3 = 1.00;
937:   nlsP->alpha4 = 2.00;
938:   nlsP->alpha5 = 4.00;

940:   /*  Default values for trust-region radius update based on interpolation */
941:   nlsP->mu1 = 0.10;
942:   nlsP->mu2 = 0.50;

944:   nlsP->gamma1 = 0.25;
945:   nlsP->gamma2 = 0.50;
946:   nlsP->gamma3 = 2.00;
947:   nlsP->gamma4 = 4.00;

949:   nlsP->theta = 0.05;

951:   /*  Default values for trust region initialization based on interpolation */
952:   nlsP->mu1_i = 0.35;
953:   nlsP->mu2_i = 0.50;

955:   nlsP->gamma1_i = 0.0625;
956:   nlsP->gamma2_i = 0.5;
957:   nlsP->gamma3_i = 2.0;
958:   nlsP->gamma4_i = 5.0;

960:   nlsP->theta_i = 0.25;

962:   /*  Remaining parameters */
963:   nlsP->min_radius = 1.0e-10;
964:   nlsP->max_radius = 1.0e10;
965:   nlsP->epsilon = 1.0e-6;

967:   nlsP->init_type       = NLS_INIT_INTERPOLATION;
968:   nlsP->update_type     = NLS_UPDATE_STEP;

970:   TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
971:   PetscObjectIncrementTabLevel((PetscObject)tao->linesearch,(PetscObject)tao,1);
972:   TaoLineSearchSetType(tao->linesearch,morethuente_type);
973:   TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
974:   TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);

976:   /*  Set linear solver to default for symmetric matrices */
977:   KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
978:   PetscObjectIncrementTabLevel((PetscObject)tao->ksp,(PetscObject)tao,1);
979:   KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
980:   KSPAppendOptionsPrefix(tao->ksp,"tao_nls_");
981:   KSPSetType(tao->ksp,KSPCGSTCG);
982:   return(0);
983: }