Actual source code: nls.c

petsc-3.8.4 2018-03-24
  1:  #include <petsctaolinesearch.h>
  2:  #include <../src/tao/matrix/lmvmmat.h>
  3:  #include <../src/tao/unconstrained/impls/nls/nlsimpl.h>

  5:  #include <petscksp.h>

  7: #define NLS_PC_NONE     0
  8: #define NLS_PC_AHESS    1
  9: #define NLS_PC_BFGS     2
 10: #define NLS_PC_PETSC    3
 11: #define NLS_PC_TYPES    4

 13: #define BFGS_SCALE_AHESS        0
 14: #define BFGS_SCALE_PHESS        1
 15: #define BFGS_SCALE_BFGS         2
 16: #define BFGS_SCALE_TYPES        3

 18: #define NLS_INIT_CONSTANT         0
 19: #define NLS_INIT_DIRECTION        1
 20: #define NLS_INIT_INTERPOLATION    2
 21: #define NLS_INIT_TYPES            3

 23: #define NLS_UPDATE_STEP           0
 24: #define NLS_UPDATE_REDUCTION      1
 25: #define NLS_UPDATE_INTERPOLATION  2
 26: #define NLS_UPDATE_TYPES          3

 28: static const char *NLS_PC[64] = {"none", "ahess", "bfgs", "petsc"};

 30: static const char *BFGS_SCALE[64] = {"ahess", "phess", "bfgs"};

 32: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};

 34: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};

 36: /* Routine for BFGS preconditioner */

 38: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
 39: {
 41:   Mat            M;

 47:   PCShellGetContext(pc,(void**)&M);
 48:   MatLMVMSolve(M, b, x);
 49:   return(0);
 50: }
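/* This shell routine applies the inverse of the limited-memory BFGS
   approximation M to the vector b.  It is registered further below with
   PCShellSetApply() when the "bfgs" preconditioner is selected, so each KSP
   preconditioner application becomes a call to MatLMVMSolve(). */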

 52: /*
 53:  Implements Newton's Method with a line search approach for solving
 54:  unconstrained minimization problems.  A Moré-Thuente line search
 55:  is used to guarantee that the BFGS preconditioner remains positive
 56:  definite.

 58:  The method can shift the Hessian matrix.  The shifting procedure is
 59:  adapted from the PATH algorithm for solving complementarity
 60:  problems.

 62:  The linear system solve should be done with a conjugate gradient
 63:  method, although any method can be used. 
 64: */
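/* Concretely, each outer iteration below assembles the (possibly shifted)
   Hessian and solves

       (H + pert*I) d = g,   followed by   d <- -d,

   where pert is the perturbation maintained in TaoSolve_NLS(); the
   Moré-Thuente line search is then applied along d to obtain the next
   iterate. */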

 66: #define NLS_NEWTON              0
 67: #define NLS_BFGS                1
 68: #define NLS_SCALED_GRADIENT     2
 69: #define NLS_GRADIENT            3
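/* These step types form the fallback hierarchy used whenever a computed
   direction is not a descent direction or the line search fails:
   Newton -> BFGS -> scaled gradient -> gradient. */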

 71: static PetscErrorCode TaoSolve_NLS(Tao tao)
 72: {
 73:   PetscErrorCode               ierr;
 74:   TAO_NLS                      *nlsP = (TAO_NLS *)tao->data;
 75:   KSPType                      ksp_type;
 76:   PetscBool                    is_nash,is_stcg,is_gltr;
 77:   KSPConvergedReason           ksp_reason;
 78:   PC                           pc;
 79:   TaoConvergedReason           reason;
 80:   TaoLineSearchConvergedReason ls_reason;

 82:   PetscReal                    fmin, ftrial, f_full, prered, actred, kappa, sigma;
 83:   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 84:   PetscReal                    f, fold, gdx, gnorm, pert;
 85:   PetscReal                    step = 1.0;
 86:   PetscReal                    delta;
 87:   PetscReal                    norm_d = 0.0, e_min;

 89:   PetscInt                     stepType;
 90:   PetscInt                     bfgsUpdates = 0;
 91:   PetscInt                     n,N,kspits;
 92:   PetscInt                     needH = 1;

 94:   PetscInt                     i_max = 5;
 95:   PetscInt                     j_max = 1;
 96:   PetscInt                     i, j;

 99:   if (tao->XL || tao->XU || tao->ops->computebounds) {
100:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
101:   }

103:   /* Initialize variables */
104:   pert = nlsP->sval;

106:   /* Number of times ksp stopped because of these reasons */
107:   nlsP->ksp_atol = 0;
108:   nlsP->ksp_rtol = 0;
109:   nlsP->ksp_dtol = 0;
110:   nlsP->ksp_ctol = 0;
111:   nlsP->ksp_negc = 0;
112:   nlsP->ksp_iter = 0;
113:   nlsP->ksp_othr = 0;

115:   /* Initialize trust-region radius when using nash, stcg, or gltr;
116:      the radius setting is ignored by other KSP methods and
117:      will be reset during the first iteration.
118:   */
119:   KSPGetType(tao->ksp,&ksp_type);
120:   PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);
121:   PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);
122:   PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);

124:   KSPCGSetRadius(tao->ksp,nlsP->max_radius);

126:   if (is_nash || is_stcg || is_gltr) {
127:     if (tao->trust0 < 0.0) SETERRQ(PETSC_COMM_SELF,1,"Initial radius negative");
128:     tao->trust = tao->trust0;
129:     tao->trust = PetscMax(tao->trust, nlsP->min_radius);
130:     tao->trust = PetscMin(tao->trust, nlsP->max_radius);
131:   }

133:   /* Get vectors we will need */
134:   if (NLS_PC_BFGS == nlsP->pc_type && !nlsP->M) {
135:     VecGetLocalSize(tao->solution,&n);
136:     VecGetSize(tao->solution,&N);
137:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&nlsP->M);
138:     MatLMVMAllocateVectors(nlsP->M,tao->solution);
139:   }

141:   /* Check convergence criteria */
142:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
143:   TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
144:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

146:   TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
147:   if (reason != TAO_CONTINUE_ITERATING) return(0);

149:   /* create vectors for the limited memory preconditioner */
150:   if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_BFGS != nlsP->bfgs_scale_type)) {
151:     if (!nlsP->Diag) {
152:       VecDuplicate(tao->solution,&nlsP->Diag);
153:     }
154:   }

156:   /* Modify the preconditioner to use the bfgs approximation */
157:   KSPGetPC(tao->ksp, &pc);
158:   switch(nlsP->pc_type) {
159:   case NLS_PC_NONE:
160:     PCSetType(pc, PCNONE);
161:     PCSetFromOptions(pc);
162:     break;

164:   case NLS_PC_AHESS:
165:     PCSetType(pc, PCJACOBI);
166:     PCSetFromOptions(pc);
167:     PCJacobiSetUseAbs(pc,PETSC_TRUE);
168:     break;

170:   case NLS_PC_BFGS:
171:     PCSetType(pc, PCSHELL);
172:     PCSetFromOptions(pc);
173:     PCShellSetName(pc, "bfgs");
174:     PCShellSetContext(pc, nlsP->M);
175:     PCShellSetApply(pc, MatLMVMSolveShell);
176:     break;

178:   default:
179:     /* Use the pc method set by pc_type */
180:     break;
181:   }

183:   /* Initialize trust-region radius.  The initialization is only performed
184:      when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
185:   if (is_nash || is_stcg || is_gltr) {
186:     switch(nlsP->init_type) {
187:     case NLS_INIT_CONSTANT:
188:       /* Use the initial radius specified */
189:       break;

191:     case NLS_INIT_INTERPOLATION:
192:       /* Use interpolation to determine the initial trust-region radius */
193:       max_radius = 0.0;

195:       for (j = 0; j < j_max; ++j) {
196:         fmin = f;
197:         sigma = 0.0;

199:         if (needH) {
200:           TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
201:           needH = 0;
202:         }

204:         for (i = 0; i < i_max; ++i) {
205:           VecCopy(tao->solution,nlsP->W);
206:           VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
207:           TaoComputeObjective(tao, nlsP->W, &ftrial);
208:           if (PetscIsInfOrNanReal(ftrial)) {
209:             tau = nlsP->gamma1_i;
210:           } else {
211:             if (ftrial < fmin) {
212:               fmin = ftrial;
213:               sigma = -tao->trust / gnorm;
214:             }

216:             MatMult(tao->hessian, tao->gradient, nlsP->D);
217:             VecDot(tao->gradient, nlsP->D, &prered);

219:             prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
220:             actred = f - ftrial;
221:             if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
222:               kappa = 1.0;
223:             } else {
224:               kappa = actred / prered;
225:             }

227:             tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
228:             tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
229:             tau_min = PetscMin(tau_1, tau_2);
230:             tau_max = PetscMax(tau_1, tau_2);

232:             if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu1_i) {
233:               /* Great agreement */
234:               max_radius = PetscMax(max_radius, tao->trust);

236:               if (tau_max < 1.0) {
237:                 tau = nlsP->gamma3_i;
238:               } else if (tau_max > nlsP->gamma4_i) {
239:                 tau = nlsP->gamma4_i;
240:               } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
241:                 tau = tau_1;
242:               } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
243:                 tau = tau_2;
244:               } else {
245:                 tau = tau_max;
246:               }
247:             } else if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu2_i) {
248:               /* Good agreement */
249:               max_radius = PetscMax(max_radius, tao->trust);

251:               if (tau_max < nlsP->gamma2_i) {
252:                 tau = nlsP->gamma2_i;
253:               } else if (tau_max > nlsP->gamma3_i) {
254:                 tau = nlsP->gamma3_i;
255:               } else {
256:                 tau = tau_max;
257:               }
258:             } else {
259:               /* Not good agreement */
260:               if (tau_min > 1.0) {
261:                 tau = nlsP->gamma2_i;
262:               } else if (tau_max < nlsP->gamma1_i) {
263:                 tau = nlsP->gamma1_i;
264:               } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
265:                 tau = nlsP->gamma1_i;
266:               } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
267:                 tau = tau_1;
 268:             } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
269:                 tau = tau_2;
270:               } else {
271:                 tau = tau_max;
272:               }
273:             }
274:           }
275:           tao->trust = tau * tao->trust;
276:         }

278:         if (fmin < f) {
279:           f = fmin;
280:           VecAXPY(tao->solution,sigma,tao->gradient);
281:           TaoComputeGradient(tao,tao->solution,tao->gradient);

283:           TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
284:           if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute gradient generated Inf or NaN");
285:           needH = 1;

287:           TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
288:           if (reason != TAO_CONTINUE_ITERATING) return(0);
289:         }
290:       }
291:       tao->trust = PetscMax(tao->trust, max_radius);

293:       /* Modify the radius if it is too large or small */
294:       tao->trust = PetscMax(tao->trust, nlsP->min_radius);
295:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
296:       break;

298:     default:
299:       /* Norm of the first direction will initialize radius */
300:       tao->trust = 0.0;
301:       break;
302:     }
303:   }
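/* The interpolation-based initialization above evaluates trial points
   x - (trust/||g||) g, measures the agreement ratio kappa = actred/prered
   between the actual and the Hessian-predicted reduction, and rescales the
   radius by a factor tau chosen from the gamma*_i parameters according to
   how good that agreement is. */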

305:   /* Set initial scaling for the BFGS preconditioner
306:      This step is done after computing the initial trust-region radius
307:      since the function value may have decreased */
308:   if (NLS_PC_BFGS == nlsP->pc_type) {
309:     if (f != 0.0) {
310:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
311:     } else {
312:       delta = 2.0 / (gnorm*gnorm);
313:     }
314:     MatLMVMSetDelta(nlsP->M,delta);
315:   }
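/* The scaling delta = 2|f| / ||g||^2 gives the LMVM matrix a rough estimate of
   the inverse curvature: for a quadratic with minimum value zero,
   2 f / ||g||^2 = g'H^{-1}g / g'g, a Rayleigh-quotient estimate of H^{-1}
   along the gradient direction. */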

317:   /* Set counter for gradient/reset steps*/
318:   /* Reset the counters for the number of each step type taken */
319:   nlsP->bfgs = 0;
320:   nlsP->sgrad = 0;
321:   nlsP->grad = 0;

323:   /* Have not converged; continue with Newton method */
324:   while (reason == TAO_CONTINUE_ITERATING) {
325:     ++tao->niter;
326:     tao->ksp_its=0;

328:     /* Compute the Hessian */
329:     if (needH) {
330:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
331:     }

333:     if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_AHESS == nlsP->bfgs_scale_type)) {
334:       /* Obtain diagonal for the bfgs preconditioner  */
335:       MatGetDiagonal(tao->hessian, nlsP->Diag);
336:       VecAbs(nlsP->Diag);
337:       VecReciprocal(nlsP->Diag);
338:       MatLMVMSetScale(nlsP->M,nlsP->Diag);
339:     }

341:     /* Shift the Hessian matrix */
342:     if (pert > 0) {
343:       MatShift(tao->hessian, pert);
344:       if (tao->hessian != tao->hessian_pre) {
345:         MatShift(tao->hessian_pre, pert);
346:       }
347:     }

349:     if (NLS_PC_BFGS == nlsP->pc_type) {
350:       if (BFGS_SCALE_PHESS == nlsP->bfgs_scale_type) {
351:         /* Obtain diagonal for the bfgs preconditioner  */
352:         MatGetDiagonal(tao->hessian, nlsP->Diag);
353:         VecAbs(nlsP->Diag);
354:         VecReciprocal(nlsP->Diag);
355:         MatLMVMSetScale(nlsP->M,nlsP->Diag);
356:       }
357:       /* Update the limited memory preconditioner */
358:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
359:       ++bfgsUpdates;
360:     }
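/* bfgsUpdates counts the (solution, gradient) pairs fed to the LMVM
   approximation since its last reset; it is tested later because the first
   solve after a reset only produces a scaled gradient direction rather than a
   genuine BFGS direction. */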

362:     /* Solve the Newton system of equations */
363:     KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
364:     if (is_nash || is_stcg || is_gltr) {
365:       KSPCGSetRadius(tao->ksp,nlsP->max_radius);
366:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
367:       KSPGetIterationNumber(tao->ksp,&kspits);
368:       tao->ksp_its+=kspits;
369:       tao->ksp_tot_its+=kspits;
370:       KSPCGGetNormD(tao->ksp,&norm_d);

372:       if (0.0 == tao->trust) {
373:         /* Radius was uninitialized; use the norm of the direction */
374:         if (norm_d > 0.0) {
375:           tao->trust = norm_d;

377:           /* Modify the radius if it is too large or small */
378:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
379:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);
380:         } else {
381:           /* The direction was bad; set radius to default value and re-solve
382:              the trust-region subproblem to get a direction */
383:           tao->trust = tao->trust0;

385:           /* Modify the radius if it is too large or small */
386:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
387:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);

389:           KSPCGSetRadius(tao->ksp,nlsP->max_radius);
390:           KSPSolve(tao->ksp, tao->gradient, nlsP->D);
391:           KSPGetIterationNumber(tao->ksp,&kspits);
392:           tao->ksp_its+=kspits;
393:           tao->ksp_tot_its+=kspits;
394:           KSPCGGetNormD(tao->ksp,&norm_d);

396:           if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
397:         }
398:       }
399:     } else {
400:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
401:       KSPGetIterationNumber(tao->ksp, &kspits);
402:       tao->ksp_its += kspits;
403:       tao->ksp_tot_its+=kspits;
404:     }
405:     VecScale(nlsP->D, -1.0);
406:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
407:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&  (NLS_PC_BFGS == nlsP->pc_type) && (bfgsUpdates > 1)) {
408:       /* Preconditioner is numerically indefinite; reset the
409:          approximation if using BFGS preconditioning. */

411:       if (f != 0.0) {
412:         delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
413:       } else {
414:         delta = 2.0 / (gnorm*gnorm);
415:       }
416:       MatLMVMSetDelta(nlsP->M,delta);
417:       MatLMVMReset(nlsP->M);
418:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
419:       bfgsUpdates = 1;
420:     }

422:     if (KSP_CONVERGED_ATOL == ksp_reason) {
423:       ++nlsP->ksp_atol;
424:     } else if (KSP_CONVERGED_RTOL == ksp_reason) {
425:       ++nlsP->ksp_rtol;
426:     } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
427:       ++nlsP->ksp_ctol;
428:     } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
429:       ++nlsP->ksp_negc;
430:     } else if (KSP_DIVERGED_DTOL == ksp_reason) {
431:       ++nlsP->ksp_dtol;
432:     } else if (KSP_DIVERGED_ITS == ksp_reason) {
433:       ++nlsP->ksp_iter;
434:     } else {
435:       ++nlsP->ksp_othr;
436:     }

438:     /* Check for success (descent direction) */
439:     VecDot(nlsP->D, tao->gradient, &gdx);
440:     if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
441:       /* Newton step is not a descent direction or produced Inf or NaN;
442:          update the perturbation for next time */
443:       if (pert <= 0.0) {
444:         /* Initialize the perturbation */
445:         pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
446:         if (is_gltr) {
447:           KSPCGGLTRGetMinEig(tao->ksp,&e_min);
448:           pert = PetscMax(pert, -e_min);
449:         }
450:       } else {
451:         /* Increase the perturbation */
452:         pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
453:       }
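/* Perturbation update used here and after line-search failures:
     first activation:  pert = min(imax, max(imin, imfac * ||g||))
     subsequent growth: pert = min(pmax, max(pgfac * pert, pmgfac * ||g||))
   With gltr, the initial value is also bounded below by -e_min, the negative
   of the smallest eigenvalue estimate obtained from the Lanczos process. */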

455:       if (NLS_PC_BFGS != nlsP->pc_type) {
456:         /* We don't have the BFGS matrix around and updated;
457:            must use the gradient direction in this case */
458:         VecCopy(tao->gradient, nlsP->D);
459:         VecScale(nlsP->D, -1.0);
460:         ++nlsP->grad;
461:         stepType = NLS_GRADIENT;
462:       } else {
463:         /* Attempt to use the BFGS direction */
464:         MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
465:         VecScale(nlsP->D, -1.0);

467:         /* Check for success (descent direction) */
468:         VecDot(tao->gradient, nlsP->D, &gdx);
469:         if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
470:           /* BFGS direction is not a descent direction or produced Inf or NaN.
471:              We can assert bfgsUpdates > 1 in this case because
472:              the first solve produces the scaled gradient direction,
473:              which is guaranteed to be descent */

475:           /* Use steepest descent direction (scaled) */

477:           if (f != 0.0) {
478:             delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
479:           } else {
480:             delta = 2.0 / (gnorm*gnorm);
481:           }
482:           MatLMVMSetDelta(nlsP->M, delta);
483:           MatLMVMReset(nlsP->M);
484:           MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
485:           MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
486:           VecScale(nlsP->D, -1.0);

488:           bfgsUpdates = 1;
489:           ++nlsP->sgrad;
490:           stepType = NLS_SCALED_GRADIENT;
491:         } else {
492:           if (1 == bfgsUpdates) {
493:             /* The first BFGS direction is always the scaled gradient */
494:             ++nlsP->sgrad;
495:             stepType = NLS_SCALED_GRADIENT;
496:           } else {
497:             ++nlsP->bfgs;
498:             stepType = NLS_BFGS;
499:           }
500:         }
501:       }
502:     } else {
503:       /* Computed Newton step is descent */
504:       switch (ksp_reason) {
505:       case KSP_DIVERGED_NANORINF:
506:       case KSP_DIVERGED_BREAKDOWN:
507:       case KSP_DIVERGED_INDEFINITE_MAT:
508:       case KSP_DIVERGED_INDEFINITE_PC:
509:       case KSP_CONVERGED_CG_NEG_CURVE:
510:         /* Matrix or preconditioner is indefinite; increase perturbation */
511:         if (pert <= 0.0) {
512:           /* Initialize the perturbation */
513:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
514:           if (is_gltr) {
515:             KSPCGGLTRGetMinEig(tao->ksp, &e_min);
516:             pert = PetscMax(pert, -e_min);
517:           }
518:         } else {
519:           /* Increase the perturbation */
520:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
521:         }
522:         break;

524:       default:
525:         /* Newton step computation is good; decrease perturbation */
526:         pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
527:         if (pert < nlsP->pmin) {
528:           pert = 0.0;
529:         }
530:         break;
531:       }

533:       ++nlsP->newt;
534:       stepType = NLS_NEWTON;
535:     }

537:     /* Perform the linesearch */
538:     fold = f;
539:     VecCopy(tao->solution, nlsP->Xold);
540:     VecCopy(tao->gradient, nlsP->Gold);

542:     TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
543:     TaoAddLineSearchCounts(tao);

545:     while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) {      /* Linesearch failed */
546:       f = fold;
547:       VecCopy(nlsP->Xold, tao->solution);
548:       VecCopy(nlsP->Gold, tao->gradient);

550:       switch(stepType) {
551:       case NLS_NEWTON:
552:         /* Failed to obtain acceptable iterate with Newton step
553:            Update the perturbation for next time */
554:         if (pert <= 0.0) {
555:           /* Initialize the perturbation */
556:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
557:           if (is_gltr) {
558:             KSPCGGLTRGetMinEig(tao->ksp,&e_min);
559:             pert = PetscMax(pert, -e_min);
560:           }
561:         } else {
562:           /* Increase the perturbation */
563:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
564:         }

566:         if (NLS_PC_BFGS != nlsP->pc_type) {
567:           /* We don't have the BFGS matrix around and updated;
568:              must use the gradient direction in this case */
569:           VecCopy(tao->gradient, nlsP->D);
570:           ++nlsP->grad;
571:           stepType = NLS_GRADIENT;
572:         } else {
573:           /* Attempt to use the BFGS direction */
574:           MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
575:           /* Check for success (descent direction) */
576:           VecDot(tao->gradient, nlsP->D, &gdx);
577:           if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
578:             /* BFGS direction is not a descent direction or produced Inf or NaN.
579:                We can assert bfgsUpdates > 1 in this case
580:                Use steepest descent direction (scaled) */

582:             if (f != 0.0) {
583:               delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
584:             } else {
585:               delta = 2.0 / (gnorm*gnorm);
586:             }
587:             MatLMVMSetDelta(nlsP->M, delta);
588:             MatLMVMReset(nlsP->M);
589:             MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
590:             MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);

592:             bfgsUpdates = 1;
593:             ++nlsP->sgrad;
594:             stepType = NLS_SCALED_GRADIENT;
595:           } else {
596:             if (1 == bfgsUpdates) {
597:               /* The first BFGS direction is always the scaled gradient */
598:               ++nlsP->sgrad;
599:               stepType = NLS_SCALED_GRADIENT;
600:             } else {
601:               ++nlsP->bfgs;
602:               stepType = NLS_BFGS;
603:             }
604:           }
605:         }
606:         break;

608:       case NLS_BFGS:
609:         /* Can only enter if pc_type == NLS_PC_BFGS
610:            Failed to obtain acceptable iterate with BFGS step
611:            Attempt to use the scaled gradient direction */

613:         if (f != 0.0) {
614:           delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
615:         } else {
616:           delta = 2.0 / (gnorm*gnorm);
617:         }
618:         MatLMVMSetDelta(nlsP->M, delta);
619:         MatLMVMReset(nlsP->M);
620:         MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
621:         MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);

623:         bfgsUpdates = 1;
624:         ++nlsP->sgrad;
625:         stepType = NLS_SCALED_GRADIENT;
626:         break;

628:       case NLS_SCALED_GRADIENT:
629:         /* Can only enter if pc_type == NLS_PC_BFGS
630:            The scaled gradient step did not produce a new iterate;
631:            attempt to use the gradient direction.
632:            Need to make sure we are not using a different diagonal scaling */

634:         MatLMVMSetScale(nlsP->M,0);
635:         MatLMVMSetDelta(nlsP->M,1.0);
636:         MatLMVMReset(nlsP->M);
637:         MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
638:         MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);

640:         bfgsUpdates = 1;
641:         ++nlsP->grad;
642:         stepType = NLS_GRADIENT;
643:         break;
644:       }
645:       VecScale(nlsP->D, -1.0);

647:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
648:       TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
649:       TaoAddLineSearchCounts(tao);
650:     }
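/* Each pass through the loop above falls back one level in the step hierarchy
   (Newton -> BFGS -> scaled gradient -> gradient), recomputes the direction,
   and retries the line search.  A failure on the plain gradient direction
   exits the loop and is treated as TAO_DIVERGED_LS_FAILURE below. */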

652:     if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
653:       /* Failed to find an improving point */
654:       f = fold;
655:       VecCopy(nlsP->Xold, tao->solution);
656:       VecCopy(nlsP->Gold, tao->gradient);
657:       step = 0.0;
658:       reason = TAO_DIVERGED_LS_FAILURE;
659:       tao->reason = TAO_DIVERGED_LS_FAILURE;
660:       break;
661:     }

663:     /* Update trust region radius */
664:     if (is_nash || is_stcg || is_gltr) {
665:       switch(nlsP->update_type) {
666:       case NLS_UPDATE_STEP:
667:         if (stepType == NLS_NEWTON) {
668:           if (step < nlsP->nu1) {
669:             /* Very bad step taken; reduce radius */
670:             tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
671:           } else if (step < nlsP->nu2) {
672:             /* Reasonably bad step taken; reduce radius */
673:             tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
674:           } else if (step < nlsP->nu3) {
675:             /*  Reasonable step was taken; leave radius alone */
676:             if (nlsP->omega3 < 1.0) {
677:               tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
678:             } else if (nlsP->omega3 > 1.0) {
679:               tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
680:             }
681:           } else if (step < nlsP->nu4) {
682:             /*  Full step taken; increase the radius */
683:             tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
684:           } else {
685:             /*  More than full step taken; increase the radius */
686:             tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
687:           }
688:         } else {
689:           /*  Newton step was not good; reduce the radius */
690:           tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
691:         }
692:         break;

694:       case NLS_UPDATE_REDUCTION:
695:         if (stepType == NLS_NEWTON) {
696:           /*  Get predicted reduction */

698:           KSPCGGetObjFcn(tao->ksp,&prered);
699:           if (prered >= 0.0) {
700:             /*  The predicted reduction has the wrong sign.  This cannot */
701:             /*  happen in infinite precision arithmetic.  Step should */
702:             /*  be rejected! */
703:             tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
704:           } else {
705:             if (PetscIsInfOrNanReal(f_full)) {
706:               tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
707:             } else {
708:               /*  Compute the actual reduction */
709:               actred = fold - f_full;
710:               prered = -prered;
711:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
712:                   (PetscAbsScalar(prered) <= nlsP->epsilon)) {
713:                 kappa = 1.0;
714:               } else {
715:                 kappa = actred / prered;
716:               }

718:               /*  Accept or reject the step and update radius */
719:               if (kappa < nlsP->eta1) {
720:                 /*  Very bad step */
721:                 tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
722:               } else if (kappa < nlsP->eta2) {
723:                 /*  Marginally bad step */
724:                 tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
725:               } else if (kappa < nlsP->eta3) {
726:                 /*  Reasonable step */
727:                 if (nlsP->alpha3 < 1.0) {
728:                   tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
729:                 } else if (nlsP->alpha3 > 1.0) {
730:                   tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
731:                 }
732:               } else if (kappa < nlsP->eta4) {
733:                 /*  Good step */
734:                 tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
735:               } else {
736:                 /*  Very good step */
737:                 tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
738:               }
739:             }
740:           }
741:         } else {
742:           /*  Newton step was not good; reduce the radius */
743:           tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
744:         }
745:         break;

747:       default:
748:         if (stepType == NLS_NEWTON) {
749:           KSPCGGetObjFcn(tao->ksp,&prered);
750:           if (prered >= 0.0) {
751:             /*  The predicted reduction has the wrong sign.  This cannot */
752:             /*  happen in infinite precision arithmetic.  Step should */
753:             /*  be rejected! */
754:             tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
755:           } else {
756:             if (PetscIsInfOrNanReal(f_full)) {
757:               tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
758:             } else {
759:               actred = fold - f_full;
760:               prered = -prered;
761:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
762:                 kappa = 1.0;
763:               } else {
764:                 kappa = actred / prered;
765:               }

767:               tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
768:               tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
769:               tau_min = PetscMin(tau_1, tau_2);
770:               tau_max = PetscMax(tau_1, tau_2);

772:               if (kappa >= 1.0 - nlsP->mu1) {
773:                 /*  Great agreement */
774:                 if (tau_max < 1.0) {
775:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
776:                 } else if (tau_max > nlsP->gamma4) {
777:                   tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
778:                 } else {
779:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
780:                 }
781:               } else if (kappa >= 1.0 - nlsP->mu2) {
782:                 /*  Good agreement */

784:                 if (tau_max < nlsP->gamma2) {
785:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
786:                 } else if (tau_max > nlsP->gamma3) {
787:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
788:                 } else if (tau_max < 1.0) {
789:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
790:                 } else {
791:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
792:                 }
793:               } else {
794:                 /*  Not good agreement */
795:                 if (tau_min > 1.0) {
796:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
797:                 } else if (tau_max < nlsP->gamma1) {
798:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
799:                 } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
800:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
801:                 } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
802:                   tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
 803:                 } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
804:                   tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
805:                 } else {
806:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
807:                 }
808:               }
809:             }
810:           }
811:         } else {
812:           /*  Newton step was not good; reduce the radius */
813:           tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
814:         }
815:         break;
816:       }

818:       /*  The radius may have been increased; modify if it is too large */
819:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
820:     }
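/* Radius update strategies used above: "step" scales the radius by how much of
   the full step the line search accepted, "reduction" uses the agreement ratio
   kappa = actred/prered based on the KSP objective value, and the default
   (interpolation) additionally uses the tau_1/tau_2 interpolation factors. */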

822:     /*  Check for termination */
823:     TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
824:     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
825:     needH = 1;
826:     TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step, &reason);
827:   }
828:   return(0);
829: }

831: /* ---------------------------------------------------------- */
832: static PetscErrorCode TaoSetUp_NLS(Tao tao)
833: {
834:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

838:   if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
839:   if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
840:   if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
841:   if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
842:   if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
843:   if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
844:   nlsP->Diag = 0;
845:   nlsP->M = 0;
846:   return(0);
847: }

849: /*------------------------------------------------------------*/
850: static PetscErrorCode TaoDestroy_NLS(Tao tao)
851: {
852:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

856:   if (tao->setupcalled) {
857:     VecDestroy(&nlsP->D);
858:     VecDestroy(&nlsP->W);
859:     VecDestroy(&nlsP->Xold);
860:     VecDestroy(&nlsP->Gold);
861:   }
862:   VecDestroy(&nlsP->Diag);
863:   MatDestroy(&nlsP->M);
864:   PetscFree(tao->data);
865:   return(0);
866: }

868: /*------------------------------------------------------------*/
869: static PetscErrorCode TaoSetFromOptions_NLS(PetscOptionItems *PetscOptionsObject,Tao tao)
870: {
871:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

875:   PetscOptionsHead(PetscOptionsObject,"Newton line search method for unconstrained optimization");
876:   PetscOptionsEList("-tao_nls_pc_type", "pc type", "", NLS_PC, NLS_PC_TYPES, NLS_PC[nlsP->pc_type], &nlsP->pc_type, 0);
877:   PetscOptionsEList("-tao_nls_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[nlsP->bfgs_scale_type], &nlsP->bfgs_scale_type, 0);
878:   PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
879:   PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
880:   PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval,NULL);
881:   PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin,NULL);
882:   PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax,NULL);
883:   PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac,NULL);
884:   PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin,NULL);
885:   PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax,NULL);
886:   PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac,NULL);
887:   PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac,NULL);
888:   PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac,NULL);
889:   PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac,NULL);
890:   PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1,NULL);
891:   PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2,NULL);
892:   PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3,NULL);
893:   PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4,NULL);
894:   PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1,NULL);
895:   PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2,NULL);
896:   PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3,NULL);
897:   PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4,NULL);
898:   PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5,NULL);
899:   PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1,NULL);
900:   PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2,NULL);
901:   PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3,NULL);
902:   PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4,NULL);
903:   PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1,NULL);
904:   PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2,NULL);
905:   PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3,NULL);
906:   PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4,NULL);
907:   PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5,NULL);
908:   PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i,NULL);
909:   PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i,NULL);
910:   PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i,NULL);
911:   PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i,NULL);
912:   PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i,NULL);
913:   PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i,NULL);
914:   PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i,NULL);
915:   PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1,NULL);
916:   PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2,NULL);
917:   PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1,NULL);
918:   PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2,NULL);
919:   PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3,NULL);
920:   PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4,NULL);
921:   PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta,NULL);
922:   PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius,NULL);
923:   PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius,NULL);
924:   PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon,NULL);
925:   PetscOptionsTail();
926:   TaoLineSearchSetFromOptions(tao->linesearch);
927:   KSPSetFromOptions(tao->ksp);
928:   return(0);
929: }


932: /*------------------------------------------------------------*/
933: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
934: {
935:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;
936:   PetscInt       nrejects;
937:   PetscBool      isascii;

941:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
942:   if (isascii) {
943:     PetscViewerASCIIPushTab(viewer);
944:     if (NLS_PC_BFGS == nlsP->pc_type && nlsP->M) {
945:       MatLMVMGetRejects(nlsP->M,&nrejects);
946:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n",nrejects);
947:     }
948:     PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
949:     PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
950:     PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", nlsP->sgrad);
951:     PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);

953:     PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
954:     PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
955:     PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
956:     PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
957:     PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
958:     PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
959:     PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
960:     PetscViewerASCIIPopTab(viewer);
961:   }
962:   return(0);
963: }

965: /* ---------------------------------------------------------- */
966: /*MC
967:   TAONLS - Newton's method with linesearch for unconstrained minimization.
968:   At each iteration, the Newton line search method solves the symmetric
 969:   system of equations to obtain the step direction dk:
970:               Hk dk = -gk
 971:   a Moré-Thuente line search is applied along the direction dk to approximately
972:   solve
973:               min_t f(xk + t d_k)

975:     Options Database Keys:
976: + -tao_nls_pc_type - "none","ahess","bfgs","petsc"
977: . -tao_nls_bfgs_scale_type - "ahess","phess","bfgs"
978: . -tao_nls_init_type - "constant","direction","interpolation"
 979: . -tao_nls_update_type - "step","reduction","interpolation"
980: . -tao_nls_sval - perturbation starting value
981: . -tao_nls_imin - minimum initial perturbation
982: . -tao_nls_imax - maximum initial perturbation
983: . -tao_nls_pmin - minimum perturbation
984: . -tao_nls_pmax - maximum perturbation
985: . -tao_nls_pgfac - growth factor
986: . -tao_nls_psfac - shrink factor
987: . -tao_nls_imfac - initial merit factor
988: . -tao_nls_pmgfac - merit growth factor
989: . -tao_nls_pmsfac - merit shrink factor
990: . -tao_nls_eta1 - poor steplength; reduce radius
 991: . -tao_nls_eta2 - reasonable steplength; leave radius alone
 992: . -tao_nls_eta3 - good steplength; increase radius
993: . -tao_nls_eta4 - excellent steplength; greatly increase radius
994: . -tao_nls_alpha1 - alpha1 reduction
995: . -tao_nls_alpha2 - alpha2 reduction
996: . -tao_nls_alpha3 - alpha3 reduction
997: . -tao_nls_alpha4 - alpha4 reduction
 998: . -tao_nls_alpha5 - alpha5 reduction
999: . -tao_nls_mu1 - mu1 interpolation update
1000: . -tao_nls_mu2 - mu2 interpolation update
1001: . -tao_nls_gamma1 - gamma1 interpolation update
1002: . -tao_nls_gamma2 - gamma2 interpolation update
1003: . -tao_nls_gamma3 - gamma3 interpolation update
1004: . -tao_nls_gamma4 - gamma4 interpolation update
1005: . -tao_nls_theta - theta interpolation update
1006: . -tao_nls_omega1 - omega1 step update
1007: . -tao_nls_omega2 - omega2 step update
1008: . -tao_nls_omega3 - omega3 step update
1009: . -tao_nls_omega4 - omega4 step update
1010: . -tao_nls_omega5 - omega5 step update
1011: . -tao_nls_mu1_i -  mu1 interpolation init factor
1012: . -tao_nls_mu2_i -  mu2 interpolation init factor
1013: . -tao_nls_gamma1_i -  gamma1 interpolation init factor
1014: . -tao_nls_gamma2_i -  gamma2 interpolation init factor
1015: . -tao_nls_gamma3_i -  gamma3 interpolation init factor
1016: . -tao_nls_gamma4_i -  gamma4 interpolation init factor
1017: - -tao_nls_theta_i -  theta interpolation init factor

1019:   Level: beginner
1020: M*/
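For reference, a minimal usage sketch (not part of nls.c) showing how TAONLS might be driven with the petsc-3.8 Tao interface; the toy objective f(x) = x'x and the routine names FormFunctionGradient/FormHessian are assumptions of this example only.

/* Illustrative driver sketch: minimize f(x) = x'x with TAONLS */
#include <petsctao.h>

static PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f,Vec G,void *ctx)
{
  PetscErrorCode ierr;
  PetscScalar    xtx;

  ierr = VecDot(X,X,&xtx);CHKERRQ(ierr);         /* f(x) = x'x     */
  *f   = PetscRealPart(xtx);
  ierr = VecCopy(X,G);CHKERRQ(ierr);             /* grad f(x) = 2x */
  ierr = VecScale(G,2.0);CHKERRQ(ierr);
  return 0;
}

static PetscErrorCode FormHessian(Tao tao,Vec X,Mat H,Mat Hpre,void *ctx)
{
  PetscErrorCode ierr;
  PetscInt       i,rstart,rend;

  ierr = MatGetOwnershipRange(H,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart;i<rend;i++) {                    /* Hessian = 2 I  */
    ierr = MatSetValue(H,i,i,2.0,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}

int main(int argc,char **argv)
{
  Tao            tao;
  Vec            x;
  Mat            H;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = VecCreateSeq(PETSC_COMM_SELF,10,&x);CHKERRQ(ierr);
  ierr = VecSet(x,1.0);CHKERRQ(ierr);
  ierr = MatCreateSeqAIJ(PETSC_COMM_SELF,10,10,1,NULL,&H);CHKERRQ(ierr);

  ierr = TaoCreate(PETSC_COMM_SELF,&tao);CHKERRQ(ierr);
  ierr = TaoSetType(tao,TAONLS);CHKERRQ(ierr);
  ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
  ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);CHKERRQ(ierr);
  ierr = TaoSetHessianRoutine(tao,H,H,FormHessian,NULL);CHKERRQ(ierr);
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);   /* picks up the -tao_nls_* options above */
  ierr = TaoSolve(tao);CHKERRQ(ierr);

  ierr = TaoDestroy(&tao);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = MatDestroy(&H);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}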

1022: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
1023: {
1024:   TAO_NLS        *nlsP;
1025:   const char     *morethuente_type = TAOLINESEARCHMT;

1029:   PetscNewLog(tao,&nlsP);

1031:   tao->ops->setup = TaoSetUp_NLS;
1032:   tao->ops->solve = TaoSolve_NLS;
1033:   tao->ops->view = TaoView_NLS;
1034:   tao->ops->setfromoptions = TaoSetFromOptions_NLS;
1035:   tao->ops->destroy = TaoDestroy_NLS;

1037:   /* Override default settings (unless already changed) */
1038:   if (!tao->max_it_changed) tao->max_it = 50;
1039:   if (!tao->trust0_changed) tao->trust0 = 100.0;

1041:   tao->data = (void*)nlsP;

1043:   nlsP->sval   = 0.0;
1044:   nlsP->imin   = 1.0e-4;
1045:   nlsP->imax   = 1.0e+2;
1046:   nlsP->imfac  = 1.0e-1;

1048:   nlsP->pmin   = 1.0e-12;
1049:   nlsP->pmax   = 1.0e+2;
1050:   nlsP->pgfac  = 1.0e+1;
1051:   nlsP->psfac  = 4.0e-1;
1052:   nlsP->pmgfac = 1.0e-1;
1053:   nlsP->pmsfac = 1.0e-1;

1055:   /*  Default values for trust-region radius update based on steplength */
1056:   nlsP->nu1 = 0.25;
1057:   nlsP->nu2 = 0.50;
1058:   nlsP->nu3 = 1.00;
1059:   nlsP->nu4 = 1.25;

1061:   nlsP->omega1 = 0.25;
1062:   nlsP->omega2 = 0.50;
1063:   nlsP->omega3 = 1.00;
1064:   nlsP->omega4 = 2.00;
1065:   nlsP->omega5 = 4.00;

1067:   /*  Default values for trust-region radius update based on reduction */
1068:   nlsP->eta1 = 1.0e-4;
1069:   nlsP->eta2 = 0.25;
1070:   nlsP->eta3 = 0.50;
1071:   nlsP->eta4 = 0.90;

1073:   nlsP->alpha1 = 0.25;
1074:   nlsP->alpha2 = 0.50;
1075:   nlsP->alpha3 = 1.00;
1076:   nlsP->alpha4 = 2.00;
1077:   nlsP->alpha5 = 4.00;

1079:   /*  Default values for trust-region radius update based on interpolation */
1080:   nlsP->mu1 = 0.10;
1081:   nlsP->mu2 = 0.50;

1083:   nlsP->gamma1 = 0.25;
1084:   nlsP->gamma2 = 0.50;
1085:   nlsP->gamma3 = 2.00;
1086:   nlsP->gamma4 = 4.00;

1088:   nlsP->theta = 0.05;

1090:   /*  Default values for trust region initialization based on interpolation */
1091:   nlsP->mu1_i = 0.35;
1092:   nlsP->mu2_i = 0.50;

1094:   nlsP->gamma1_i = 0.0625;
1095:   nlsP->gamma2_i = 0.5;
1096:   nlsP->gamma3_i = 2.0;
1097:   nlsP->gamma4_i = 5.0;

1099:   nlsP->theta_i = 0.25;

1101:   /*  Remaining parameters */
1102:   nlsP->min_radius = 1.0e-10;
1103:   nlsP->max_radius = 1.0e10;
1104:   nlsP->epsilon = 1.0e-6;

1106:   nlsP->pc_type         = NLS_PC_BFGS;
1107:   nlsP->bfgs_scale_type = BFGS_SCALE_PHESS;
1108:   nlsP->init_type       = NLS_INIT_INTERPOLATION;
1109:   nlsP->update_type     = NLS_UPDATE_STEP;

1111:   TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
1112:   TaoLineSearchSetType(tao->linesearch,morethuente_type);
1113:   TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
1114:   TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);

1116:   /*  Set linear solver to default for symmetric matrices */
1117:   KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
1118:   KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
1119:   KSPSetType(tao->ksp,KSPCGSTCG);
1120:   return(0);
1121: }