Actual source code: nls.c

petsc-3.6.1 2015-08-06
  1: #include <petsctaolinesearch.h>
  2: #include <../src/tao/matrix/lmvmmat.h>
  3: #include <../src/tao/unconstrained/impls/nls/nls.h>

  5: #include <petscksp.h>
  6: #include <petscpc.h>
  7: #include <petsc/private/kspimpl.h>
  8: #include <petsc/private/pcimpl.h>

 10: #define NLS_KSP_CG      0
 11: #define NLS_KSP_NASH    1
 12: #define NLS_KSP_STCG    2
 13: #define NLS_KSP_GLTR    3
 14: #define NLS_KSP_PETSC   4
 15: #define NLS_KSP_TYPES   5

 17: #define NLS_PC_NONE     0
 18: #define NLS_PC_AHESS    1
 19: #define NLS_PC_BFGS     2
 20: #define NLS_PC_PETSC    3
 21: #define NLS_PC_TYPES    4

 23: #define BFGS_SCALE_AHESS        0
 24: #define BFGS_SCALE_PHESS        1
 25: #define BFGS_SCALE_BFGS         2
 26: #define BFGS_SCALE_TYPES        3

 28: #define NLS_INIT_CONSTANT         0
 29: #define NLS_INIT_DIRECTION        1
 30: #define NLS_INIT_INTERPOLATION    2
 31: #define NLS_INIT_TYPES            3

 33: #define NLS_UPDATE_STEP           0
 34: #define NLS_UPDATE_REDUCTION      1
 35: #define NLS_UPDATE_INTERPOLATION  2
 36: #define NLS_UPDATE_TYPES          3

 38: static const char *NLS_KSP[64] = {"cg", "nash", "stcg", "gltr", "petsc"};

 40: static const char *NLS_PC[64] = {"none", "ahess", "bfgs", "petsc"};

 42: static const char *BFGS_SCALE[64] = {"ahess", "phess", "bfgs"};

 44: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};

 46: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};

 48: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x);
 49: /* Routine for BFGS preconditioner */

 51: /*
 52:  Implements Newton's Method with a line search approach for solving
 53:  unconstrained minimization problems.  A More'-Thuente line search
 54:  is used to guarantee that the bfgs preconditioner remains positive
 55:  definite.

 57:  The method can shift the Hessian matrix.  The shifting procedure is
 58:  adapted from the PATH algorithm for solving complementarity
 59:  problems.

 61:  The linear system solve should be done with a conjugate gradient
 62:  method, although any method can be used. */
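/* Illustrative usage (a minimal sketch, not part of this file): a driver that
   selects this algorithm through the public Tao interface.  The routines
   FormFunctionGradient() and FormHessian(), the context "user", and the
   objects x and H are hypothetical user-supplied callbacks and data.

     TaoCreate(PETSC_COMM_WORLD, &tao);
     TaoSetType(tao, TAONLS);
     TaoSetInitialVector(tao, x);
     TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
     TaoSetHessianRoutine(tao, H, H, FormHessian, &user);
     TaoSetFromOptions(tao);
     TaoSolve(tao);
     TaoDestroy(&tao);
*/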

 64: #define NLS_NEWTON              0
 65: #define NLS_BFGS                1
 66: #define NLS_SCALED_GRADIENT     2
 67: #define NLS_GRADIENT            3

 71: static PetscErrorCode TaoSolve_NLS(Tao tao)
 72: {
 73:   PetscErrorCode               ierr;
 74:   TAO_NLS                      *nlsP = (TAO_NLS *)tao->data;
 75:   PC                           pc;
 76:   KSPConvergedReason           ksp_reason;
 77:   TaoLineSearchConvergedReason ls_reason;
 78:   TaoConvergedReason           reason;

 80:   PetscReal                    fmin, ftrial, f_full, prered, actred, kappa, sigma;
 81:   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 82:   PetscReal                    f, fold, gdx, gnorm, pert;
 83:   PetscReal                    step = 1.0;
 84:   PetscReal                    delta;
 85:   PetscReal                    norm_d = 0.0, e_min;

 87:   PetscInt                     stepType;
 88:   PetscInt                     bfgsUpdates = 0;
 89:   PetscInt                     n,N,kspits;
 90:   PetscInt                     needH;

 92:   PetscInt                     i_max = 5;
 93:   PetscInt                     j_max = 1;
 94:   PetscInt                     i, j;

 97:   if (tao->XL || tao->XU || tao->ops->computebounds) {
 98:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");
 99:   }

101:   /* Initialize variables */
102:   pert = nlsP->sval;

104:   /* Number of times ksp stopped because of these reasons */
105:   nlsP->ksp_atol = 0;
106:   nlsP->ksp_rtol = 0;
107:   nlsP->ksp_dtol = 0;
108:   nlsP->ksp_ctol = 0;
109:   nlsP->ksp_negc = 0;
110:   nlsP->ksp_iter = 0;
111:   nlsP->ksp_othr = 0;

113:   /* Modify the linear solver to a trust region method if desired */
114:   switch(nlsP->ksp_type) {
115:   case NLS_KSP_CG:
116:     KSPSetType(tao->ksp, KSPCG);
117:     KSPSetFromOptions(tao->ksp);
118:     break;

120:   case NLS_KSP_NASH:
121:     KSPSetType(tao->ksp, KSPNASH);
122:     KSPSetFromOptions(tao->ksp);
123:     break;

125:   case NLS_KSP_STCG:
126:     KSPSetType(tao->ksp, KSPSTCG);
127:     KSPSetFromOptions(tao->ksp);
128:     break;

130:   case NLS_KSP_GLTR:
131:     KSPSetType(tao->ksp, KSPGLTR);
132:     KSPSetFromOptions(tao->ksp);
133:     break;

135:   default:
136:     /* Use the method set by the ksp_type */
137:     break;
138:   }

140:   /* Initialize trust-region radius when using nash, stcg, or gltr
141:    Will be reset during the first iteration */
142:   if (NLS_KSP_NASH == nlsP->ksp_type) {
143:     KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
144:   } else if (NLS_KSP_STCG == nlsP->ksp_type) {
145:     KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
146:   } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
147:     KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
148:   }

150:   if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
151:     tao->trust = tao->trust0;

153:     if (tao->trust < 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial radius negative");

155:     /* Modify the radius if it is too large or small */
156:     tao->trust = PetscMax(tao->trust, nlsP->min_radius);
157:     tao->trust = PetscMin(tao->trust, nlsP->max_radius);
158:   }

160:   /* Get vectors we will need */
161:   if (NLS_PC_BFGS == nlsP->pc_type && !nlsP->M) {
162:     VecGetLocalSize(tao->solution,&n);
163:     VecGetSize(tao->solution,&N);
164:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&nlsP->M);
165:     MatLMVMAllocateVectors(nlsP->M,tao->solution);
166:   }

168:   /* Check convergence criteria */
169:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
170:   VecNorm(tao->gradient,NORM_2,&gnorm);
171:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
172:   needH = 1;

174:   TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
175:   if (reason != TAO_CONTINUE_ITERATING) return(0);

177:   /* create vectors for the limited memory preconditioner */
178:   if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_BFGS != nlsP->bfgs_scale_type)) {
179:     if (!nlsP->Diag) {
180:       VecDuplicate(tao->solution,&nlsP->Diag);
181:     }
182:   }

184:   /* Modify the preconditioner to use the bfgs approximation */
185:   KSPGetPC(tao->ksp, &pc);
186:   switch(nlsP->pc_type) {
187:   case NLS_PC_NONE:
188:     PCSetType(pc, PCNONE);
189:     PCSetFromOptions(pc);
190:     break;

192:   case NLS_PC_AHESS:
193:     PCSetType(pc, PCJACOBI);
194:     PCSetFromOptions(pc);
195:     PCJacobiSetUseAbs(pc,PETSC_TRUE);
196:     break;

198:   case NLS_PC_BFGS:
199:     PCSetType(pc, PCSHELL);
200:     PCSetFromOptions(pc);
201:     PCShellSetName(pc, "bfgs");
202:     PCShellSetContext(pc, nlsP->M);
203:     PCShellSetApply(pc, MatLMVMSolveShell);
204:     break;

206:   default:
207:     /* Use the pc method set by pc_type */
208:     break;
209:   }

211:   /* Initialize trust-region radius.  The initialization is only performed
212:      when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
213:   if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
214:     switch(nlsP->init_type) {
215:     case NLS_INIT_CONSTANT:
216:       /* Use the initial radius specified */
217:       break;

219:     case NLS_INIT_INTERPOLATION:
220:       /* Use interpolation to determine the initial radius */
221:       max_radius = 0.0;

223:       for (j = 0; j < j_max; ++j) {
224:         fmin = f;
225:         sigma = 0.0;

227:         if (needH) {
228:           TaoComputeHessian(tao, tao->solution,tao->hessian,tao->hessian_pre);
229:           needH = 0;
230:         }

232:         for (i = 0; i < i_max; ++i) {
233:           VecCopy(tao->solution,nlsP->W);
234:           VecAXPY(nlsP->W,-tao->trust/gnorm,tao->gradient);
235:           TaoComputeObjective(tao, nlsP->W, &ftrial);
236:           if (PetscIsInfOrNanReal(ftrial)) {
237:             tau = nlsP->gamma1_i;
238:           } else {
239:             if (ftrial < fmin) {
240:               fmin = ftrial;
241:               sigma = -tao->trust / gnorm;
242:             }

244:             MatMult(tao->hessian, tao->gradient, nlsP->D);
245:             VecDot(tao->gradient, nlsP->D, &prered);

247:             prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
248:             actred = f - ftrial;
249:             if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
250:               kappa = 1.0;
251:             } else {
252:               kappa = actred / prered;
253:             }

255:             tau_1 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
256:             tau_2 = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
257:             tau_min = PetscMin(tau_1, tau_2);
258:             tau_max = PetscMax(tau_1, tau_2);

260:             if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu1_i) {
261:               /* Great agreement */
262:               max_radius = PetscMax(max_radius, tao->trust);

264:               if (tau_max < 1.0) {
265:                 tau = nlsP->gamma3_i;
266:               } else if (tau_max > nlsP->gamma4_i) {
267:                 tau = nlsP->gamma4_i;
268:               } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
269:                 tau = tau_1;
270:               } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
271:                 tau = tau_2;
272:               } else {
273:                 tau = tau_max;
274:               }
275:             } else if (PetscAbsScalar(kappa - 1.0) <= nlsP->mu2_i) {
276:               /* Good agreement */
277:               max_radius = PetscMax(max_radius, tao->trust);

279:               if (tau_max < nlsP->gamma2_i) {
280:                 tau = nlsP->gamma2_i;
281:               } else if (tau_max > nlsP->gamma3_i) {
282:                 tau = nlsP->gamma3_i;
283:               } else {
284:                 tau = tau_max;
285:               }
286:             } else {
287:               /* Not good agreement */
288:               if (tau_min > 1.0) {
289:                 tau = nlsP->gamma2_i;
290:               } else if (tau_max < nlsP->gamma1_i) {
291:                 tau = nlsP->gamma1_i;
292:               } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
293:                 tau = nlsP->gamma1_i;
294:               } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
295:                 tau = tau_1;
296:             } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
297:                 tau = tau_2;
298:               } else {
299:                 tau = tau_max;
300:               }
301:             }
302:           }
303:           tao->trust = tau * tao->trust;
304:         }

306:         if (fmin < f) {
307:           f = fmin;
308:           VecAXPY(tao->solution,sigma,tao->gradient);
309:           TaoComputeGradient(tao,tao->solution,tao->gradient);

311:           VecNorm(tao->gradient,NORM_2,&gnorm);
312:           if (PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute gradient generated Inf or NaN");
313:           needH = 1;

315:           TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
316:           if (reason != TAO_CONTINUE_ITERATING) return(0);
317:         }
318:       }
319:       tao->trust = PetscMax(tao->trust, max_radius);

321:       /* Modify the radius if it is too large or small */
322:       tao->trust = PetscMax(tao->trust, nlsP->min_radius);
323:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
324:       break;

326:     default:
327:       /* Norm of the first direction will initialize radius */
328:       tao->trust = 0.0;
329:       break;
330:     }
331:   }

333:   /* Set initial scaling for the BFGS preconditioner
334:      This step is done after computing the initial trust-region radius
335:      since the function value may have decreased */
336:   if (NLS_PC_BFGS == nlsP->pc_type) {
337:     if (f != 0.0) {
338:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
339:     } else {
340:       delta = 2.0 / (gnorm*gnorm);
341:     }
342:     MatLMVMSetDelta(nlsP->M,delta);
343:   }

345:   /* Set counters for gradient/reset steps */
346:   nlsP->newt = 0;
347:   nlsP->bfgs = 0;
348:   nlsP->sgrad = 0;
349:   nlsP->grad = 0;

351:   /* Have not converged; continue with Newton method */
352:   while (reason == TAO_CONTINUE_ITERATING) {
353:     ++tao->niter;
354:     tao->ksp_its=0;

356:     /* Compute the Hessian */
357:     if (needH) {
358:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
359:       needH = 0;
360:     }

362:     if ((NLS_PC_BFGS == nlsP->pc_type) && (BFGS_SCALE_AHESS == nlsP->bfgs_scale_type)) {
363:       /* Obtain diagonal for the bfgs preconditioner  */
364:       MatGetDiagonal(tao->hessian, nlsP->Diag);
365:       VecAbs(nlsP->Diag);
366:       VecReciprocal(nlsP->Diag);
367:       MatLMVMSetScale(nlsP->M,nlsP->Diag);
368:     }

370:     /* Shift the Hessian matrix */
371:     if (pert > 0) {
372:       MatShift(tao->hessian, pert);
373:       if (tao->hessian != tao->hessian_pre) {
374:         MatShift(tao->hessian_pre, pert);
375:       }
376:     }

378:     if (NLS_PC_BFGS == nlsP->pc_type) {
379:       if (BFGS_SCALE_PHESS == nlsP->bfgs_scale_type) {
380:         /* Obtain diagonal for the bfgs preconditioner  */
381:         MatGetDiagonal(tao->hessian, nlsP->Diag);
382:         VecAbs(nlsP->Diag);
383:         VecReciprocal(nlsP->Diag);
384:         MatLMVMSetScale(nlsP->M,nlsP->Diag);
385:       }
386:       /* Update the limited memory preconditioner */
387:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
388:       ++bfgsUpdates;
389:     }

391:     /* Solve the Newton system of equations */
392:     KSPSetOperators(tao->ksp,tao->hessian,tao->hessian_pre);
393:     if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type ||  NLS_KSP_GLTR == nlsP->ksp_type) {

395:       if (NLS_KSP_NASH == nlsP->ksp_type) {
396:         KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
397:       } else if (NLS_KSP_STCG == nlsP->ksp_type) {
398:         KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
399:       } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
400:         KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
401:       }

403:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
404:       KSPGetIterationNumber(tao->ksp,&kspits);
405:       tao->ksp_its+=kspits;
406:       tao->ksp_tot_its+=kspits;

408:       if (NLS_KSP_NASH == nlsP->ksp_type) {
409:         KSPNASHGetNormD(tao->ksp,&norm_d);
410:       } else if (NLS_KSP_STCG == nlsP->ksp_type) {
411:         KSPSTCGGetNormD(tao->ksp,&norm_d);
412:       } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
413:         KSPGLTRGetNormD(tao->ksp,&norm_d);
414:       }

416:       if (0.0 == tao->trust) {
417:         /* Radius was uninitialized; use the norm of the direction */
418:         if (norm_d > 0.0) {
419:           tao->trust = norm_d;

421:           /* Modify the radius if it is too large or small */
422:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
423:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);
424:         } else {
425:           /* The direction was bad; set radius to default value and re-solve
426:              the trust-region subproblem to get a direction */
427:           tao->trust = tao->trust0;

429:           /* Modify the radius if it is too large or small */
430:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
431:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);

433:           if (NLS_KSP_NASH == nlsP->ksp_type) {
434:             KSPNASHSetRadius(tao->ksp,nlsP->max_radius);
435:           } else if (NLS_KSP_STCG == nlsP->ksp_type) {
436:             KSPSTCGSetRadius(tao->ksp,nlsP->max_radius);
437:           } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
438:             KSPGLTRSetRadius(tao->ksp,nlsP->max_radius);
439:           }

441:           KSPSolve(tao->ksp, tao->gradient, nlsP->D);
442:           KSPGetIterationNumber(tao->ksp,&kspits);
443:           tao->ksp_its+=kspits;
444:           tao->ksp_tot_its+=kspits;
445:           if (NLS_KSP_NASH == nlsP->ksp_type) {
446:             KSPNASHGetNormD(tao->ksp,&norm_d);
447:           } else if (NLS_KSP_STCG == nlsP->ksp_type) {
448:             KSPSTCGGetNormD(tao->ksp,&norm_d);
449:           } else if (NLS_KSP_GLTR == nlsP->ksp_type) {
450:             KSPGLTRGetNormD(tao->ksp,&norm_d);
451:           }

453:           if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
454:         }
455:       }
456:     } else {
457:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
458:       KSPGetIterationNumber(tao->ksp, &kspits);
459:       tao->ksp_its += kspits;
460:       tao->ksp_tot_its+=kspits;
461:     }
462:     VecScale(nlsP->D, -1.0);
463:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
464:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&  (NLS_PC_BFGS == nlsP->pc_type) && (bfgsUpdates > 1)) {
465:       /* Preconditioner is numerically indefinite; reset the
466:          approximation if using BFGS preconditioning. */

468:       if (f != 0.0) {
469:         delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
470:       } else {
471:         delta = 2.0 / (gnorm*gnorm);
472:       }
473:       MatLMVMSetDelta(nlsP->M,delta);
474:       MatLMVMReset(nlsP->M);
475:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
476:       bfgsUpdates = 1;
477:     }

479:     if (KSP_CONVERGED_ATOL == ksp_reason) {
480:       ++nlsP->ksp_atol;
481:     } else if (KSP_CONVERGED_RTOL == ksp_reason) {
482:       ++nlsP->ksp_rtol;
483:     } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
484:       ++nlsP->ksp_ctol;
485:     } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
486:       ++nlsP->ksp_negc;
487:     } else if (KSP_DIVERGED_DTOL == ksp_reason) {
488:       ++nlsP->ksp_dtol;
489:     } else if (KSP_DIVERGED_ITS == ksp_reason) {
490:       ++nlsP->ksp_iter;
491:     } else {
492:       ++nlsP->ksp_othr;
493:     }

495:     /* Check for success (descent direction) */
496:     VecDot(nlsP->D, tao->gradient, &gdx);
497:     if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
498:       /* Newton step is not a descent direction or produced Inf or NaN
499:          Update the perturbation for next time */
500:       if (pert <= 0.0) {
501:         /* Initialize the perturbation */
502:         pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
503:         if (NLS_KSP_GLTR == nlsP->ksp_type) {
504:           KSPGLTRGetMinEig(tao->ksp,&e_min);
505:           pert = PetscMax(pert, -e_min);
506:         }
507:       } else {
508:         /* Increase the perturbation */
509:         pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
510:       }

512:       if (NLS_PC_BFGS != nlsP->pc_type) {
513:         /* The bfgs matrix is not being maintained;
514:            must use the gradient direction in this case */
515:         VecCopy(tao->gradient, nlsP->D);
516:         VecScale(nlsP->D, -1.0);
517:         ++nlsP->grad;
518:         stepType = NLS_GRADIENT;
519:       } else {
520:         /* Attempt to use the BFGS direction */
521:         MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
522:         VecScale(nlsP->D, -1.0);

524:         /* Check for success (descent direction) */
525:         VecDot(tao->gradient, nlsP->D, &gdx);
526:         if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
527:           /* BFGS direction is not a descent direction or produced Inf or NaN.
528:              We can assert bfgsUpdates > 1 in this case because
529:              the first solve produces the scaled gradient direction,
530:              which is guaranteed to be descent */

532:           /* Use steepest descent direction (scaled) */

534:           if (f != 0.0) {
535:             delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
536:           } else {
537:             delta = 2.0 / (gnorm*gnorm);
538:           }
539:           MatLMVMSetDelta(nlsP->M, delta);
540:           MatLMVMReset(nlsP->M);
541:           MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
542:           MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
543:           VecScale(nlsP->D, -1.0);

545:           bfgsUpdates = 1;
546:           ++nlsP->sgrad;
547:           stepType = NLS_SCALED_GRADIENT;
548:         } else {
549:           if (1 == bfgsUpdates) {
550:             /* The first BFGS direction is always the scaled gradient */
551:             ++nlsP->sgrad;
552:             stepType = NLS_SCALED_GRADIENT;
553:           } else {
554:             ++nlsP->bfgs;
555:             stepType = NLS_BFGS;
556:           }
557:         }
558:       }
559:     } else {
560:       /* Computed Newton step is descent */
561:       switch (ksp_reason) {
562:       case KSP_DIVERGED_NANORINF:
563:       case KSP_DIVERGED_BREAKDOWN:
564:       case KSP_DIVERGED_INDEFINITE_MAT:
565:       case KSP_DIVERGED_INDEFINITE_PC:
566:       case KSP_CONVERGED_CG_NEG_CURVE:
567:         /* Matrix or preconditioner is indefinite; increase perturbation */
568:         if (pert <= 0.0) {
569:           /* Initialize the perturbation */
570:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
571:           if (NLS_KSP_GLTR == nlsP->ksp_type) {
572:             KSPGLTRGetMinEig(tao->ksp, &e_min);
573:             pert = PetscMax(pert, -e_min);
574:           }
575:         } else {
576:           /* Increase the perturbation */
577:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
578:         }
579:         break;

581:       default:
582:         /* Newton step computation is good; decrease perturbation */
583:         pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
584:         if (pert < nlsP->pmin) {
585:           pert = 0.0;
586:         }
587:         break;
588:       }

590:       ++nlsP->newt;
591:       stepType = NLS_NEWTON;
592:     }

594:     /* Perform the linesearch */
595:     fold = f;
596:     VecCopy(tao->solution, nlsP->Xold);
597:     VecCopy(tao->gradient, nlsP->Gold);

599:     TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
600:     TaoAddLineSearchCounts(tao);

602:     while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) {      /* Linesearch failed */
603:       f = fold;
604:       VecCopy(nlsP->Xold, tao->solution);
605:       VecCopy(nlsP->Gold, tao->gradient);

607:       switch(stepType) {
608:       case NLS_NEWTON:
609:         /* Failed to obtain acceptable iterate with Newton step
610:            Update the perturbation for next time */
611:         if (pert <= 0.0) {
612:           /* Initialize the perturbation */
613:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
614:           if (NLS_KSP_GLTR == nlsP->ksp_type) {
615:             KSPGLTRGetMinEig(tao->ksp,&e_min);
616:             pert = PetscMax(pert, -e_min);
617:           }
618:         } else {
619:           /* Increase the perturbation */
620:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
621:         }

623:         if (NLS_PC_BFGS != nlsP->pc_type) {
624:           /* The bfgs matrix is not being maintained;
625:              must use the gradient direction in this case */
626:           VecCopy(tao->gradient, nlsP->D);
627:           ++nlsP->grad;
628:           stepType = NLS_GRADIENT;
629:         } else {
630:           /* Attempt to use the BFGS direction */
631:           MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);
632:           /* Check for success (descent direction) */
633:           VecDot(tao->gradient, nlsP->D, &gdx);
634:           if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
635:             /* BFGS direction is not a descent direction or produced Inf or NaN.
636:                We can assert bfgsUpdates > 1 in this case
637:                Use steepest descent direction (scaled) */

639:             if (f != 0.0) {
640:               delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
641:             } else {
642:               delta = 2.0 / (gnorm*gnorm);
643:             }
644:             MatLMVMSetDelta(nlsP->M, delta);
645:             MatLMVMReset(nlsP->M);
646:             MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
647:             MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);

649:             bfgsUpdates = 1;
650:             ++nlsP->sgrad;
651:             stepType = NLS_SCALED_GRADIENT;
652:           } else {
653:             if (1 == bfgsUpdates) {
654:               /* The first BFGS direction is always the scaled gradient */
655:               ++nlsP->sgrad;
656:               stepType = NLS_SCALED_GRADIENT;
657:             } else {
658:               ++nlsP->bfgs;
659:               stepType = NLS_BFGS;
660:             }
661:           }
662:         }
663:         break;

665:       case NLS_BFGS:
666:         /* Can only enter if pc_type == NLS_PC_BFGS
667:            Failed to obtain acceptable iterate with BFGS step
668:            Attempt to use the scaled gradient direction */

670:         if (f != 0.0) {
671:           delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
672:         } else {
673:           delta = 2.0 / (gnorm*gnorm);
674:         }
675:         MatLMVMSetDelta(nlsP->M, delta);
676:         MatLMVMReset(nlsP->M);
677:         MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
678:         MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);

680:         bfgsUpdates = 1;
681:         ++nlsP->sgrad;
682:         stepType = NLS_SCALED_GRADIENT;
683:         break;

685:       case NLS_SCALED_GRADIENT:
686:         /* Can only enter if pc_type == NLS_PC_BFGS
687:            The scaled gradient step did not produce a new iterate;
688:            attempt to use the gradient direction.
689:            Need to make sure we are not using a different diagonal scaling */

691:         MatLMVMSetScale(nlsP->M,0);
692:         MatLMVMSetDelta(nlsP->M,1.0);
693:         MatLMVMReset(nlsP->M);
694:         MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
695:         MatLMVMSolve(nlsP->M, tao->gradient, nlsP->D);

697:         bfgsUpdates = 1;
698:         ++nlsP->grad;
699:         stepType = NLS_GRADIENT;
700:         break;
701:       }
702:       VecScale(nlsP->D, -1.0);

704:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
705:       TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
706:       TaoAddLineSearchCounts(tao);
707:     }

709:     if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
710:       /* Failed to find an improving point */
711:       f = fold;
712:       VecCopy(nlsP->Xold, tao->solution);
713:       VecCopy(nlsP->Gold, tao->gradient);
714:       step = 0.0;
715:       reason = TAO_DIVERGED_LS_FAILURE;
716:       tao->reason = TAO_DIVERGED_LS_FAILURE;
717:       break;
718:     }

720:     /* Update trust region radius */
721:     if (NLS_KSP_NASH == nlsP->ksp_type || NLS_KSP_STCG == nlsP->ksp_type || NLS_KSP_GLTR == nlsP->ksp_type) {
722:       switch(nlsP->update_type) {
723:       case NLS_UPDATE_STEP:
724:         if (stepType == NLS_NEWTON) {
725:           if (step < nlsP->nu1) {
726:             /* Very bad step taken; reduce radius */
727:             tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
728:           } else if (step < nlsP->nu2) {
729:             /* Reasonably bad step taken; reduce radius */
730:             tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
731:           } else if (step < nlsP->nu3) {
732:             /*  Reasonable step was taken; leave radius alone */
733:             if (nlsP->omega3 < 1.0) {
734:               tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
735:             } else if (nlsP->omega3 > 1.0) {
736:               tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
737:             }
738:           } else if (step < nlsP->nu4) {
739:             /*  Full step taken; increase the radius */
740:             tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
741:           } else {
742:             /*  More than full step taken; increase the radius */
743:             tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
744:           }
745:         } else {
746:           /*  Newton step was not good; reduce the radius */
747:           tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
748:         }
749:         break;

751:       case NLS_UPDATE_REDUCTION:
752:         if (stepType == NLS_NEWTON) {
753:           /*  Get predicted reduction */

755:           if (NLS_KSP_STCG == nlsP->ksp_type) {
756:             KSPSTCGGetObjFcn(tao->ksp,&prered);
757:           } else if (NLS_KSP_NASH == nlsP->ksp_type)  {
758:             KSPNASHGetObjFcn(tao->ksp,&prered);
759:           } else {
760:             KSPGLTRGetObjFcn(tao->ksp,&prered);
761:           }

763:           if (prered >= 0.0) {
764:             /*  The predicted reduction has the wrong sign.  This cannot */
765:             /*  happen in infinite precision arithmetic.  Step should */
766:             /*  be rejected! */
767:             tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
768:           } else {
769:             if (PetscIsInfOrNanReal(f_full)) {
770:               tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
771:             } else {
772:               /*  Compute the actual reduction */
773:               actred = fold - f_full;
774:               prered = -prered;
775:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) &&
776:                   (PetscAbsScalar(prered) <= nlsP->epsilon)) {
777:                 kappa = 1.0;
778:               } else {
779:                 kappa = actred / prered;
780:               }

782:               /*  Accept or reject the step and update the radius */
783:               if (kappa < nlsP->eta1) {
784:                 /*  Very bad step */
785:                 tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
786:               } else if (kappa < nlsP->eta2) {
787:                 /*  Marginally bad step */
788:                 tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
789:               } else if (kappa < nlsP->eta3) {
790:                 /*  Reasonable step */
791:                 if (nlsP->alpha3 < 1.0) {
792:                   tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
793:                 } else if (nlsP->alpha3 > 1.0) {
794:                   tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
795:                 }
796:               } else if (kappa < nlsP->eta4) {
797:                 /*  Good step */
798:                 tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
799:               } else {
800:                 /*  Very good step */
801:                 tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
802:               }
803:             }
804:           }
805:         } else {
806:           /*  Newton step was not good; reduce the radius */
807:           tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
808:         }
809:         break;

811:       default:
812:         if (stepType == NLS_NEWTON) {

814:           if (NLS_KSP_STCG == nlsP->ksp_type) {
815:               KSPSTCGGetObjFcn(tao->ksp,&prered);
816:           } else if (NLS_KSP_NASH == nlsP->ksp_type)  {
817:               KSPNASHGetObjFcn(tao->ksp,&prered);
818:           } else {
819:               KSPGLTRGetObjFcn(tao->ksp,&prered);
820:           }
821:           if (prered >= 0.0) {
822:             /*  The predicted reduction has the wrong sign.  This cannot */
823:             /*  happen in infinite precision arithmetic.  Step should */
824:             /*  be rejected! */
825:             tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
826:           } else {
827:             if (PetscIsInfOrNanReal(f_full)) {
828:               tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
829:             } else {
830:               actred = fold - f_full;
831:               prered = -prered;
832:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
833:                 kappa = 1.0;
834:               } else {
835:                 kappa = actred / prered;
836:               }

838:               tau_1 = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
839:               tau_2 = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
840:               tau_min = PetscMin(tau_1, tau_2);
841:               tau_max = PetscMax(tau_1, tau_2);

843:               if (kappa >= 1.0 - nlsP->mu1) {
844:                 /*  Great agreement */
845:                 if (tau_max < 1.0) {
846:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
847:                 } else if (tau_max > nlsP->gamma4) {
848:                   tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
849:                 } else {
850:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
851:                 }
852:               } else if (kappa >= 1.0 - nlsP->mu2) {
853:                 /*  Good agreement */

855:                 if (tau_max < nlsP->gamma2) {
856:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
857:                 } else if (tau_max > nlsP->gamma3) {
858:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
859:                 } else if (tau_max < 1.0) {
860:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
861:                 } else {
862:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
863:                 }
864:               } else {
865:                 /*  Not good agreement */
866:                 if (tau_min > 1.0) {
867:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
868:                 } else if (tau_max < nlsP->gamma1) {
869:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
870:                 } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
871:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
872:                 } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
873:                   tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
874:                 } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
875:                   tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
876:                 } else {
877:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
878:                 }
879:               }
880:             }
881:           }
882:         } else {
883:           /*  Newton step was not good; reduce the radius */
884:           tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
885:         }
886:         break;
887:       }

889:       /*  The radius may have been increased; modify if it is too large */
890:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
891:     }

893:     /*  Check for termination */
894:     VecNorm(tao->gradient, NORM_2, &gnorm);
895:     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
896:     needH = 1;
897:     TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step, &reason);
898:   }
899:   return(0);
900: }

902: /* ---------------------------------------------------------- */
905: static PetscErrorCode TaoSetUp_NLS(Tao tao)
906: {
907:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

911:   if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
912:   if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
913:   if (!nlsP->W) {VecDuplicate(tao->solution,&nlsP->W);}
914:   if (!nlsP->D) {VecDuplicate(tao->solution,&nlsP->D);}
915:   if (!nlsP->Xold) {VecDuplicate(tao->solution,&nlsP->Xold);}
916:   if (!nlsP->Gold) {VecDuplicate(tao->solution,&nlsP->Gold);}
917:   nlsP->Diag = 0;
918:   nlsP->M = 0;
919:   return(0);
920: }

922: /*------------------------------------------------------------*/
925: static PetscErrorCode TaoDestroy_NLS(Tao tao)
926: {
927:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

931:   if (tao->setupcalled) {
932:     VecDestroy(&nlsP->D);
933:     VecDestroy(&nlsP->W);
934:     VecDestroy(&nlsP->Xold);
935:     VecDestroy(&nlsP->Gold);
936:   }
937:   VecDestroy(&nlsP->Diag);
938:   MatDestroy(&nlsP->M);
939:   PetscFree(tao->data);
940:   return(0);
941: }

943: /*------------------------------------------------------------*/
946: static PetscErrorCode TaoSetFromOptions_NLS(PetscOptions *PetscOptionsObject,Tao tao)
947: {
948:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;

952:   PetscOptionsHead(PetscOptionsObject,"Newton line search method for unconstrained optimization");
953:   PetscOptionsEList("-tao_nls_ksp_type", "ksp type", "", NLS_KSP, NLS_KSP_TYPES, NLS_KSP[nlsP->ksp_type], &nlsP->ksp_type, 0);
954:   PetscOptionsEList("-tao_nls_pc_type", "pc type", "", NLS_PC, NLS_PC_TYPES, NLS_PC[nlsP->pc_type], &nlsP->pc_type, 0);
955:   PetscOptionsEList("-tao_nls_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[nlsP->bfgs_scale_type], &nlsP->bfgs_scale_type, 0);
956:   PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, 0);
957:   PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, 0);
958:   PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval,NULL);
959:   PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin,NULL);
960:   PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax,NULL);
961:   PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac,NULL);
962:   PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin,NULL);
963:   PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax,NULL);
964:   PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac,NULL);
965:   PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac,NULL);
966:   PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac,NULL);
967:   PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac,NULL);
968:   PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1,NULL);
969:   PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2,NULL);
970:   PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3,NULL);
971:   PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4,NULL);
972:   PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1,NULL);
973:   PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2,NULL);
974:   PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3,NULL);
975:   PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4,NULL);
976:   PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5,NULL);
977:   PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1,NULL);
978:   PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2,NULL);
979:   PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3,NULL);
980:   PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4,NULL);
981:   PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1,NULL);
982:   PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2,NULL);
983:   PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3,NULL);
984:   PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4,NULL);
985:   PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5,NULL);
986:   PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i,NULL);
987:   PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i,NULL);
988:   PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i,NULL);
989:   PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i,NULL);
990:   PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i,NULL);
991:   PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i,NULL);
992:   PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i,NULL);
993:   PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1,NULL);
994:   PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2,NULL);
995:   PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1,NULL);
996:   PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2,NULL);
997:   PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3,NULL);
998:   PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4,NULL);
999:   PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta,NULL);
1000:   PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius,NULL);
1001:   PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius,NULL);
1002:   PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon,NULL);
1003:   PetscOptionsTail();
1004:   TaoLineSearchSetFromOptions(tao->linesearch);
1005:   KSPSetFromOptions(tao->ksp);
1006:   return(0);
1007: }


1010: /*------------------------------------------------------------*/
1013: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
1014: {
1015:   TAO_NLS        *nlsP = (TAO_NLS *)tao->data;
1016:   PetscInt       nrejects;
1017:   PetscBool      isascii;

1021:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
1022:   if (isascii) {
1023:     PetscViewerASCIIPushTab(viewer);
1024:     if (NLS_PC_BFGS == nlsP->pc_type && nlsP->M) {
1025:       MatLMVMGetRejects(nlsP->M,&nrejects);
1026:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n",nrejects);
1027:     }
1028:     PetscViewerASCIIPrintf(viewer, "Newton steps: %D\n", nlsP->newt);
1029:     PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", nlsP->bfgs);
1030:     PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", nlsP->sgrad);
1031:     PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", nlsP->grad);

1033:     PetscViewerASCIIPrintf(viewer, "nls ksp atol: %D\n", nlsP->ksp_atol);
1034:     PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %D\n", nlsP->ksp_rtol);
1035:     PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %D\n", nlsP->ksp_ctol);
1036:     PetscViewerASCIIPrintf(viewer, "nls ksp negc: %D\n", nlsP->ksp_negc);
1037:     PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %D\n", nlsP->ksp_dtol);
1038:     PetscViewerASCIIPrintf(viewer, "nls ksp iter: %D\n", nlsP->ksp_iter);
1039:     PetscViewerASCIIPrintf(viewer, "nls ksp othr: %D\n", nlsP->ksp_othr);
1040:     PetscViewerASCIIPopTab(viewer);
1041:   }
1042:   return(0);
1043: }

1045: /* ---------------------------------------------------------- */
1046: /*MC
1047:   TAONLS - Newton's method with linesearch for unconstrained minimization.
1048:   At each iteration, the Newton line search method solves the symmetric
1049:   system of equations to obtain the step direction dk:
1050:               Hk dk = -gk
1051:   then a More'-Thuente line search is applied along the direction dk to
1052:   approximately solve
1053:               min_t f(xk + t d_k)

1055:     Options Database Keys:
1056: + -tao_nls_ksp_type - "cg","nash","stcg","gltr","petsc"
1057: . -tao_nls_pc_type - "none","ahess","bfgs","petsc"
1058: . -tao_nls_bfgs_scale_type - "ahess","phess","bfgs"
1059: . -tao_nls_init_type - "constant","direction","interpolation"
1060: . -tao_nls_update_type - "step","reduction","interpolation"
1061: . -tao_nls_sval - perturbation starting value
1062: . -tao_nls_imin - minimum initial perturbation
1063: . -tao_nls_imax - maximum initial perturbation
1064: . -tao_nls_pmin - minimum perturbation
1065: . -tao_nls_pmax - maximum perturbation
1066: . -tao_nls_pgfac - growth factor
1067: . -tao_nls_psfac - shrink factor
1068: . -tao_nls_imfac - initial merit factor
1069: . -tao_nls_pmgfac - merit growth factor
1070: . -tao_nls_pmsfac - merit shrink factor
1071: . -tao_nls_eta1 - poor steplength; reduce radius
1072: . -tao_nls_eta2 - reasonable steplength; leave radius alone
1073: . -tao_nls_eta3 - good steplength; increase radius
1074: . -tao_nls_eta4 - excellent steplength; greatly increase radius
1075: . -tao_nls_alpha1 - alpha1 reduction
1076: . -tao_nls_alpha2 - alpha2 reduction
1077: . -tao_nls_alpha3 - alpha3 reduction
1078: . -tao_nls_alpha4 - alpha4 reduction
1079: . -tao_nls_alpha5 - alpha5 reduction
1080: . -tao_nls_mu1 - mu1 interpolation update
1081: . -tao_nls_mu2 - mu2 interpolation update
1082: . -tao_nls_gamma1 - gamma1 interpolation update
1083: . -tao_nls_gamma2 - gamma2 interpolation update
1084: . -tao_nls_gamma3 - gamma3 interpolation update
1085: . -tao_nls_gamma4 - gamma4 interpolation update
1086: . -tao_nls_theta - theta interpolation update
1087: . -tao_nls_omega1 - omega1 step update
1088: . -tao_nls_omega2 - omega2 step update
1089: . -tao_nls_omega3 - omega3 step update
1090: . -tao_nls_omega4 - omega4 step update
1091: . -tao_nls_omega5 - omega5 step update
1092: . -tao_nls_mu1_i -  mu1 interpolation init factor
1093: . -tao_nls_mu2_i -  mu2 interpolation init factor
1094: . -tao_nls_gamma1_i -  gamma1 interpolation init factor
1095: . -tao_nls_gamma2_i -  gamma2 interpolation init factor
1096: . -tao_nls_gamma3_i -  gamma3 interpolation init factor
1097: . -tao_nls_gamma4_i -  gamma4 interpolation init factor
1098: - -tao_nls_theta_i -  theta interpolation init factor

1100:   Level: beginner
1101: M*/
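/* Example usage (a sketch; "myapp" stands in for a user executable): the
   method and the options listed above are typically selected from the
   command line, e.g.

     ./myapp -tao_type nls -tao_nls_ksp_type stcg -tao_nls_pc_type bfgs \
             -tao_nls_init_type interpolation -tao_monitor

   The same options can also be set programmatically before TaoSetFromOptions()
   is called, e.g. PetscOptionsSetValue("-tao_nls_pc_type","bfgs"). */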

1105: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
1106: {
1107:   TAO_NLS        *nlsP;
1108:   const char     *morethuente_type = TAOLINESEARCHMT;

1112:   PetscNewLog(tao,&nlsP);

1114:   tao->ops->setup = TaoSetUp_NLS;
1115:   tao->ops->solve = TaoSolve_NLS;
1116:   tao->ops->view = TaoView_NLS;
1117:   tao->ops->setfromoptions = TaoSetFromOptions_NLS;
1118:   tao->ops->destroy = TaoDestroy_NLS;

1120:   /* Override default settings (unless already changed) */
1121:   if (!tao->max_it_changed) tao->max_it = 50;
1122:   if (!tao->trust0_changed) tao->trust0 = 100.0;
1123: #if defined(PETSC_USE_REAL_SINGLE)
1124:   if (!tao->fatol_changed) tao->fatol = 1.0e-5;
1125:   if (!tao->frtol_changed) tao->frtol = 1.0e-5;
1126: #else
1127:   if (!tao->fatol_changed) tao->fatol = 1.0e-10;
1128:   if (!tao->frtol_changed) tao->frtol = 1.0e-10;
1129: #endif

1131:   tao->data = (void*)nlsP;

1133:   nlsP->sval   = 0.0;
1134:   nlsP->imin   = 1.0e-4;
1135:   nlsP->imax   = 1.0e+2;
1136:   nlsP->imfac  = 1.0e-1;

1138:   nlsP->pmin   = 1.0e-12;
1139:   nlsP->pmax   = 1.0e+2;
1140:   nlsP->pgfac  = 1.0e+1;
1141:   nlsP->psfac  = 4.0e-1;
1142:   nlsP->pmgfac = 1.0e-1;
1143:   nlsP->pmsfac = 1.0e-1;

1145:   /*  Default values for trust-region radius update based on steplength */
1146:   nlsP->nu1 = 0.25;
1147:   nlsP->nu2 = 0.50;
1148:   nlsP->nu3 = 1.00;
1149:   nlsP->nu4 = 1.25;

1151:   nlsP->omega1 = 0.25;
1152:   nlsP->omega2 = 0.50;
1153:   nlsP->omega3 = 1.00;
1154:   nlsP->omega4 = 2.00;
1155:   nlsP->omega5 = 4.00;

1157:   /*  Default values for trust-region radius update based on reduction */
1158:   nlsP->eta1 = 1.0e-4;
1159:   nlsP->eta2 = 0.25;
1160:   nlsP->eta3 = 0.50;
1161:   nlsP->eta4 = 0.90;

1163:   nlsP->alpha1 = 0.25;
1164:   nlsP->alpha2 = 0.50;
1165:   nlsP->alpha3 = 1.00;
1166:   nlsP->alpha4 = 2.00;
1167:   nlsP->alpha5 = 4.00;

1169:   /*  Default values for trust-region radius update based on interpolation */
1170:   nlsP->mu1 = 0.10;
1171:   nlsP->mu2 = 0.50;

1173:   nlsP->gamma1 = 0.25;
1174:   nlsP->gamma2 = 0.50;
1175:   nlsP->gamma3 = 2.00;
1176:   nlsP->gamma4 = 4.00;

1178:   nlsP->theta = 0.05;

1180:   /*  Default values for trust region initialization based on interpolation */
1181:   nlsP->mu1_i = 0.35;
1182:   nlsP->mu2_i = 0.50;

1184:   nlsP->gamma1_i = 0.0625;
1185:   nlsP->gamma2_i = 0.5;
1186:   nlsP->gamma3_i = 2.0;
1187:   nlsP->gamma4_i = 5.0;

1189:   nlsP->theta_i = 0.25;

1191:   /*  Remaining parameters */
1192:   nlsP->min_radius = 1.0e-10;
1193:   nlsP->max_radius = 1.0e10;
1194:   nlsP->epsilon = 1.0e-6;

1196:   nlsP->ksp_type        = NLS_KSP_STCG;
1197:   nlsP->pc_type         = NLS_PC_BFGS;
1198:   nlsP->bfgs_scale_type = BFGS_SCALE_PHESS;
1199:   nlsP->init_type       = NLS_INIT_INTERPOLATION;
1200:   nlsP->update_type     = NLS_UPDATE_STEP;

1202:   TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
1203:   TaoLineSearchSetType(tao->linesearch,morethuente_type);
1204:   TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
1205:   TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);

1207:   /*  Set linear solver to default for symmetric matrices */
1208:   KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
1209:   KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix);
1210:   return(0);
1211: }

1215: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
1216: {
1218:   Mat            M;

1224:   PCShellGetContext(pc,(void**)&M);
1225:   MatLMVMSolve(M, b, x);
1226:   return(0);
1227: }