Actual source code: ntr.c

petsc-3.8.4 2018-03-24
  1:  #include <../src/tao/matrix/lmvmmat.h>
  2:  #include <../src/tao/unconstrained/impls/ntr/ntrimpl.h>

  4:  #include <petscksp.h>

  6: #define NTR_PC_NONE     0
  7: #define NTR_PC_AHESS    1
  8: #define NTR_PC_BFGS     2
  9: #define NTR_PC_PETSC    3
 10: #define NTR_PC_TYPES    4

 12: #define BFGS_SCALE_AHESS   0
 13: #define BFGS_SCALE_BFGS    1
 14: #define BFGS_SCALE_TYPES   2

 16: #define NTR_INIT_CONSTANT         0
 17: #define NTR_INIT_DIRECTION        1
 18: #define NTR_INIT_INTERPOLATION    2
 19: #define NTR_INIT_TYPES            3

 21: #define NTR_UPDATE_REDUCTION      0
 22: #define NTR_UPDATE_INTERPOLATION  1
 23: #define NTR_UPDATE_TYPES          2

 25: static const char *NTR_PC[64] = {"none","ahess","bfgs","petsc"};

 27: static const char *BFGS_SCALE[64] = {"ahess","bfgs"};

 29: static const char *NTR_INIT[64] = {"constant","direction","interpolation"};

 31: static const char *NTR_UPDATE[64] = {"reduction","interpolation"};
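      /* The string arrays above give the option names matching the integer codes
         defined at the top of the file; they are consumed by the PetscOptionsEList()
         calls in TaoSetFromOptions_NTR() below. */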

 33: /*  Routine for BFGS preconditioner */
 34: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
 35: {
 37:     Mat M;
 42:     PCShellGetContext(pc,(void**)&M);
 43:     MatLMVMSolve(M, b, x);
 44:     return(0);
 45: }
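      /* This apply routine is attached to the PCSHELL preconditioner in TaoSolve_NTR()
         via PCShellSetContext()/PCShellSetApply(), so each preconditioner application
         solves with the limited-memory BFGS approximation stored in the LMVM matrix. */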

 47: /*
 48:    TaoSolve_NTR - Implements Newton's Method with a trust region approach
 49:    for solving unconstrained minimization problems.

 51:    The basic algorithm is taken from MINPACK-2 (dstrn).

 53:    TaoSolve_NTR computes a local minimizer of a twice differentiable function
 54:    f by applying a trust region variant of Newton's method.  At each stage
 55:    of the algorithm, we use the preconditioned conjugate gradient method to
 56:    determine an approximate minimizer of the quadratic model

 58:         q(s) = <g, s> + 0.5 <s, Hs>

 60:    subject to the trust region constraint

 62:         || s ||_M <= radius,

 64:    where radius is the trust region radius and M is a symmetric positive
 65:    definite matrix (the preconditioner).  Here g is the gradient and H
 66:    is the Hessian matrix.

 68:    Note:  TaoSolve_NTR MUST use the iterative solver KSPCGNASH, KSPCGSTCG,
 69:           or KSPCGGLTR.  Thus, we set KSPCGNASH, KSPCGSTCG, or KSPCGGLTR in this
 70:           routine regardless of what the user may have previously specified.
 71: */
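      /* Explanatory note on the logic below: for a trial step s, the actual reduction
         actred = f(x) - f(x + s) is compared with the predicted reduction of the
         quadratic model, prered = -q(s) = -(<g, s> + 0.5 <s, Hs>), obtained from
         KSPCGGetObjFcn().  The ratio kappa = actred / prered drives the accept/reject
         decision and the radius update: with the reduction update, the step is rejected
         when kappa < eta1 and the radius is scaled by the alpha parameters; with the
         interpolation update, the mu/gamma parameters set in TaoCreate_NTR() are used. */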
 72: static PetscErrorCode TaoSolve_NTR(Tao tao)
 73: {
 74:   TAO_NTR            *tr = (TAO_NTR *)tao->data;
 75:   KSPType            ksp_type;
 76:   PetscBool          is_nash,is_stcg,is_gltr;
 77:   KSPConvergedReason ksp_reason;
 78:   PC                 pc;
 79:   TaoConvergedReason reason;
 80:   PetscReal          fmin, ftrial, prered, actred, kappa, sigma, beta;
 81:   PetscReal          tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 82:   PetscReal          f, gnorm;

 84:   PetscReal          delta;
 85:   PetscReal          norm_d;
 86:   PetscErrorCode     ierr;
 87:   PetscInt           bfgsUpdates = 0;
 88:   PetscInt           needH;

 90:   PetscInt           i_max = 5;
 91:   PetscInt           j_max = 1;
 92:   PetscInt           i, j, N, n, its;

 95:   if (tao->XL || tao->XU || tao->ops->computebounds) {
 96:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by ntr algorithm\n");
 97:   }

 99:   KSPGetType(tao->ksp,&ksp_type);
100:   PetscStrcmp(ksp_type,KSPCGNASH,&is_nash);
101:   PetscStrcmp(ksp_type,KSPCGSTCG,&is_stcg);
102:   PetscStrcmp(ksp_type,KSPCGGLTR,&is_gltr);
103:   if (!is_nash && !is_stcg && !is_gltr) {
104:     SETERRQ(PETSC_COMM_SELF,1,"TAO_NTR requires nash, stcg, or gltr for the KSP");
105:   }

107:   /* Initialize the radius and modify if it is too large or small */
108:   tao->trust = tao->trust0;
109:   tao->trust = PetscMax(tao->trust, tr->min_radius);
110:   tao->trust = PetscMin(tao->trust, tr->max_radius);

112:   if (NTR_PC_BFGS == tr->pc_type && !tr->M) {
113:     VecGetLocalSize(tao->solution,&n);
114:     VecGetSize(tao->solution,&N);
115:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tr->M);
116:     MatLMVMAllocateVectors(tr->M,tao->solution);
117:   }

119:   /* Check convergence criteria */
120:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
121:   TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
122:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");
123:   needH = 1;

125:   TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
126:   if (reason != TAO_CONTINUE_ITERATING) return(0);

128:   /* Create vectors for the limited memory preconditioner */
129:   if ((NTR_PC_BFGS == tr->pc_type) && (BFGS_SCALE_BFGS != tr->bfgs_scale_type)) {
130:     if (!tr->Diag) {
131:         VecDuplicate(tao->solution, &tr->Diag);
132:     }
133:   }

135:   /*  Modify the preconditioner to use the bfgs approximation */
136:   KSPGetPC(tao->ksp, &pc);
137:   switch(tr->pc_type) {
138:   case NTR_PC_NONE:
139:     PCSetType(pc, PCNONE);
140:     PCSetFromOptions(pc);
141:     break;

143:   case NTR_PC_AHESS:
144:     PCSetType(pc, PCJACOBI);
145:     PCSetFromOptions(pc);
146:     PCJacobiSetUseAbs(pc,PETSC_TRUE);
147:     break;

149:   case NTR_PC_BFGS:
150:     PCSetType(pc, PCSHELL);
151:     PCSetFromOptions(pc);
152:     PCShellSetName(pc, "bfgs");
153:     PCShellSetContext(pc, tr->M);
154:     PCShellSetApply(pc, MatLMVMSolveShell);
155:     break;

157:   default:
158:     /*  Use the pc method set by pc_type */
159:     break;
160:   }

162:   /*  Initialize trust-region radius */
163:   switch(tr->init_type) {
164:   case NTR_INIT_CONSTANT:
165:     /*  Use the initial radius specified */
166:     break;

168:   case NTR_INIT_INTERPOLATION:
 169:     /*  Use interpolation to determine the initial radius */
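          /* Explanatory note: the loop below repeatedly takes a trial step of length
             tao->trust along the steepest-descent direction, measures how well the
             quadratic model predicts the actual reduction (kappa), rescales the radius
             by a factor tau chosen from the gamma*_i parameters, and remembers the best
             point found (fmin, sigma) so the iterate can be moved there afterwards. */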
170:     max_radius = 0.0;

172:     for (j = 0; j < j_max; ++j) {
173:       fmin = f;
174:       sigma = 0.0;

176:       if (needH) {
177:         TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
178:         needH = 0;
179:       }

181:       for (i = 0; i < i_max; ++i) {

183:         VecCopy(tao->solution, tr->W);
184:         VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient);
185:         TaoComputeObjective(tao, tr->W, &ftrial);

187:         if (PetscIsInfOrNanReal(ftrial)) {
188:           tau = tr->gamma1_i;
189:         }
190:         else {
191:           if (ftrial < fmin) {
192:             fmin = ftrial;
193:             sigma = -tao->trust / gnorm;
194:           }

196:           MatMult(tao->hessian, tao->gradient, tao->stepdirection);
197:           VecDot(tao->gradient, tao->stepdirection, &prered);

199:           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
200:           actred = f - ftrial;
201:           if ((PetscAbsScalar(actred) <= tr->epsilon) &&
202:               (PetscAbsScalar(prered) <= tr->epsilon)) {
203:             kappa = 1.0;
204:           }
205:           else {
206:             kappa = actred / prered;
207:           }

209:           tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
210:           tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
211:           tau_min = PetscMin(tau_1, tau_2);
212:           tau_max = PetscMax(tau_1, tau_2);

214:           if (PetscAbsScalar(kappa - 1.0) <= tr->mu1_i) {
215:             /*  Great agreement */
216:             max_radius = PetscMax(max_radius, tao->trust);

218:             if (tau_max < 1.0) {
219:               tau = tr->gamma3_i;
220:             }
221:             else if (tau_max > tr->gamma4_i) {
222:               tau = tr->gamma4_i;
223:             }
224:             else {
225:               tau = tau_max;
226:             }
227:           }
228:           else if (PetscAbsScalar(kappa - 1.0) <= tr->mu2_i) {
229:             /*  Good agreement */
230:             max_radius = PetscMax(max_radius, tao->trust);

232:             if (tau_max < tr->gamma2_i) {
233:               tau = tr->gamma2_i;
234:             }
235:             else if (tau_max > tr->gamma3_i) {
236:               tau = tr->gamma3_i;
237:             }
238:             else {
239:               tau = tau_max;
240:             }
241:           }
242:           else {
243:             /*  Not good agreement */
244:             if (tau_min > 1.0) {
245:               tau = tr->gamma2_i;
246:             }
247:             else if (tau_max < tr->gamma1_i) {
248:               tau = tr->gamma1_i;
249:             }
250:             else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
251:               tau = tr->gamma1_i;
252:             }
253:             else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) &&
254:                      ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
255:               tau = tau_1;
256:             }
257:             else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) &&
258:                      ((tau_1 < tr->gamma1_i) || (tau_2 >= 1.0))) {
259:               tau = tau_2;
260:             }
261:             else {
262:               tau = tau_max;
263:             }
264:           }
265:         }
266:         tao->trust = tau * tao->trust;
267:       }

269:       if (fmin < f) {
270:         f = fmin;
271:         VecAXPY(tao->solution, sigma, tao->gradient);
272:         TaoComputeGradient(tao,tao->solution, tao->gradient);

274:         TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);

276:         if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
277:         needH = 1;

279:         TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
280:         if (reason != TAO_CONTINUE_ITERATING) {
281:           return(0);
282:         }
283:       }
284:     }
285:     tao->trust = PetscMax(tao->trust, max_radius);

287:     /*  Modify the radius if it is too large or small */
288:     tao->trust = PetscMax(tao->trust, tr->min_radius);
289:     tao->trust = PetscMin(tao->trust, tr->max_radius);
290:     break;

292:   default:
293:     /*  Norm of the first direction will initialize radius */
294:     tao->trust = 0.0;
295:     break;
296:   }

298:   /* Set initial scaling for the BFGS preconditioner
299:      This step is done after computing the initial trust-region radius
300:      since the function value may have decreased */
301:   if (NTR_PC_BFGS == tr->pc_type) {
302:     if (f != 0.0) {
303:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
304:     }
305:     else {
306:       delta = 2.0 / (gnorm*gnorm);
307:     }
308:     MatLMVMSetDelta(tr->M,delta);
309:   }
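        /* Note: delta ~ 2|f|/||g||^2 is a heuristic estimate of the inverse Hessian
           scale (exact for a one-dimensional quadratic with minimum value zero); it
           only seeds the LMVM approximation before any updates are applied. */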

311:   /* Have not converged; continue with Newton method */
312:   while (reason == TAO_CONTINUE_ITERATING) {
313:     ++tao->niter;
314:     tao->ksp_its=0;
315:     /* Compute the Hessian */
316:     if (needH) {
317:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
318:       needH = 0;
319:     }

321:     if (NTR_PC_BFGS == tr->pc_type) {
322:       if (BFGS_SCALE_AHESS == tr->bfgs_scale_type) {
323:         /* Obtain diagonal for the bfgs preconditioner */
324:         MatGetDiagonal(tao->hessian, tr->Diag);
325:         VecAbs(tr->Diag);
326:         VecReciprocal(tr->Diag);
327:         MatLMVMSetScale(tr->M,tr->Diag);
328:       }

330:       /* Update the limited memory preconditioner */
331:       MatLMVMUpdate(tr->M, tao->solution, tao->gradient);
332:       ++bfgsUpdates;
333:     }
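          /* bfgsUpdates counts LMVM updates since the last reset; it is checked below
             so that an indefinite preconditioner triggers a reset only when the
             approximation holds more than one update. */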

335:     while (reason == TAO_CONTINUE_ITERATING) {
336:       KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);

338:       /* Solve the trust region subproblem */
339:       KSPCGSetRadius(tao->ksp,tao->trust);
340:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
341:       KSPGetIterationNumber(tao->ksp,&its);
342:       tao->ksp_its+=its;
343:       tao->ksp_tot_its+=its;
344:       KSPCGGetNormD(tao->ksp, &norm_d);

346:       if (0.0 == tao->trust) {
347:         /* Radius was uninitialized; use the norm of the direction */
348:         if (norm_d > 0.0) {
349:           tao->trust = norm_d;

351:           /* Modify the radius if it is too large or small */
352:           tao->trust = PetscMax(tao->trust, tr->min_radius);
353:           tao->trust = PetscMin(tao->trust, tr->max_radius);
354:         }
355:         else {
356:           /* The direction was bad; set radius to default value and re-solve
357:              the trust-region subproblem to get a direction */
358:           tao->trust = tao->trust0;

360:           /* Modify the radius if it is too large or small */
361:           tao->trust = PetscMax(tao->trust, tr->min_radius);
362:           tao->trust = PetscMin(tao->trust, tr->max_radius);

364:           KSPCGSetRadius(tao->ksp,tao->trust);
365:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
366:           KSPGetIterationNumber(tao->ksp,&its);
367:           tao->ksp_its+=its;
368:           tao->ksp_tot_its+=its;
369:           KSPCGGetNormD(tao->ksp, &norm_d);

371:           if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
372:         }
373:       }
374:       VecScale(tao->stepdirection, -1.0);
375:       KSPGetConvergedReason(tao->ksp, &ksp_reason);
376:       if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&
377:           (NTR_PC_BFGS == tr->pc_type) && (bfgsUpdates > 1)) {
 378:         /* Preconditioner is numerically indefinite; reset the
 379:            approximation if using BFGS preconditioning. */

381:         if (f != 0.0) {
382:           delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
383:         }
384:         else {
385:           delta = 2.0 / (gnorm*gnorm);
386:         }
387:         MatLMVMSetDelta(tr->M, delta);
388:         MatLMVMReset(tr->M);
389:         MatLMVMUpdate(tr->M, tao->solution, tao->gradient);
390:         bfgsUpdates = 1;
391:       }

393:       if (NTR_UPDATE_REDUCTION == tr->update_type) {
394:         /* Get predicted reduction */
395:         KSPCGGetObjFcn(tao->ksp,&prered);
396:         if (prered >= 0.0) {
397:           /* The predicted reduction has the wrong sign.  This cannot
398:              happen in infinite precision arithmetic.  Step should
399:              be rejected! */
400:           tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
401:         }
402:         else {
403:           /* Compute trial step and function value */
404:           VecCopy(tao->solution,tr->W);
405:           VecAXPY(tr->W, 1.0, tao->stepdirection);
406:           TaoComputeObjective(tao, tr->W, &ftrial);

408:           if (PetscIsInfOrNanReal(ftrial)) {
409:             tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
410:           } else {
 411:             /* Compute the actual reduction */
412:             actred = f - ftrial;
413:             prered = -prered;
414:             if ((PetscAbsScalar(actred) <= tr->epsilon) &&
415:                 (PetscAbsScalar(prered) <= tr->epsilon)) {
416:               kappa = 1.0;
417:             }
418:             else {
419:               kappa = actred / prered;
420:             }

422:             /* Accept or reject the step and update radius */
423:             if (kappa < tr->eta1) {
424:               /* Reject the step */
425:               tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
426:             }
427:             else {
428:               /* Accept the step */
429:               if (kappa < tr->eta2) {
 430:                 /* Marginally bad step */
431:                 tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
432:               }
433:               else if (kappa < tr->eta3) {
434:                 /* Reasonable step */
435:                 tao->trust = tr->alpha3 * tao->trust;
436:               }
437:               else if (kappa < tr->eta4) {
438:                 /* Good step */
439:                 tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
440:               }
441:               else {
442:                 /* Very good step */
443:                 tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
444:               }
445:               break;
446:             }
447:           }
448:         }
449:       }
450:       else {
451:         /* Get predicted reduction */
452:         KSPCGGetObjFcn(tao->ksp,&prered);
453:         if (prered >= 0.0) {
454:           /* The predicted reduction has the wrong sign.  This cannot
455:              happen in infinite precision arithmetic.  Step should
456:              be rejected! */
457:           tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
458:         }
459:         else {
460:           VecCopy(tao->solution, tr->W);
461:           VecAXPY(tr->W, 1.0, tao->stepdirection);
462:           TaoComputeObjective(tao, tr->W, &ftrial);
463:           if (PetscIsInfOrNanReal(ftrial)) {
464:             tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
465:           }
466:           else {
467:             VecDot(tao->gradient, tao->stepdirection, &beta);
468:             actred = f - ftrial;
469:             prered = -prered;
470:             if ((PetscAbsScalar(actred) <= tr->epsilon) &&
471:                 (PetscAbsScalar(prered) <= tr->epsilon)) {
472:               kappa = 1.0;
473:             }
474:             else {
475:               kappa = actred / prered;
476:             }

478:             tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
479:             tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
480:             tau_min = PetscMin(tau_1, tau_2);
481:             tau_max = PetscMax(tau_1, tau_2);
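                  /* tau_1 and tau_2 are interpolation factors derived from the mismatch
                     between the actual and predicted reductions; below they bound or
                     rescale the radius relative to the step length norm_d. */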

483:             if (kappa >= 1.0 - tr->mu1) {
484:               /* Great agreement; accept step and update radius */
485:               if (tau_max < 1.0) {
486:                 tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
487:               }
488:               else if (tau_max > tr->gamma4) {
489:                 tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
490:               }
491:               else {
492:                 tao->trust = PetscMax(tao->trust, tau_max * norm_d);
493:               }
494:               break;
495:             }
496:             else if (kappa >= 1.0 - tr->mu2) {
497:               /* Good agreement */

499:               if (tau_max < tr->gamma2) {
500:                 tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
501:               }
502:               else if (tau_max > tr->gamma3) {
503:                 tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
504:               }
505:               else if (tau_max < 1.0) {
506:                 tao->trust = tau_max * PetscMin(tao->trust, norm_d);
507:               }
508:               else {
509:                 tao->trust = PetscMax(tao->trust, tau_max * norm_d);
510:               }
511:               break;
512:             }
513:             else {
514:               /* Not good agreement */
515:               if (tau_min > 1.0) {
516:                 tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
517:               }
518:               else if (tau_max < tr->gamma1) {
519:                 tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
520:               }
521:               else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
522:                 tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
523:               }
524:               else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) &&
525:                        ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
526:                 tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
527:               }
528:               else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) &&
529:                        ((tau_1 < tr->gamma1) || (tau_2 >= 1.0))) {
530:                 tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
531:               }
532:               else {
533:                 tao->trust = tau_max * PetscMin(tao->trust, norm_d);
534:               }
535:             }
536:           }
537:         }
538:       }

 540:       /* If we get here, the step was rejected and the radius was decreased.
 541:          Monitor the shrinking radius to check for termination. */
542:       TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust, &reason);
543:     }
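          /* The subproblem loop above exits either by accepting a step (break) or
             because TaoMonitor() changed the convergence reason, e.g. once the
             shrinking radius falls below the step tolerance. */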

545:     /* The radius may have been increased; modify if it is too large */
546:     tao->trust = PetscMin(tao->trust, tr->max_radius);

548:     if (reason == TAO_CONTINUE_ITERATING) {
549:       VecCopy(tr->W, tao->solution);
550:       f = ftrial;
551:       TaoComputeGradient(tao, tao->solution, tao->gradient);
552:       TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
553:       if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
554:       needH = 1;
555:       TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust, &reason);
556:     }
557:   }
558:   return(0);
559: }

561: /*------------------------------------------------------------*/
562: static PetscErrorCode TaoSetUp_NTR(Tao tao)
563: {
564:   TAO_NTR *tr = (TAO_NTR *)tao->data;

568:   if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient);}
569:   if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
570:   if (!tr->W) {VecDuplicate(tao->solution, &tr->W);}

572:   tr->Diag = 0;
573:   tr->M = 0;
574:   return(0);
575: }

577: /*------------------------------------------------------------*/
578: static PetscErrorCode TaoDestroy_NTR(Tao tao)
579: {
580:   TAO_NTR        *tr = (TAO_NTR *)tao->data;

584:   if (tao->setupcalled) {
585:     VecDestroy(&tr->W);
586:   }
587:   MatDestroy(&tr->M);
588:   VecDestroy(&tr->Diag);
589:   PetscFree(tao->data);
590:   return(0);
591: }

593: /*------------------------------------------------------------*/
594: static PetscErrorCode TaoSetFromOptions_NTR(PetscOptionItems *PetscOptionsObject,Tao tao)
595: {
596:   TAO_NTR        *tr = (TAO_NTR *)tao->data;

600:   PetscOptionsHead(PetscOptionsObject,"Newton trust region method for unconstrained optimization");
601:   PetscOptionsEList("-tao_ntr_pc_type", "pc type", "", NTR_PC, NTR_PC_TYPES, NTR_PC[tr->pc_type], &tr->pc_type,NULL);
602:   PetscOptionsEList("-tao_ntr_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tr->bfgs_scale_type], &tr->bfgs_scale_type,NULL);
603:   PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type,NULL);
604:   PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type,NULL);
605:   PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1,NULL);
606:   PetscOptionsReal("-tao_ntr_eta2", "", "", tr->eta2, &tr->eta2,NULL);
607:   PetscOptionsReal("-tao_ntr_eta3", "", "", tr->eta3, &tr->eta3,NULL);
608:   PetscOptionsReal("-tao_ntr_eta4", "", "", tr->eta4, &tr->eta4,NULL);
609:   PetscOptionsReal("-tao_ntr_alpha1", "", "", tr->alpha1, &tr->alpha1,NULL);
610:   PetscOptionsReal("-tao_ntr_alpha2", "", "", tr->alpha2, &tr->alpha2,NULL);
611:   PetscOptionsReal("-tao_ntr_alpha3", "", "", tr->alpha3, &tr->alpha3,NULL);
612:   PetscOptionsReal("-tao_ntr_alpha4", "", "", tr->alpha4, &tr->alpha4,NULL);
613:   PetscOptionsReal("-tao_ntr_alpha5", "", "", tr->alpha5, &tr->alpha5,NULL);
614:   PetscOptionsReal("-tao_ntr_mu1", "", "", tr->mu1, &tr->mu1,NULL);
615:   PetscOptionsReal("-tao_ntr_mu2", "", "", tr->mu2, &tr->mu2,NULL);
616:   PetscOptionsReal("-tao_ntr_gamma1", "", "", tr->gamma1, &tr->gamma1,NULL);
617:   PetscOptionsReal("-tao_ntr_gamma2", "", "", tr->gamma2, &tr->gamma2,NULL);
618:   PetscOptionsReal("-tao_ntr_gamma3", "", "", tr->gamma3, &tr->gamma3,NULL);
619:   PetscOptionsReal("-tao_ntr_gamma4", "", "", tr->gamma4, &tr->gamma4,NULL);
620:   PetscOptionsReal("-tao_ntr_theta", "", "", tr->theta, &tr->theta,NULL);
621:   PetscOptionsReal("-tao_ntr_mu1_i", "", "", tr->mu1_i, &tr->mu1_i,NULL);
622:   PetscOptionsReal("-tao_ntr_mu2_i", "", "", tr->mu2_i, &tr->mu2_i,NULL);
623:   PetscOptionsReal("-tao_ntr_gamma1_i", "", "", tr->gamma1_i, &tr->gamma1_i,NULL);
624:   PetscOptionsReal("-tao_ntr_gamma2_i", "", "", tr->gamma2_i, &tr->gamma2_i,NULL);
625:   PetscOptionsReal("-tao_ntr_gamma3_i", "", "", tr->gamma3_i, &tr->gamma3_i,NULL);
626:   PetscOptionsReal("-tao_ntr_gamma4_i", "", "", tr->gamma4_i, &tr->gamma4_i,NULL);
627:   PetscOptionsReal("-tao_ntr_theta_i", "", "", tr->theta_i, &tr->theta_i,NULL);
628:   PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius,NULL);
629:   PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius,NULL);
630:   PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon,NULL);
631:   PetscOptionsTail();
632:   KSPSetFromOptions(tao->ksp);
633:   return(0);
634: }

636: /*------------------------------------------------------------*/
637: static PetscErrorCode TaoView_NTR(Tao tao, PetscViewer viewer)
638: {
639:   TAO_NTR        *tr = (TAO_NTR *)tao->data;
641:   PetscInt       nrejects;
642:   PetscBool      isascii;

645:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
646:   if (isascii) {
647:     PetscViewerASCIIPushTab(viewer);
648:     if (NTR_PC_BFGS == tr->pc_type && tr->M) {
649:       MatLMVMGetRejects(tr->M, &nrejects);
650:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects);
651:     }
652:     PetscViewerASCIIPopTab(viewer);
653:   }
654:   return(0);
655: }

657: /*------------------------------------------------------------*/
658: /*MC
659:   TAONTR - Newton's method with trust region for unconstrained minimization.
 660:   At each iteration, the Newton trust region method solves the trust-region subproblem
 661:             min_d  0.5 d^T H_k d + g_k^T d,  s.t.  ||d|| <= Delta_k.
 662:   NTR expects a KSP solver with a trust region radius (nash, stcg, or gltr).

664:   Options Database Keys:
665: + -tao_ntr_pc_type - "none","ahess","bfgs","petsc"
666: . -tao_ntr_bfgs_scale_type - type of scaling with bfgs pc, "ahess" or "bfgs"
667: . -tao_ntr_init_type - "constant","direction","interpolation"
668: . -tao_ntr_update_type - "reduction","interpolation"
669: . -tao_ntr_min_radius - lower bound on trust region radius
670: . -tao_ntr_max_radius - upper bound on trust region radius
671: . -tao_ntr_epsilon - tolerance for accepting actual / predicted reduction
672: . -tao_ntr_mu1_i - mu1 interpolation init factor
673: . -tao_ntr_mu2_i - mu2 interpolation init factor
674: . -tao_ntr_gamma1_i - gamma1 interpolation init factor
675: . -tao_ntr_gamma2_i - gamma2 interpolation init factor
676: . -tao_ntr_gamma3_i - gamma3 interpolation init factor
677: . -tao_ntr_gamma4_i - gamma4 interpolation init factor
 678: . -tao_ntr_theta_i - theta interpolation init factor
679: . -tao_ntr_eta1 - eta1 reduction update factor
680: . -tao_ntr_eta2 - eta2 reduction update factor
681: . -tao_ntr_eta3 - eta3 reduction update factor
682: . -tao_ntr_eta4 - eta4 reduction update factor
683: . -tao_ntr_alpha1 - alpha1 reduction update factor
684: . -tao_ntr_alpha2 - alpha2 reduction update factor
685: . -tao_ntr_alpha3 - alpha3 reduction update factor
686: . -tao_ntr_alpha4 - alpha4 reduction update factor
 687: . -tao_ntr_alpha5 - alpha5 reduction update factor
688: . -tao_ntr_mu1 - mu1 interpolation update
689: . -tao_ntr_mu2 - mu2 interpolation update
 690: . -tao_ntr_gamma1 - gamma1 interpolation update
 691: . -tao_ntr_gamma2 - gamma2 interpolation update
 692: . -tao_ntr_gamma3 - gamma3 interpolation update
693: . -tao_ntr_gamma4 - gamma4 interpolation update
694: - -tao_ntr_theta - theta interpolation update

696:   Level: beginner
697: M*/
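
      /* Illustrative usage sketch (not part of this file): a user typically selects this
         algorithm with TaoSetType(tao,TAONTR) or the -tao_type ntr option.  In the sketch
         below, FormFunctionGradient, FormHessian, the vector x, the matrix H, and the
         user context are application-provided and purely hypothetical.

           Tao tao;
           TaoCreate(PETSC_COMM_WORLD,&tao);
           TaoSetType(tao,TAONTR);
           TaoSetInitialVector(tao,x);
           TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);
           TaoSetHessianRoutine(tao,H,H,FormHessian,&user);
           TaoSetFromOptions(tao);
           TaoSolve(tao);
           TaoDestroy(&tao);
      */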

699: PETSC_EXTERN PetscErrorCode TaoCreate_NTR(Tao tao)
700: {
701:   TAO_NTR *tr;


706:   PetscNewLog(tao,&tr);

708:   tao->ops->setup = TaoSetUp_NTR;
709:   tao->ops->solve = TaoSolve_NTR;
710:   tao->ops->view = TaoView_NTR;
711:   tao->ops->setfromoptions = TaoSetFromOptions_NTR;
712:   tao->ops->destroy = TaoDestroy_NTR;

714:   /* Override default settings (unless already changed) */
715:   if (!tao->max_it_changed) tao->max_it = 50;
716:   if (!tao->trust0_changed) tao->trust0 = 100.0;
717:   tao->data = (void*)tr;

719:   /*  Standard trust region update parameters */
720:   tr->eta1 = 1.0e-4;
721:   tr->eta2 = 0.25;
722:   tr->eta3 = 0.50;
723:   tr->eta4 = 0.90;

725:   tr->alpha1 = 0.25;
726:   tr->alpha2 = 0.50;
727:   tr->alpha3 = 1.00;
728:   tr->alpha4 = 2.00;
729:   tr->alpha5 = 4.00;

731:   /*  Interpolation trust region update parameters */
732:   tr->mu1 = 0.10;
733:   tr->mu2 = 0.50;

735:   tr->gamma1 = 0.25;
736:   tr->gamma2 = 0.50;
737:   tr->gamma3 = 2.00;
738:   tr->gamma4 = 4.00;

740:   tr->theta = 0.05;

742:   /*  Interpolation parameters for initialization */
743:   tr->mu1_i = 0.35;
744:   tr->mu2_i = 0.50;

746:   tr->gamma1_i = 0.0625;
747:   tr->gamma2_i = 0.50;
748:   tr->gamma3_i = 2.00;
749:   tr->gamma4_i = 5.00;

751:   tr->theta_i = 0.25;

753:   tr->min_radius = 1.0e-10;
754:   tr->max_radius = 1.0e10;
755:   tr->epsilon    = 1.0e-6;

757:   tr->pc_type         = NTR_PC_BFGS;
758:   tr->bfgs_scale_type = BFGS_SCALE_AHESS;
759:   tr->init_type       = NTR_INIT_INTERPOLATION;
760:   tr->update_type     = NTR_UPDATE_REDUCTION;

762:   /* Set linear solver to default for trust region */
763:   KSPCreate(((PetscObject)tao)->comm,&tao->ksp);
764:   KSPSetOptionsPrefix(tao->ksp,tao->hdr.prefix);
765:   KSPSetType(tao->ksp,KSPCGSTCG);
766:   return(0);
767: }