Actual source code: ntl.c

petsc-3.7.7 2017-09-25
  1: #include <../src/tao/matrix/lmvmmat.h>
  2: #include <../src/tao/unconstrained/impls/ntl/ntl.h>

  4: #include <petscksp.h>
  5: #include <petscpc.h>
  6: #include <petsc/private/kspimpl.h>
  7: #include <petsc/private/pcimpl.h>

  9: #define NTL_KSP_NASH    0
 10: #define NTL_KSP_STCG    1
 11: #define NTL_KSP_GLTR    2
 12: #define NTL_KSP_TYPES   3

 14: #define NTL_PC_NONE     0
 15: #define NTL_PC_AHESS    1
 16: #define NTL_PC_BFGS     2
 17: #define NTL_PC_PETSC    3
 18: #define NTL_PC_TYPES    4

 20: #define BFGS_SCALE_AHESS        0
 21: #define BFGS_SCALE_BFGS         1
 22: #define BFGS_SCALE_TYPES        2

 24: #define NTL_INIT_CONSTANT         0
 25: #define NTL_INIT_DIRECTION        1
 26: #define NTL_INIT_INTERPOLATION    2
 27: #define NTL_INIT_TYPES            3

 29: #define NTL_UPDATE_REDUCTION      0
 30: #define NTL_UPDATE_INTERPOLATION  1
 31: #define NTL_UPDATE_TYPES          2

 33: static const char *NTL_KSP[64] = {"nash", "stcg", "gltr"};

 35: static const char *NTL_PC[64] = {"none", "ahess", "bfgs", "petsc"};

 37: static const char *BFGS_SCALE[64] = {"ahess", "bfgs"};

 39: static const char *NTL_INIT[64] = {"constant", "direction", "interpolation"};

 41: static const char *NTL_UPDATE[64] = {"reduction", "interpolation"};

 43: /* Routine for BFGS preconditioner */

 47: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x)
 48: {
 50:   Mat            M;

 56:   PCShellGetContext(pc,(void**)&M);
 57:   MatLMVMSolve(M, b, x);
 58:   return(0);
 59: }
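
/* A minimal sketch (not part of the original file) of how the apply routine
   above is wired into a shell preconditioner; the same wiring is performed in
   TaoSolve_NTL() below, and "M" is assumed to be an LMVM matrix created with
   MatCreateLMVM():

     PC pc;
     KSPGetPC(ksp, &pc);
     PCSetType(pc, PCSHELL);
     PCShellSetContext(pc, M);
     PCShellSetApply(pc, MatLMVMSolveShell);
*/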

 61: /* Implements Newton's method with a trust-region, line-search approach for
 62:    solving unconstrained minimization problems.  A Moré-Thuente line search
 63:    is used to guarantee that the BFGS preconditioner remains positive
 64:    definite. */

 66: #define NTL_NEWTON              0
 67: #define NTL_BFGS                1
 68: #define NTL_SCALED_GRADIENT     2
 69: #define NTL_GRADIENT            3

 73: static PetscErrorCode TaoSolve_NTL(Tao tao)
 74: {
 75:   TAO_NTL                      *tl = (TAO_NTL *)tao->data;
 76:   PC                           pc;
 77:   KSPConvergedReason           ksp_reason;
 78:   TaoConvergedReason           reason;
 79:   TaoLineSearchConvergedReason ls_reason;

 81:   PetscReal                    fmin, ftrial, prered, actred, kappa, sigma;
 82:   PetscReal                    tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 83:   PetscReal                    f, fold, gdx, gnorm;
 84:   PetscReal                    step = 1.0;

 86:   PetscReal                    delta;
 87:   PetscReal                    norm_d = 0.0;
 88:   PetscErrorCode               ierr;
 89:   PetscInt                     stepType;
 90:   PetscInt                     its;

 92:   PetscInt                     bfgsUpdates = 0;
 93:   PetscInt                     needH;

 95:   PetscInt                     i_max = 5;
 96:   PetscInt                     j_max = 1;
 97:   PetscInt                     i, j, n, N;

 99:   PetscInt                     tr_reject;

102:   if (tao->XL || tao->XU || tao->ops->computebounds) {
103:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by the ntl algorithm\n");
104:   }

106:   /* Initialize trust-region radius */
107:   tao->trust = tao->trust0;

109:   /* Modify the radius if it is too large or small */
110:   tao->trust = PetscMax(tao->trust, tl->min_radius);
111:   tao->trust = PetscMin(tao->trust, tl->max_radius);

113:   if (NTL_PC_BFGS == tl->pc_type && !tl->M) {
114:     VecGetLocalSize(tao->solution,&n);
115:     VecGetSize(tao->solution,&N);
116:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tl->M);
117:     MatLMVMAllocateVectors(tl->M,tao->solution);
118:   }
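
  /* Note: tl->M is a limited-memory (L-BFGS style) quasi-Newton approximation;
     MatLMVMSolve() applies it to a vector, which is what the shell
     preconditioner defined above uses. */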

120:   /* Check convergence criteria */
121:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
122:   VecNorm(tao->gradient, NORM_2, &gnorm);
123:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
124:   needH = 1;

126:   TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
127:   if (reason != TAO_CONTINUE_ITERATING) return(0);

129:   /* Create vectors for the limited memory preconditioner */
130:   if ((NTL_PC_BFGS == tl->pc_type) && (BFGS_SCALE_BFGS != tl->bfgs_scale_type)) {
131:     if (!tl->Diag) {
132:       VecDuplicate(tao->solution, &tl->Diag);
133:     }
134:   }

136:   /* Modify the linear solver to a conjugate gradient method */
137:   switch(tl->ksp_type) {
138:   case NTL_KSP_NASH:
139:     KSPSetType(tao->ksp, KSPNASH);
140:     KSPSetFromOptions(tao->ksp);
141:     break;

143:   case NTL_KSP_STCG:
144:     KSPSetType(tao->ksp, KSPSTCG);
145:     KSPSetFromOptions(tao->ksp);
146:     break;

148:   default:
149:     KSPSetType(tao->ksp, KSPGLTR);
150:     KSPSetFromOptions(tao->ksp);
151:     break;
152:   }
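
  /* The same choice can be made from the options database with
     -tao_ntl_ksp_type nash|stcg|gltr (registered in TaoSetFromOptions_NTL
     below); all three are trust-region variants of the conjugate gradient
     method. */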

154:   /* Modify the preconditioner to use the bfgs approximation */
155:   KSPGetPC(tao->ksp, &pc);
156:   switch(tl->pc_type) {
157:   case NTL_PC_NONE:
158:     PCSetType(pc, PCNONE);
159:     PCSetFromOptions(pc);
160:     break;

162:   case NTL_PC_AHESS:
163:     PCSetType(pc, PCJACOBI);
164:     PCSetFromOptions(pc);
165:     PCJacobiSetUseAbs(pc,PETSC_TRUE);
166:     break;

168:   case NTL_PC_BFGS:
169:     PCSetType(pc, PCSHELL);
170:     PCSetFromOptions(pc);
171:     PCShellSetName(pc, "bfgs");
172:     PCShellSetContext(pc, tl->M);
173:     PCShellSetApply(pc, MatLMVMSolveShell);
174:     break;

176:   default:
177:     /* Use the pc method set by pc_type */
178:     break;
179:   }
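
  /* Likewise, -tao_ntl_pc_type none|ahess|bfgs|petsc selects this block;
     "ahess" uses Jacobi scaling with the absolute values of the Hessian
     diagonal, and "petsc" leaves whatever preconditioner was already set on
     the KSP untouched. */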

181:   /* Initialize trust-region radius.  The initialization is only performed
182:      when we are using Steihaug-Toint or the Generalized Lanczos method. */
183:   switch(tl->init_type) {
184:   case NTL_INIT_CONSTANT:
185:     /* Use the initial radius specified */
186:     break;

188:   case NTL_INIT_INTERPOLATION:
189:     /* Use interpolation to determine the initial radius */
190:     max_radius = 0.0;

192:     for (j = 0; j < j_max; ++j) {
193:       fmin = f;
194:       sigma = 0.0;

196:       if (needH) {
197:         TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
198:         needH = 0;
199:       }

201:       for (i = 0; i < i_max; ++i) {
202:         VecCopy(tao->solution, tl->W);
203:         VecAXPY(tl->W, -tao->trust/gnorm, tao->gradient);

205:         TaoComputeObjective(tao, tl->W, &ftrial);
206:         if (PetscIsInfOrNanReal(ftrial)) {
207:           tau = tl->gamma1_i;
208:         } else {
209:           if (ftrial < fmin) {
210:             fmin = ftrial;
211:             sigma = -tao->trust / gnorm;
212:           }

214:           MatMult(tao->hessian, tao->gradient, tao->stepdirection);
215:           VecDot(tao->gradient, tao->stepdirection, &prered);

217:           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
218:           actred = f - ftrial;
219:           if ((PetscAbsScalar(actred) <= tl->epsilon) && (PetscAbsScalar(prered) <= tl->epsilon)) {
220:             kappa = 1.0;
221:           } else {
222:             kappa = actred / prered;
223:           }

225:           tau_1 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust + (1.0 - tl->theta_i) * prered - actred);
226:           tau_2 = tl->theta_i * gnorm * tao->trust / (tl->theta_i * gnorm * tao->trust - (1.0 + tl->theta_i) * prered + actred);
227:           tau_min = PetscMin(tau_1, tau_2);
228:           tau_max = PetscMax(tau_1, tau_2);

230:           if (PetscAbsScalar(kappa - 1.0) <= tl->mu1_i) {
231:             /* Great agreement */
232:             max_radius = PetscMax(max_radius, tao->trust);

234:             if (tau_max < 1.0) {
235:               tau = tl->gamma3_i;
236:             } else if (tau_max > tl->gamma4_i) {
237:               tau = tl->gamma4_i;
238:             } else if (tau_1 >= 1.0 && tau_1 <= tl->gamma4_i && tau_2 < 1.0) {
239:               tau = tau_1;
240:             } else if (tau_2 >= 1.0 && tau_2 <= tl->gamma4_i && tau_1 < 1.0) {
241:               tau = tau_2;
242:             } else {
243:               tau = tau_max;
244:             }
245:           } else if (PetscAbsScalar(kappa - 1.0) <= tl->mu2_i) {
246:             /* Good agreement */
247:             max_radius = PetscMax(max_radius, tao->trust);

249:             if (tau_max < tl->gamma2_i) {
250:               tau = tl->gamma2_i;
251:             } else if (tau_max > tl->gamma3_i) {
252:               tau = tl->gamma3_i;
253:             } else {
254:               tau = tau_max;
255:             }
256:           } else {
257:             /* Not good agreement */
258:             if (tau_min > 1.0) {
259:               tau = tl->gamma2_i;
260:             } else if (tau_max < tl->gamma1_i) {
261:               tau = tl->gamma1_i;
262:             } else if ((tau_min < tl->gamma1_i) && (tau_max >= 1.0)) {
263:               tau = tl->gamma1_i;
264:             } else if ((tau_1 >= tl->gamma1_i) && (tau_1 < 1.0) &&  ((tau_2 < tl->gamma1_i) || (tau_2 >= 1.0))) {
265:               tau = tau_1;
266:             } else if ((tau_2 >= tl->gamma1_i) && (tau_2 < 1.0) &&  ((tau_1 < tl->gamma1_i) || (tau_2 >= 1.0))) {
267:               tau = tau_2;
268:             } else {
269:               tau = tau_max;
270:             }
271:           }
272:         }
273:         tao->trust = tau * tao->trust;
274:       }

276:       if (fmin < f) {
277:         f = fmin;
278:         VecAXPY(tao->solution, sigma, tao->gradient);
279:         TaoComputeGradient(tao, tao->solution, tao->gradient);

281:         VecNorm(tao->gradient, NORM_2, &gnorm);
282:         if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
283:         needH = 1;

285:         TaoMonitor(tao, tao->niter, f, gnorm, 0.0, 1.0, &reason);
286:         if (reason != TAO_CONTINUE_ITERATING) return(0);
287:       }
288:     }
289:     tao->trust = PetscMax(tao->trust, max_radius);

291:     /* Modify the radius if it is too large or small */
292:     tao->trust = PetscMax(tao->trust, tl->min_radius);
293:     tao->trust = PetscMin(tao->trust, tl->max_radius);
294:     break;

296:   default:
297:     /* Norm of the first direction will initialize radius */
298:     tao->trust = 0.0;
299:     break;
300:   }
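
  /* Sketch of the interpolation-based initialization above: for a trial
     radius Delta = tao->trust, a steepest-descent step of length Delta gives

       prered = Delta * (||g|| - 0.5 * Delta * g^T H g / ||g||^2)
       actred = f(x) - f(x - (Delta/||g||) * g)
       kappa  = actred / prered

     and Delta is rescaled by a factor tau chosen from how well the model
     agrees with the actual reduction (mu1_i/mu2_i thresholds, gamma*_i
     bounds). */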

302:   /* Set initial scaling for the BFGS preconditioner
303:      This step is done after computing the initial trust-region radius
304:      since the function value may have decreased */
305:   if (NTL_PC_BFGS == tl->pc_type) {
306:     if (f != 0.0) {
307:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
308:     } else {
309:       delta = 2.0 / (gnorm*gnorm);
310:     }
311:     MatLMVMSetDelta(tl->M, delta);
312:   }
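
  /* The scaling delta = 2*|f| / ||g||^2 used above is a heuristic initial
     scaling for the limited-memory approximation; loosely, it matches the
     curvature a quadratic model would need in order to reproduce the current
     function value from the current gradient. */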

314:   /* Set counter for gradient/reset steps */
315:   tl->ntrust = 0;
316:   tl->newt = 0;
317:   tl->bfgs = 0;
318:   tl->sgrad = 0;
319:   tl->grad = 0;

321:   /* Have not converged; continue with Newton method */
322:   while (reason == TAO_CONTINUE_ITERATING) {
323:     ++tao->niter;
324:     tao->ksp_its=0;
325:     /* Compute the Hessian */
326:     if (needH) {
327:       TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);
328:     }

330:     if (NTL_PC_BFGS == tl->pc_type) {
331:       if (BFGS_SCALE_AHESS == tl->bfgs_scale_type) {
332:         /* Obtain diagonal for the bfgs preconditioner */
333:         MatGetDiagonal(tao->hessian, tl->Diag);
334:         VecAbs(tl->Diag);
335:         VecReciprocal(tl->Diag);
336:         MatLMVMSetScale(tl->M, tl->Diag);
337:       }

339:       /* Update the limited memory preconditioner */
340:       MatLMVMUpdate(tl->M,tao->solution, tao->gradient);
341:       ++bfgsUpdates;
342:     }
343:     KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
344:     /* Solve the Newton system of equations */
345:     if (NTL_KSP_NASH == tl->ksp_type) {
346:       KSPNASHSetRadius(tao->ksp,tl->max_radius);
347:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
348:       KSPGetIterationNumber(tao->ksp,&its);
349:       tao->ksp_its+=its;
350:       tao->ksp_tot_its+=its;
351:       KSPNASHGetNormD(tao->ksp, &norm_d);
352:     } else if (NTL_KSP_STCG == tl->ksp_type) {
353:       KSPSTCGSetRadius(tao->ksp,tl->max_radius);
354:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
355:       KSPGetIterationNumber(tao->ksp,&its);
356:       tao->ksp_its+=its;
357:       tao->ksp_tot_its+=its;
358:       KSPSTCGGetNormD(tao->ksp, &norm_d);
359:     } else { /* NTL_KSP_GLTR */
360:       KSPGLTRSetRadius(tao->ksp,tl->max_radius);
361:       KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
362:       KSPGetIterationNumber(tao->ksp,&its);
363:       tao->ksp_its+=its;
364:       tao->ksp_tot_its+=its;
365:       KSPGLTRGetNormD(tao->ksp, &norm_d);
366:     }
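
    /* Each variant (Nash, Steihaug-Toint, GLTR) approximately solves the
       trust-region subproblem min_d 0.5 d^T H d + g^T d subject to
       ||d|| <= radius; the GetNormD calls return the norm of the computed
       step.  The solve uses the gradient as the right-hand side, so the
       direction is negated below before it is used. */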

368:     if (0.0 == tao->trust) {
369:       /* Radius was uninitialized; use the norm of the direction */
370:       if (norm_d > 0.0) {
371:         tao->trust = norm_d;

373:         /* Modify the radius if it is too large or small */
374:         tao->trust = PetscMax(tao->trust, tl->min_radius);
375:         tao->trust = PetscMin(tao->trust, tl->max_radius);
376:       } else {
377:         /* The direction was bad; set radius to default value and re-solve
378:            the trust-region subproblem to get a direction */
379:         tao->trust = tao->trust0;

381:         /* Modify the radius if it is too large or small */
382:         tao->trust = PetscMax(tao->trust, tl->min_radius);
383:         tao->trust = PetscMin(tao->trust, tl->max_radius);

385:         if (NTL_KSP_NASH == tl->ksp_type) {
386:           KSPNASHSetRadius(tao->ksp,tl->max_radius);
387:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
388:           KSPGetIterationNumber(tao->ksp,&its);
389:           tao->ksp_its+=its;
390:           tao->ksp_tot_its+=its;
391:           KSPNASHGetNormD(tao->ksp, &norm_d);
392:         } else if (NTL_KSP_STCG == tl->ksp_type) {
393:           KSPSTCGSetRadius(tao->ksp,tl->max_radius);
394:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
395:           KSPGetIterationNumber(tao->ksp,&its);
396:           tao->ksp_its+=its;
397:           tao->ksp_tot_its+=its;
398:           KSPSTCGGetNormD(tao->ksp, &norm_d);
399:         } else { /* NTL_KSP_GLTR */
400:           KSPGLTRSetRadius(tao->ksp,tl->max_radius);
401:           KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);
402:           KSPGetIterationNumber(tao->ksp,&its);
403:           tao->ksp_its+=its;
404:           tao->ksp_tot_its+=its;
405:           KSPGLTRGetNormD(tao->ksp, &norm_d);
406:         }


409:         if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
410:       }
411:     }

413:     VecScale(tao->stepdirection, -1.0);
414:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
415:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (NTL_PC_BFGS == tl->pc_type) && (bfgsUpdates > 1)) {
416:       /* Preconditioner is numerically indefinite; reset the
417:          approximation if using BFGS preconditioning. */

419:       if (f != 0.0) {
420:         delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
421:       } else {
422:         delta = 2.0 / (gnorm*gnorm);
423:       }
424:       MatLMVMSetDelta(tl->M, delta);
425:       MatLMVMReset(tl->M);
426:       MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
427:       bfgsUpdates = 1;
428:     }
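
    /* Resetting the LMVM matrix discards the stored correction pairs and
       reinstalls the delta scaling, so the preconditioner becomes positive
       definite again; bfgsUpdates is restarted at 1 to reflect the single
       update applied after the reset. */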

430:     /* Check trust-region reduction conditions */
431:     tr_reject = 0;
432:     if (NTL_UPDATE_REDUCTION == tl->update_type) {
433:       /* Get predicted reduction */
434:       if (NTL_KSP_NASH == tl->ksp_type) {
435:         KSPNASHGetObjFcn(tao->ksp,&prered);
436:       } else if (NTL_KSP_STCG == tl->ksp_type) {
437:         KSPSTCGGetObjFcn(tao->ksp,&prered);
438:       } else { /* gltr */
439:         KSPGLTRGetObjFcn(tao->ksp,&prered);
440:       }

442:       if (prered >= 0.0) {
443:         /* The predicted reduction has the wrong sign.  This cannot
444:            happen in infinite precision arithmetic.  Step should
445:            be rejected! */
446:         tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
447:         tr_reject = 1;
448:       } else {
449:         /* Compute trial step and function value */
450:         VecCopy(tao->solution, tl->W);
451:         VecAXPY(tl->W, 1.0, tao->stepdirection);
452:         TaoComputeObjective(tao, tl->W, &ftrial);

454:         if (PetscIsInfOrNanReal(ftrial)) {
455:           tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
456:           tr_reject = 1;
457:         } else {
458:           /* Compute the actual reduction */
459:           actred = f - ftrial;
460:           prered = -prered;
461:           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
462:               (PetscAbsScalar(prered) <= tl->epsilon)) {
463:             kappa = 1.0;
464:           } else {
465:             kappa = actred / prered;
466:           }

468:           /* Accept or reject the step and update the radius */
469:           if (kappa < tl->eta1) {
470:             /* Reject the step */
471:             tao->trust = tl->alpha1 * PetscMin(tao->trust, norm_d);
472:             tr_reject = 1;
473:           } else {
474:             /* Accept the step */
475:             if (kappa < tl->eta2) {
476:               /* Marginally bad step */
477:               tao->trust = tl->alpha2 * PetscMin(tao->trust, norm_d);
478:             } else if (kappa < tl->eta3) {
479:               /* Reasonable step */
480:               tao->trust = tl->alpha3 * tao->trust;
481:             } else if (kappa < tl->eta4) {
482:               /* Good step */
483:               tao->trust = PetscMax(tl->alpha4 * norm_d, tao->trust);
484:             } else {
485:               /* Very good step */
486:               tao->trust = PetscMax(tl->alpha5 * norm_d, tao->trust);
487:             }
488:           }
489:         }
490:       }
491:     } else {
492:       /* Get predicted reduction */
493:       if (NTL_KSP_NASH == tl->ksp_type) {
494:         KSPNASHGetObjFcn(tao->ksp,&prered);
495:       } else if (NTL_KSP_STCG == tl->ksp_type) {
496:         KSPSTCGGetObjFcn(tao->ksp,&prered);
497:       } else { /* gltr */
498:         KSPGLTRGetObjFcn(tao->ksp,&prered);
499:       }

501:       if (prered >= 0.0) {
502:         /* The predicted reduction has the wrong sign.  This cannot
503:            happen in infinite precision arithmetic.  Step should
504:            be rejected! */
505:         tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
506:         tr_reject = 1;
507:       } else {
508:         VecCopy(tao->solution, tl->W);
509:         VecAXPY(tl->W, 1.0, tao->stepdirection);
510:         TaoComputeObjective(tao, tl->W, &ftrial);
511:         if (PetscIsInfOrNanReal(ftrial)) {
512:           tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
513:           tr_reject = 1;
514:         } else {
515:           VecDot(tao->gradient, tao->stepdirection, &gdx);

517:           actred = f - ftrial;
518:           prered = -prered;
519:           if ((PetscAbsScalar(actred) <= tl->epsilon) &&
520:               (PetscAbsScalar(prered) <= tl->epsilon)) {
521:             kappa = 1.0;
522:           } else {
523:             kappa = actred / prered;
524:           }

526:           tau_1 = tl->theta * gdx / (tl->theta * gdx - (1.0 - tl->theta) * prered + actred);
527:           tau_2 = tl->theta * gdx / (tl->theta * gdx + (1.0 + tl->theta) * prered - actred);
528:           tau_min = PetscMin(tau_1, tau_2);
529:           tau_max = PetscMax(tau_1, tau_2);

531:           if (kappa >= 1.0 - tl->mu1) {
532:             /* Great agreement; accept step and update radius */
533:             if (tau_max < 1.0) {
534:               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
535:             } else if (tau_max > tl->gamma4) {
536:               tao->trust = PetscMax(tao->trust, tl->gamma4 * norm_d);
537:             } else {
538:               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
539:             }
540:           } else if (kappa >= 1.0 - tl->mu2) {
541:             /* Good agreement */

543:             if (tau_max < tl->gamma2) {
544:               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
545:             } else if (tau_max > tl->gamma3) {
546:               tao->trust = PetscMax(tao->trust, tl->gamma3 * norm_d);
547:             } else if (tau_max < 1.0) {
548:               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
549:             } else {
550:               tao->trust = PetscMax(tao->trust, tau_max * norm_d);
551:             }
552:           } else {
553:             /* Not good agreement */
554:             if (tau_min > 1.0) {
555:               tao->trust = tl->gamma2 * PetscMin(tao->trust, norm_d);
556:             } else if (tau_max < tl->gamma1) {
557:               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
558:             } else if ((tau_min < tl->gamma1) && (tau_max >= 1.0)) {
559:               tao->trust = tl->gamma1 * PetscMin(tao->trust, norm_d);
560:             } else if ((tau_1 >= tl->gamma1) && (tau_1 < 1.0) && ((tau_2 < tl->gamma1) || (tau_2 >= 1.0))) {
561:               tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
562:             } else if ((tau_2 >= tl->gamma1) && (tau_2 < 1.0) && ((tau_1 < tl->gamma1) || (tau_2 >= 1.0))) {
563:               tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
564:             } else {
565:               tao->trust = tau_max * PetscMin(tao->trust, norm_d);
566:             }
567:             tr_reject = 1;
568:           }
569:         }
570:       }
571:     }
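
    /* Summary of the two radius-update strategies selected by
       -tao_ntl_update_type:
         reduction:     accept when kappa = actred/prered >= eta1 and scale
                        the radius by alpha1..alpha5 according to kappa;
         interpolation: accept when kappa >= 1 - mu2 and choose the new radius
                        from the interpolation factors tau_1, tau_2 above. */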

573:     if (tr_reject) {
574:       /* The trust-region constraints rejected the step.  Apply a linesearch.
575:          Check for descent direction. */
576:       VecDot(tao->stepdirection, tao->gradient, &gdx);
577:       if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
578:         /* Newton step is not a descent direction, or it produced Inf or NaN */

580:         if (NTL_PC_BFGS != tl->pc_type) {
581:           /* We don't have the BFGS matrix around and updated;
582:              must use the gradient direction in this case */
583:           VecCopy(tao->gradient, tao->stepdirection);
584:           VecScale(tao->stepdirection, -1.0);
585:           ++tl->grad;
586:           stepType = NTL_GRADIENT;
587:         } else {
588:           /* Attempt to use the BFGS direction */
589:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);
590:           VecScale(tao->stepdirection, -1.0);

592:           /* Check for success (descent direction) */
593:           VecDot(tao->stepdirection, tao->gradient, &gdx);
594:           if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
595:             /* BFGS direction is not a descent direction or produced Inf/NaN.
596:                We can assert bfgsUpdates > 1 in this case because
597:                the first solve produces the scaled gradient direction,
598:                which is guaranteed to be a descent direction */

600:             /* Use steepest descent direction (scaled) */
601:             if (f != 0.0) {
602:               delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
603:             } else {
604:               delta = 2.0 / (gnorm*gnorm);
605:             }
606:             MatLMVMSetDelta(tl->M, delta);
607:             MatLMVMReset(tl->M);
608:             MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
609:             MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);
610:             VecScale(tao->stepdirection, -1.0);

612:             bfgsUpdates = 1;
613:             ++tl->sgrad;
614:             stepType = NTL_SCALED_GRADIENT;
615:           } else {
616:             if (1 == bfgsUpdates) {
617:               /* The first BFGS direction is always the scaled gradient */
618:               ++tl->sgrad;
619:               stepType = NTL_SCALED_GRADIENT;
620:             } else {
621:               ++tl->bfgs;
622:               stepType = NTL_BFGS;
623:             }
624:           }
625:         }
626:       } else {
627:         /* Computed Newton step is descent */
628:         ++tl->newt;
629:         stepType = NTL_NEWTON;
630:       }

632:       /* Perform the linesearch */
633:       fold = f;
634:       VecCopy(tao->solution, tl->Xold);
635:       VecCopy(tao->gradient, tl->Gold);

637:       step = 1.0;
638:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
639:       TaoAddLineSearchCounts(tao);

641:       while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NTL_GRADIENT) {
642:         /* Linesearch failed */
643:         f = fold;
644:         VecCopy(tl->Xold, tao->solution);
645:         VecCopy(tl->Gold, tao->gradient);

647:         switch(stepType) {
648:         case NTL_NEWTON:
649:           /* Failed to obtain acceptable iterate with Newton step */

651:           if (NTL_PC_BFGS != tl->pc_type) {
652:             /* We don't have the BFGS matrix around and being updated;
653:                must use the gradient direction in this case */
654:             VecCopy(tao->gradient, tao->stepdirection);
655:             ++tl->grad;
656:             stepType = NTL_GRADIENT;
657:           } else {
658:             /* Attempt to use the BFGS direction */
659:             MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);


662:             /* Check for success (descent direction) */
663:             VecDot(tao->stepdirection, tao->gradient, &gdx);
664:             if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
665:               /* BFGS direction is not a descent direction or produced
666:                  Inf/NaN.  We can assert bfgsUpdates > 1 in this case;
667:                  use the steepest descent direction (scaled) */

669:               if (f != 0.0) {
670:                 delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
671:               } else {
672:                 delta = 2.0 / (gnorm*gnorm);
673:               }
674:               MatLMVMSetDelta(tl->M, delta);
675:               MatLMVMReset(tl->M);
676:               MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
677:               MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

679:               bfgsUpdates = 1;
680:               ++tl->sgrad;
681:               stepType = NTL_SCALED_GRADIENT;
682:             } else {
683:               if (1 == bfgsUpdates) {
684:                 /* The first BFGS direction is always the scaled gradient */
685:                 ++tl->sgrad;
686:                 stepType = NTL_SCALED_GRADIENT;
687:               } else {
688:                 ++tl->bfgs;
689:                 stepType = NTL_BFGS;
690:               }
691:             }
692:           }
693:           break;

695:         case NTL_BFGS:
696:           /* Can only enter if pc_type == NTL_PC_BFGS
697:              Failed to obtain acceptable iterate with BFGS step
698:              Attempt to use the scaled gradient direction */

700:           if (f != 0.0) {
701:             delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
702:           } else {
703:             delta = 2.0 / (gnorm*gnorm);
704:           }
705:           MatLMVMSetDelta(tl->M, delta);
706:           MatLMVMReset(tl->M);
707:           MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
708:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

710:           bfgsUpdates = 1;
711:           ++tl->sgrad;
712:           stepType = NTL_SCALED_GRADIENT;
713:           break;

715:         case NTL_SCALED_GRADIENT:
716:           /* Can only enter if pc_type == NTL_PC_BFGS
717:              The scaled gradient step did not produce a new iterate;
718:              attempt to use the gradient direction.
719:              Need to make sure we are not using a different diagonal scaling */
720:           MatLMVMSetScale(tl->M, tl->Diag);
721:           MatLMVMSetDelta(tl->M, 1.0);
722:           MatLMVMReset(tl->M);
723:           MatLMVMUpdate(tl->M, tao->solution, tao->gradient);
724:           MatLMVMSolve(tl->M, tao->gradient, tao->stepdirection);

726:           bfgsUpdates = 1;
727:           ++tl->grad;
728:           stepType = NTL_GRADIENT;
729:           break;
730:         }
731:         VecScale(tao->stepdirection, -1.0);

733:         /* This may be incorrect; linesearch has values for stepmax and stepmin
734:            that should be reset. */
735:         step = 1.0;
736:         TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, tao->stepdirection, &step, &ls_reason);
737:         TaoAddLineSearchCounts(tao);
738:       }
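
      /* Fallback order when the line search fails: Newton -> BFGS ->
         scaled gradient -> gradient.  The loop cannot run with
         stepType == NTL_GRADIENT, so an unpreconditioned gradient step is the
         last resort before the solver declares divergence below. */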

740:       if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
741:         /* Failed to find an improving point */
742:         f = fold;
743:         VecCopy(tl->Xold, tao->solution);
744:         VecCopy(tl->Gold, tao->gradient);
745:         tao->trust = 0.0;
746:         step = 0.0;
747:         reason = TAO_DIVERGED_LS_FAILURE;
748:         tao->reason = TAO_DIVERGED_LS_FAILURE;
749:         break;
750:       } else if (stepType == NTL_NEWTON) {
751:         if (step < tl->nu1) {
752:           /* Very bad step taken; reduce radius */
753:           tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
754:         } else if (step < tl->nu2) {
755:           /* Reasonably bad step taken; reduce radius */
756:           tao->trust = tl->omega2 * PetscMin(norm_d, tao->trust);
757:         } else if (step < tl->nu3) {
758:           /* Reasonable step was taken; leave radius alone */
759:           if (tl->omega3 < 1.0) {
760:             tao->trust = tl->omega3 * PetscMin(norm_d, tao->trust);
761:           } else if (tl->omega3 > 1.0) {
762:             tao->trust = PetscMax(tl->omega3 * norm_d, tao->trust);
763:           }
764:         } else if (step < tl->nu4) {
765:           /* Full step taken; increase the radius */
766:           tao->trust = PetscMax(tl->omega4 * norm_d, tao->trust);
767:         } else {
768:           /* More than full step taken; increase the radius */
769:           tao->trust = PetscMax(tl->omega5 * norm_d, tao->trust);
770:         }
771:       } else {
772:         /* Newton step was not good; reduce the radius */
773:         tao->trust = tl->omega1 * PetscMin(norm_d, tao->trust);
774:       }
775:     } else {
776:       /* Trust-region step is accepted */
777:       VecCopy(tl->W, tao->solution);
778:       f = ftrial;
779:       TaoComputeGradient(tao, tao->solution, tao->gradient);
780:       ++tl->ntrust;
781:     }

783:     /* The radius may have been increased; modify if it is too large */
784:     tao->trust = PetscMin(tao->trust, tl->max_radius);

786:     /* Check for convergence */
787:     VecNorm(tao->gradient, NORM_2, &gnorm);
788:     if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Not-a-Number");
789:     needH = 1;

791:     TaoMonitor(tao, tao->niter, f, gnorm, 0.0, tao->trust, &reason);
792:   }
793:   return(0);
794: }

796: /* ---------------------------------------------------------- */
799: static PetscErrorCode TaoSetUp_NTL(Tao tao)
800: {
801:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

805:   if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient); }
806:   if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection);}
807:   if (!tl->W) { VecDuplicate(tao->solution, &tl->W);}
808:   if (!tl->Xold) { VecDuplicate(tao->solution, &tl->Xold);}
809:   if (!tl->Gold) { VecDuplicate(tao->solution, &tl->Gold);}
810:   tl->Diag = 0;
811:   tl->M = 0;
812:   return(0);
813: }

815: /*------------------------------------------------------------*/
818: static PetscErrorCode TaoDestroy_NTL(Tao tao)
819: {
820:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

824:   if (tao->setupcalled) {
825:     VecDestroy(&tl->W);
826:     VecDestroy(&tl->Xold);
827:     VecDestroy(&tl->Gold);
828:   }
829:   VecDestroy(&tl->Diag);
830:   MatDestroy(&tl->M);
831:   PetscFree(tao->data);
832:   return(0);
833: }

835: /*------------------------------------------------------------*/
838: static PetscErrorCode TaoSetFromOptions_NTL(PetscOptionItems *PetscOptionsObject,Tao tao)
839: {
840:   TAO_NTL        *tl = (TAO_NTL *)tao->data;

844:   PetscOptionsHead(PetscOptionsObject,"Newton trust region with line search method for unconstrained optimization");
845:   PetscOptionsEList("-tao_ntl_ksp_type", "ksp type", "", NTL_KSP, NTL_KSP_TYPES, NTL_KSP[tl->ksp_type], &tl->ksp_type,NULL);
846:   PetscOptionsEList("-tao_ntl_pc_type", "pc type", "", NTL_PC, NTL_PC_TYPES, NTL_PC[tl->pc_type], &tl->pc_type,NULL);
847:   PetscOptionsEList("-tao_ntl_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tl->bfgs_scale_type], &tl->bfgs_scale_type,NULL);
848:   PetscOptionsEList("-tao_ntl_init_type", "radius initialization type", "", NTL_INIT, NTL_INIT_TYPES, NTL_INIT[tl->init_type], &tl->init_type,NULL);
849:   PetscOptionsEList("-tao_ntl_update_type", "radius update type", "", NTL_UPDATE, NTL_UPDATE_TYPES, NTL_UPDATE[tl->update_type], &tl->update_type,NULL);
850:   PetscOptionsReal("-tao_ntl_eta1", "poor steplength; reduce radius", "", tl->eta1, &tl->eta1,NULL);
851:   PetscOptionsReal("-tao_ntl_eta2", "reasonable steplength; leave radius alone", "", tl->eta2, &tl->eta2,NULL);
852:   PetscOptionsReal("-tao_ntl_eta3", "good steplength; increase radius", "", tl->eta3, &tl->eta3,NULL);
853:   PetscOptionsReal("-tao_ntl_eta4", "excellent steplength; greatly increase radius", "", tl->eta4, &tl->eta4,NULL);
854:   PetscOptionsReal("-tao_ntl_alpha1", "", "", tl->alpha1, &tl->alpha1,NULL);
855:   PetscOptionsReal("-tao_ntl_alpha2", "", "", tl->alpha2, &tl->alpha2,NULL);
856:   PetscOptionsReal("-tao_ntl_alpha3", "", "", tl->alpha3, &tl->alpha3,NULL);
857:   PetscOptionsReal("-tao_ntl_alpha4", "", "", tl->alpha4, &tl->alpha4,NULL);
858:   PetscOptionsReal("-tao_ntl_alpha5", "", "", tl->alpha5, &tl->alpha5,NULL);
859:   PetscOptionsReal("-tao_ntl_nu1", "poor steplength; reduce radius", "", tl->nu1, &tl->nu1,NULL);
860:   PetscOptionsReal("-tao_ntl_nu2", "reasonable steplength; leave radius alone", "", tl->nu2, &tl->nu2,NULL);
861:   PetscOptionsReal("-tao_ntl_nu3", "good steplength; increase radius", "", tl->nu3, &tl->nu3,NULL);
862:   PetscOptionsReal("-tao_ntl_nu4", "excellent steplength; greatly increase radius", "", tl->nu4, &tl->nu4,NULL);
863:   PetscOptionsReal("-tao_ntl_omega1", "", "", tl->omega1, &tl->omega1,NULL);
864:   PetscOptionsReal("-tao_ntl_omega2", "", "", tl->omega2, &tl->omega2,NULL);
865:   PetscOptionsReal("-tao_ntl_omega3", "", "", tl->omega3, &tl->omega3,NULL);
866:   PetscOptionsReal("-tao_ntl_omega4", "", "", tl->omega4, &tl->omega4,NULL);
867:   PetscOptionsReal("-tao_ntl_omega5", "", "", tl->omega5, &tl->omega5,NULL);
868:   PetscOptionsReal("-tao_ntl_mu1_i", "", "", tl->mu1_i, &tl->mu1_i,NULL);
869:   PetscOptionsReal("-tao_ntl_mu2_i", "", "", tl->mu2_i, &tl->mu2_i,NULL);
870:   PetscOptionsReal("-tao_ntl_gamma1_i", "", "", tl->gamma1_i, &tl->gamma1_i,NULL);
871:   PetscOptionsReal("-tao_ntl_gamma2_i", "", "", tl->gamma2_i, &tl->gamma2_i,NULL);
872:   PetscOptionsReal("-tao_ntl_gamma3_i", "", "", tl->gamma3_i, &tl->gamma3_i,NULL);
873:   PetscOptionsReal("-tao_ntl_gamma4_i", "", "", tl->gamma4_i, &tl->gamma4_i,NULL);
874:   PetscOptionsReal("-tao_ntl_theta_i", "", "", tl->theta_i, &tl->theta_i,NULL);
875:   PetscOptionsReal("-tao_ntl_mu1", "", "", tl->mu1, &tl->mu1,NULL);
876:   PetscOptionsReal("-tao_ntl_mu2", "", "", tl->mu2, &tl->mu2,NULL);
877:   PetscOptionsReal("-tao_ntl_gamma1", "", "", tl->gamma1, &tl->gamma1,NULL);
878:   PetscOptionsReal("-tao_ntl_gamma2", "", "", tl->gamma2, &tl->gamma2,NULL);
879:   PetscOptionsReal("-tao_ntl_gamma3", "", "", tl->gamma3, &tl->gamma3,NULL);
880:   PetscOptionsReal("-tao_ntl_gamma4", "", "", tl->gamma4, &tl->gamma4,NULL);
881:   PetscOptionsReal("-tao_ntl_theta", "", "", tl->theta, &tl->theta,NULL);
882:   PetscOptionsReal("-tao_ntl_min_radius", "lower bound on initial radius", "", tl->min_radius, &tl->min_radius,NULL);
883:   PetscOptionsReal("-tao_ntl_max_radius", "upper bound on radius", "", tl->max_radius, &tl->max_radius,NULL);
884:   PetscOptionsReal("-tao_ntl_epsilon", "tolerance used when computing actual and predicted reduction", "", tl->epsilon, &tl->epsilon,NULL);
885:   PetscOptionsTail();
886:   TaoLineSearchSetFromOptions(tao->linesearch);
887:   KSPSetFromOptions(tao->ksp);
888:   return(0);
889: }
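
/* Example of configuring the solver from the command line using the options
   registered above (the values shown are illustrative, not recommendations):

     -tao_ntl_ksp_type gltr -tao_ntl_pc_type bfgs \
     -tao_ntl_init_type interpolation -tao_ntl_update_type interpolation \
     -tao_ntl_max_radius 1.0e3
*/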

891: /*------------------------------------------------------------*/
894: static PetscErrorCode TaoView_NTL(Tao tao, PetscViewer viewer)
895: {
896:   TAO_NTL        *tl = (TAO_NTL *)tao->data;
897:   PetscInt       nrejects;
898:   PetscBool      isascii;

902:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
903:   if (isascii) {
904:     PetscViewerASCIIPushTab(viewer);
905:     if (NTL_PC_BFGS == tl->pc_type && tl->M) {
906:       MatLMVMGetRejects(tl->M, &nrejects);
907:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects);
908:     }
909:     PetscViewerASCIIPrintf(viewer, "Trust-region steps: %D\n", tl->ntrust);
910:     PetscViewerASCIIPrintf(viewer, "Newton search steps: %D\n", tl->newt);
911:     PetscViewerASCIIPrintf(viewer, "BFGS search steps: %D\n", tl->bfgs);
912:     PetscViewerASCIIPrintf(viewer, "Scaled gradient search steps: %D\n", tl->sgrad);
913:     PetscViewerASCIIPrintf(viewer, "Gradient search steps: %D\n", tl->grad);
914:     PetscViewerASCIIPopTab(viewer);
915:   }
916:   return(0);
917: }

919: /* ---------------------------------------------------------- */
920: /*MC
921:   TAONTL - Newton's method with trust region and line search
922:   for unconstrained minimization.
923:   At each iteration, the method approximately solves the trust-region
924:   subproblem for d and performs a line search in the d direction:

926:             min_d  0.5 d^T H_k d + g_k^T d,   subject to   ||d|| <= Delta_k

928:   Options Database Keys:
929: + -tao_ntl_ksp_type - "nash","stcg","gltr"
930: . -tao_ntl_pc_type - "none","ahess","bfgs","petsc"
931: . -tao_ntl_bfgs_scale_type - type of scaling with bfgs pc, "ahess" or "bfgs"
932: . -tao_ntl_init_type - "constant","direction","interpolation"
933: . -tao_ntl_update_type - "reduction","interpolation"
934: . -tao_ntl_min_radius - lower bound on trust region radius
935: . -tao_ntl_max_radius - upper bound on trust region radius
936: . -tao_ntl_epsilon - tolerance for accepting actual / predicted reduction
937: . -tao_ntl_mu1_i - mu1 interpolation init factor
938: . -tao_ntl_mu2_i - mu2 interpolation init factor
939: . -tao_ntl_gamma1_i - gamma1 interpolation init factor
940: . -tao_ntl_gamma2_i - gamma2 interpolation init factor
941: . -tao_ntl_gamma3_i - gamma3 interpolation init factor
942: . -tao_ntl_gamma4_i - gamma4 interpolation init factor
943: . -tao_ntl_theta_i - theta interpolation init factor
944: . -tao_ntl_eta1 - eta1 reduction update factor
945: . -tao_ntl_eta2 - eta2 reduction update factor
946: . -tao_ntl_eta3 - eta3 reduction update factor
947: . -tao_ntl_eta4 - eta4 reduction update factor
948: . -tao_ntl_alpha1 - alpha1 reduction update factor
949: . -tao_ntl_alpha2 - alpha2 reduction update factor
950: . -tao_ntl_alpha3 - alpha3 reduction update factor
951: . -tao_ntl_alpha4 - alpha4 reduction update factor
952: . -tao_ntl_alpha5 - alpha5 reduction update factor
953: . -tao_ntl_mu1 - mu1 interpolation update
954: . -tao_ntl_mu2 - mu2 interpolation update
955: . -tao_ntl_gamma1 - gamma1 interpolation update
956: . -tao_ntl_gamma2 - gamma2 interpolation update
957: . -tao_ntl_gamma3 - gamma3 interpolation update
958: . -tao_ntl_gamma4 - gamma4 interpolation update
959: - -tao_ntl_theta - theta interpolation update

961:   Level: beginner
962: M*/
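
/* A minimal usage sketch (not part of this file); the calls follow the
   PETSc 3.7 Tao interface, and FormFunctionGradient/FormHessian are
   hypothetical user callbacks:

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD, &tao);
     TaoSetType(tao, TAONTL);
     TaoSetInitialVector(tao, x);
     TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
     TaoSetHessianRoutine(tao, H, H, FormHessian, &user);
     TaoSetFromOptions(tao);
     TaoSolve(tao);
     TaoDestroy(&tao);
*/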

966: PETSC_EXTERN PetscErrorCode TaoCreate_NTL(Tao tao)
967: {
968:   TAO_NTL        *tl;
970:   const char     *morethuente_type = TAOLINESEARCHMT;

973:   PetscNewLog(tao,&tl);
974:   tao->ops->setup = TaoSetUp_NTL;
975:   tao->ops->solve = TaoSolve_NTL;
976:   tao->ops->view = TaoView_NTL;
977:   tao->ops->setfromoptions = TaoSetFromOptions_NTL;
978:   tao->ops->destroy = TaoDestroy_NTL;

980:   /* Override default settings (unless already changed) */
981:   if (!tao->max_it_changed) tao->max_it = 50;
982:   if (!tao->trust0_changed) tao->trust0 = 100.0;

984:   tao->data = (void*)tl;

986:   /* Default values for trust-region radius update based on steplength */
987:   tl->nu1 = 0.25;
988:   tl->nu2 = 0.50;
989:   tl->nu3 = 1.00;
990:   tl->nu4 = 1.25;

992:   tl->omega1 = 0.25;
993:   tl->omega2 = 0.50;
994:   tl->omega3 = 1.00;
995:   tl->omega4 = 2.00;
996:   tl->omega5 = 4.00;

998:   /* Default values for trust-region radius update based on reduction */
999:   tl->eta1 = 1.0e-4;
1000:   tl->eta2 = 0.25;
1001:   tl->eta3 = 0.50;
1002:   tl->eta4 = 0.90;

1004:   tl->alpha1 = 0.25;
1005:   tl->alpha2 = 0.50;
1006:   tl->alpha3 = 1.00;
1007:   tl->alpha4 = 2.00;
1008:   tl->alpha5 = 4.00;

1010:   /* Default values for trust-region radius update based on interpolation */
1011:   tl->mu1 = 0.10;
1012:   tl->mu2 = 0.50;

1014:   tl->gamma1 = 0.25;
1015:   tl->gamma2 = 0.50;
1016:   tl->gamma3 = 2.00;
1017:   tl->gamma4 = 4.00;

1019:   tl->theta = 0.05;

1021:   /* Default values for trust region initialization based on interpolation */
1022:   tl->mu1_i = 0.35;
1023:   tl->mu2_i = 0.50;

1025:   tl->gamma1_i = 0.0625;
1026:   tl->gamma2_i = 0.5;
1027:   tl->gamma3_i = 2.0;
1028:   tl->gamma4_i = 5.0;

1030:   tl->theta_i = 0.25;

1032:   /* Remaining parameters */
1033:   tl->min_radius = 1.0e-10;
1034:   tl->max_radius = 1.0e10;
1035:   tl->epsilon = 1.0e-6;

1037:   tl->ksp_type        = NTL_KSP_STCG;
1038:   tl->pc_type         = NTL_PC_BFGS;
1039:   tl->bfgs_scale_type = BFGS_SCALE_AHESS;
1040:   tl->init_type       = NTL_INIT_INTERPOLATION;
1041:   tl->update_type     = NTL_UPDATE_REDUCTION;

1043:   TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
1044:   TaoLineSearchSetType(tao->linesearch, morethuente_type);
1045:   TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
1046:   TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
1047:   KSPCreate(((PetscObject)tao)->comm, &tao->ksp);
1048:   KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix);
1049:   return(0);
1050: }