Actual source code: lmvm.c

petsc-3.9.4 2018-09-11
#include <petsctaolinesearch.h>
#include <../src/tao/matrix/lmvmmat.h>
#include <../src/tao/unconstrained/impls/lmvm/lmvm.h>

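/* Types of steps, in fallback order: the full BFGS direction, a scaled
   steepest-descent direction, and an unscaled steepest-descent direction */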
#define LMVM_BFGS                0
#define LMVM_SCALED_GRADIENT     1
#define LMVM_GRADIENT            2

static PetscErrorCode TaoSolve_LMVM(Tao tao)
{
  TAO_LMVM                     *lmP = (TAO_LMVM *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscInt                     stepType, nupdates;
  PetscBool                    recycle;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;


  if (tao->XL || tao->XU || tao->ops->computebounds) {
    PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by lmvm algorithm\n");
  }

  /*  Check convergence criteria */
  TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
  TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);

  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
  TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
  (*tao->ops->convergencetest)(tao,tao->cnvP);
  if (tao->reason != TAO_CONTINUE_ITERATING) return(0);

  /*  Set initial scaling for the function */
  if (f != 0.0) {
    delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  MatLMVMSetDelta(lmP->M,delta);
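  /* Note: with initial Hessian approximation (1/delta) I, the full step on
     the quadratic model m(d) = f + g.d + (1/(2 delta)) d.d lowers the model
     value from f to f - |f|, i.e. to zero when f > 0.  The heuristic
     delta = 2|f|/||g||^2 therefore assumes the optimal objective value is
     near zero; the f == 0 branch simply avoids a zero scaling. */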

  /*  If recycling is disabled, zero the step counters and reset the
      limited-memory matrix so no stale curvature information carries
      over from a previous solve */
  MatLMVMGetRecycleFlag(lmP->M, &recycle);
  if (!recycle) {
    lmP->bfgs = 0;
    lmP->sgrad = 0;
    lmP->grad = 0;
    MatLMVMReset(lmP->M);
  }
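  /* When recycling is enabled (-tao_lmm_recycle), the stored update pairs
     and step counters from the previous TaoSolve() are retained, which can
     warm-start a sequence of related solves */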

  /*  Have not converged; continue with the quasi-Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /*  Compute direction */
    MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);
    MatLMVMSolve(lmP->M, tao->gradient, lmP->D);

    /*  Check for success (descent direction) */
    VecDot(lmP->D, tao->gradient, &gdx);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The step is not a descent direction, or the direction produced
         Inf/NaN.  (D is negated below, so descent requires gdx = D.g > 0
         here.)  We can assert nupdates > 1 in this case because the
         first solve produces the scaled gradient direction, which is
         guaranteed to be descent.

         Fall back to the (scaled) steepest descent direction.
      */

      if (f != 0.0) {
        delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      MatLMVMSetDelta(lmP->M, delta);
      MatLMVMReset(lmP->M);
      MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
      MatLMVMSolve(lmP->M, tao->gradient, lmP->D);

      /* After a reset the direction is a scaled gradient step, which
         cannot be NaN, so there is no need to recheck. */
      ++lmP->sgrad;
      stepType = LMVM_SCALED_GRADIENT;
    } else {
      MatLMVMGetUpdates(lmP->M, &nupdates);
      if (1 == nupdates) {
        /*  The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = LMVM_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = LMVM_BFGS;
      }
    }
    VecScale(lmP->D, -1.0);

    /*  Perform the linesearch */
    fold = f;
    VecCopy(tao->solution, lmP->Xold);
    VecCopy(tao->gradient, lmP->Gold);

    TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);
    TaoAddLineSearchCounts(tao);

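    /* If the line search fails, fall back through progressively more
       conservative directions: BFGS -> scaled gradient -> gradient.
       Only when the plain gradient direction also fails do we give up. */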
    while (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER && (stepType != LMVM_GRADIENT)) {
      /*  Line search failed: restore the previous point and retry with a
          more conservative direction */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);

      switch(stepType) {
      case LMVM_BFGS:
        /*  Failed to obtain an acceptable iterate with the BFGS step;
            attempt to use the scaled gradient direction instead */

        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        } else {
          delta = 2.0 / (gnorm*gnorm);
        }
        MatLMVMSetDelta(lmP->M, delta);
        MatLMVMReset(lmP->M);
        MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
        MatLMVMSolve(lmP->M, tao->gradient, lmP->D);

        /* After a reset the direction is a scaled gradient step, which
           cannot be NaN, so there is no need to recheck. */
        --lmP->bfgs;
        ++lmP->sgrad;
        stepType = LMVM_SCALED_GRADIENT;
        break;

      case LMVM_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the plain gradient direction.  Setting delta
           to 1.0 ensures that no leftover diagonal scaling is applied. */
        MatLMVMSetDelta(lmP->M, 1.0);
        MatLMVMReset(lmP->M);
        MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
        MatLMVMSolve(lmP->M, tao->gradient, lmP->D);

        --lmP->sgrad;
        ++lmP->grad;
        stepType = LMVM_GRADIENT;
        break;
      }
      VecScale(lmP->D, -1.0);

      /*  Perform the linesearch */
      TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);
      TaoAddLineSearchCounts(tao);
    }

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /*  Failed to find an improving point */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);
      step = 0.0;
      tao->reason = TAO_DIVERGED_LS_FAILURE;
    }

    /*  Check for termination */
    TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);

    tao->niter++;
    TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
    TaoMonitor(tao,tao->niter,f,gnorm,0.0,step);
    (*tao->ops->convergencetest)(tao,tao->cnvP);
  }
  return(0);
}

static PetscErrorCode TaoSetUp_LMVM(Tao tao)
{
  TAO_LMVM       *lmP = (TAO_LMVM *)tao->data;
  PetscInt       n,N;
  KSP            H0ksp;

  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
  if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
  if (!lmP->D) {VecDuplicate(tao->solution,&lmP->D);}
  if (!lmP->Xold) {VecDuplicate(tao->solution,&lmP->Xold);}
  if (!lmP->Gold) {VecDuplicate(tao->solution,&lmP->Gold);}

  /*  Create matrix for the limited memory approximation */
  VecGetLocalSize(tao->solution,&n);
  VecGetSize(tao->solution,&N);
  MatCreateLMVM(((PetscObject)tao)->comm,n,N,&lmP->M);
  MatLMVMAllocateVectors(lmP->M,tao->solution);
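  /* The LMVM matrix is matrix-free: it stores a limited history of update
     pairs and applies the approximate inverse Hessian in MatLMVMSolve()
     rather than assembling an explicit matrix */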

  /* If the user has set a matrix to solve as the initial H0, set the options prefix here, and set up the KSP */
  if (lmP->H0) {
    const char *prefix;
    PC H0pc;

    MatLMVMSetH0(lmP->M, lmP->H0);
    MatLMVMGetH0KSP(lmP->M, &H0ksp);

    TaoGetOptionsPrefix(tao, &prefix);
    KSPSetOptionsPrefix(H0ksp, prefix);
    PetscObjectAppendOptionsPrefix((PetscObject)H0ksp, "tao_h0_");
    KSPGetPC(H0ksp, &H0pc);
    PetscObjectAppendOptionsPrefix((PetscObject)H0pc,  "tao_h0_");
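    /* The appended "tao_h0_" prefix exposes the inner solve to the options
       database, e.g. -tao_h0_ksp_type cg -tao_h0_pc_type jacobi (each
       prepended by any existing Tao options prefix) */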

    KSPSetFromOptions(H0ksp);
    KSPSetUp(H0ksp);
  }

  return(0);
}

/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_LMVM(Tao tao)
{
  TAO_LMVM       *lmP = (TAO_LMVM *)tao->data;
  PetscBool      recycle;

  if (lmP->M) {
    MatLMVMGetRecycleFlag(lmP->M, &recycle);
    if (recycle) {
      PetscInfo(tao, "WARNING: TaoDestroy() called when LMVM recycling is enabled!\n");
    }
  }

  if (tao->setupcalled) {
    VecDestroy(&lmP->Xold);
    VecDestroy(&lmP->Gold);
    VecDestroy(&lmP->D);
    MatDestroy(&lmP->M);
  }

  if (lmP->H0) {
    PetscObjectDereference((PetscObject)lmP->H0);
  }

  PetscFree(tao->data);

  return(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_LMVM(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  PetscOptionsHead(PetscOptionsObject,"Limited-memory variable-metric method for unconstrained optimization");
  TaoLineSearchSetFromOptions(tao->linesearch);
  PetscOptionsTail();
  return(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoView_LMVM(Tao tao, PetscViewer viewer)
{
  TAO_LMVM       *lm = (TAO_LMVM *)tao->data;
  PetscBool      isascii, recycle;
  PetscInt       recycled_its;

  PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
  if (isascii) {
    PetscViewerASCIIPushTab(viewer);
    PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", lm->bfgs);
    PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", lm->sgrad);
    PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", lm->grad);
    MatLMVMGetRecycleFlag(lm->M, &recycle);
    if (recycle) {
      PetscViewerASCIIPrintf(viewer, "Recycle: on\n");
      recycled_its = lm->bfgs + lm->sgrad + lm->grad;
      PetscViewerASCIIPrintf(viewer, "Total recycled iterations: %D\n", recycled_its);
    }
    PetscViewerASCIIPopTab(viewer);
  }
  return(0);
}

/* ---------------------------------------------------------- */

/*MC
     TAOLMVM - The Limited-Memory Variable Metric method is a quasi-Newton
     optimization solver for unconstrained minimization. At each iteration
     it solves the Newton system

              H_k d_k = -g_k

     using an approximation B_k in place of H_k, where B_k is built from
     the BFGS update formula. A Moré-Thuente line search is then used to
     compute the step length in the direction d_k.

  Options Database Keys:
+     -tao_lmm_vectors - number of vectors to use for the approximation
.     -tao_lmm_scale_type - "none", "scalar", "broyden"
.     -tao_lmm_limit_type - "none", "average", "relative", "absolute"
.     -tao_lmm_rescale_type - "none", "scalar", "gl"
.     -tao_lmm_limit_mu - mu limiting factor
.     -tao_lmm_limit_nu - nu limiting factor
.     -tao_lmm_delta_min - minimum delta value
.     -tao_lmm_delta_max - maximum delta value
.     -tao_lmm_broyden_phi - phi factor for Broyden scaling
.     -tao_lmm_scalar_alpha - alpha factor for scalar scaling
.     -tao_lmm_rescale_alpha - alpha factor for rescaling the diagonal
.     -tao_lmm_rescale_beta - beta factor for rescaling the diagonal
.     -tao_lmm_scalar_history - amount of history for scalar scaling
.     -tao_lmm_rescale_history - amount of history for rescaling the diagonal
.     -tao_lmm_eps - rejection tolerance
-     -tao_lmm_recycle - enable recycling of LMVM updates between TaoSolve() calls

  Level: beginner
M*/
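
/*
   A minimal driver sketch (not part of this file) showing typical use of
   this solver; FormFunctionGradient() and the "user" context are
   hypothetical placeholders for application-provided code:

      Tao tao;
      Vec x;                                      // solution vector created by the application
      TaoCreate(PETSC_COMM_WORLD, &tao);
      TaoSetType(tao, TAOLMVM);                   // select this algorithm
      TaoSetInitialVector(tao, x);
      TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
      TaoSetFromOptions(tao);                     // honors the -tao_lmm_* keys listed above
      TaoSolve(tao);
      TaoDestroy(&tao);
*/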

PETSC_EXTERN PetscErrorCode TaoCreate_LMVM(Tao tao)
{
  TAO_LMVM       *lmP;
  const char     *morethuente_type = TAOLINESEARCHMT;

  tao->ops->setup = TaoSetUp_LMVM;
  tao->ops->solve = TaoSolve_LMVM;
  tao->ops->view = TaoView_LMVM;
  tao->ops->setfromoptions = TaoSetFromOptions_LMVM;
  tao->ops->destroy = TaoDestroy_LMVM;

  PetscNewLog(tao,&lmP);
  lmP->D    = NULL;
  lmP->M    = NULL;
  lmP->Xold = NULL;
  lmP->Gold = NULL;
  lmP->H0   = NULL;

  tao->data = (void*)lmP;
  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
  PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
  TaoLineSearchSetType(tao->linesearch,morethuente_type);
  TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
  TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
  return(0);
}