Actual source code: owlqn.c

petsc-3.9.4 2018-09-11
#include <petsctaolinesearch.h>
#include <../src/tao/matrix/lmvmmat.h>
#include <../src/tao/unconstrained/impls/owlqn/owlqn.h>

#define OWLQN_BFGS                0
#define OWLQN_SCALED_GRADIENT     1
#define OWLQN_GRADIENT            2
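
/* Classification of the step taken in each iteration: the full BFGS direction,
   the scaled gradient direction (always used for the first iterate and after a
   Hessian reset), or the unscaled gradient direction (the last resort when the
   line search fails on the other two). */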

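/* Orthant projection: zero every component of the trial direction d whose sign
   disagrees with the pseudo-gradient g. Since d is negated before the line
   search, the resulting step follows the negative pseudo-gradient coordinate-wise
   and stays in the orthant where the L1 term is differentiable (cf. Andrew and
   Gao, 2007). */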
static PetscErrorCode ProjDirect_OWLQN(Vec d, Vec g)
{
  PetscErrorCode  ierr;
  const PetscReal *gptr;
  PetscReal       *dptr;
  PetscInt        low,high,low1,high1,i;

  PetscFunctionBegin;
  ierr = VecGetOwnershipRange(d,&low,&high);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(g,&low1,&high1);CHKERRQ(ierr);

  ierr = VecGetArrayRead(g,&gptr);CHKERRQ(ierr);
  ierr = VecGetArray(d,&dptr);CHKERRQ(ierr);
  for (i = 0; i < high-low; i++) {
    /* Zero any component of d that does not share the sign of the pseudo-gradient */
    if (dptr[i] * gptr[i] <= 0.0) {
      dptr[i] = 0.0;
    }
  }
  ierr = VecRestoreArray(d,&dptr);CHKERRQ(ierr);
  ierr = VecRestoreArrayRead(g,&gptr);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

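/* Pseudo-gradient of f(x) + lambda*||x||_1: away from zero the L1 term is
   differentiable and contributes lambda*sign(x_i); at x_i == 0 the one-sided
   derivative that points downhill is chosen, and the component is set to zero
   when no descent is possible (0 lies in the subdifferential). */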
static PetscErrorCode ComputePseudoGrad_OWLQN(Vec x, Vec gv, PetscReal lambda)
{
  PetscErrorCode  ierr;
  const PetscReal *xptr;
  PetscReal       *gptr;
  PetscInt        low,high,low1,high1,i;

  PetscFunctionBegin;
  ierr = VecGetOwnershipRange(x,&low,&high);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(gv,&low1,&high1);CHKERRQ(ierr);

  ierr = VecGetArrayRead(x,&xptr);CHKERRQ(ierr);
  ierr = VecGetArray(gv,&gptr);CHKERRQ(ierr);
  for (i = 0; i < high-low; i++) {
    if (xptr[i] < 0.0)               gptr[i] = gptr[i] - lambda; /* d/dx of lambda*|x| is -lambda for x < 0 */
    else if (xptr[i] > 0.0)          gptr[i] = gptr[i] + lambda; /* and +lambda for x > 0 */
    else if (gptr[i] + lambda < 0.0) gptr[i] = gptr[i] + lambda; /* x == 0: right derivative is negative */
    else if (gptr[i] - lambda > 0.0) gptr[i] = gptr[i] - lambda; /* x == 0: left derivative is positive */
    else                             gptr[i] = 0.0;              /* x == 0: no descent possible */
  }
  ierr = VecRestoreArray(gv,&gptr);CHKERRQ(ierr);
  ierr = VecRestoreArrayRead(x,&xptr);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

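/* Outer loop of the orthant-wise LMVM solver: build the pseudo-gradient, solve
   with the limited-memory quasi-Newton approximation, project the direction
   onto the current orthant, and line-search along it; on failure fall back from
   the BFGS step to the scaled gradient and finally to the unscaled gradient. */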
static PetscErrorCode TaoSolve_OWLQN(Tao tao)
{
  TAO_OWLQN                    *lmP = (TAO_OWLQN *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscErrorCode               ierr;
  PetscInt                     stepType;
  PetscInt                     iter = 0;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by owlqn algorithm\n");CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);

  ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

  ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

  ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1,"User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
  ierr = TaoMonitor(tao,iter,f,gnorm,0.0,step);CHKERRQ(ierr);
  ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set the initial scaling (initial inverse-Hessian approximation): delta = 2|f| / ||g||^2 */
  if (f != 0.0) {
    delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(lmP->M,delta);CHKERRQ(ierr);

  /* Set counters for gradient/reset steps */
  lmP->bfgs  = 0;
  lmP->sgrad = 0;
  lmP->grad  = 0;

  /* Have not converged; continue with the quasi-Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

    ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

    ++lmP->bfgs;

    /* Check for success (descent direction) */
    ierr = VecDot(lmP->D, lmP->GV, &gdx);CHKERRQ(ierr);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The step is not a descent direction, or the direction produced Inf or NaN.
         We can assert lmP->bfgs > 1 in this case because the first solve produces
         the scaled gradient direction, which is guaranteed to be descent.

         Use the steepest descent direction (scaled). */
      ++lmP->grad;

      if (f != 0.0) {
        delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

      ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = OWLQN_SCALED_GRADIENT;
    } else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = OWLQN_BFGS;
      }
    }

    /* Negate the direction: D currently points along the pseudo-gradient */
    ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, lmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, lmP->Gold);CHKERRQ(ierr);

    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    while (((int)ls_status < 0) && (stepType != OWLQN_GRADIENT)) {
      /* Line search failed: restore the previous iterate, reset the factors,
         and retry with a simpler step */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

      ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

      switch(stepType) {
      case OWLQN_BFGS:
        /* Failed to obtain an acceptable iterate with the BFGS step;
           attempt to use the scaled gradient direction */
        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        } else {
          delta = 2.0 / (gnorm*gnorm);
        }
        ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
        break;

      case OWLQN_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        ierr = MatLMVMSetDelta(lmP->M, 1.0);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = OWLQN_GRADIENT;
        break;
      }
      ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

      /* Perform the linesearch */
      ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    }

    if ((int)ls_status < 0) {
      /* Failed to find an improving point */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);
      step = 0.0;
    } else {
      /* The line search was given GV as its gradient vector, so GV now holds
         the gradient at the new iterate; copy it back into tao->gradient */
      ierr = VecCopy(lmP->GV, tao->gradient);CHKERRQ(ierr);
    }

    ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

    /* Check for termination */
    ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

    iter++;
    ierr = TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);CHKERRQ(ierr);
    ierr = TaoMonitor(tao,iter,f,gnorm,0.0,step);CHKERRQ(ierr);
    ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);

    if ((int)ls_status < 0) break;
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode TaoSetUp_OWLQN(Tao tao)
{
  TAO_OWLQN      *lmP = (TAO_OWLQN *)tao->data;
  PetscInt       n,N;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) {ierr = VecDuplicate(tao->solution,&tao->gradient);CHKERRQ(ierr);}
  if (!tao->stepdirection) {ierr = VecDuplicate(tao->solution,&tao->stepdirection);CHKERRQ(ierr);}
  if (!lmP->D) {ierr = VecDuplicate(tao->solution,&lmP->D);CHKERRQ(ierr);}
  if (!lmP->GV) {ierr = VecDuplicate(tao->solution,&lmP->GV);CHKERRQ(ierr);}
  if (!lmP->Xold) {ierr = VecDuplicate(tao->solution,&lmP->Xold);CHKERRQ(ierr);}
  if (!lmP->Gold) {ierr = VecDuplicate(tao->solution,&lmP->Gold);CHKERRQ(ierr);}

  /* Create matrix for the limited memory approximation */
  ierr = VecGetLocalSize(tao->solution,&n);CHKERRQ(ierr);
  ierr = VecGetSize(tao->solution,&N);CHKERRQ(ierr);
  ierr = MatCreateLMVM(((PetscObject)tao)->comm,n,N,&lmP->M);CHKERRQ(ierr);
  ierr = MatLMVMAllocateVectors(lmP->M,tao->solution);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_OWLQN(Tao tao)
{
  TAO_OWLQN      *lmP = (TAO_OWLQN *)tao->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (tao->setupcalled) {
    ierr = VecDestroy(&lmP->Xold);CHKERRQ(ierr);
    ierr = VecDestroy(&lmP->Gold);CHKERRQ(ierr);
    ierr = VecDestroy(&lmP->D);CHKERRQ(ierr);
    ierr = MatDestroy(&lmP->M);CHKERRQ(ierr);
    ierr = VecDestroy(&lmP->GV);CHKERRQ(ierr);
  }
  ierr = PetscFree(tao->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_OWLQN(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_OWLQN      *lmP = (TAO_OWLQN *)tao->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"Orthant-Wise Limited-memory method for Quasi-Newton unconstrained optimization");CHKERRQ(ierr);
  ierr = PetscOptionsReal("-tao_owlqn_lambda","regulariser weight","",lmP->lambda,&lmP->lambda,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  ierr = TaoLineSearchSetFromOptions(tao->linesearch);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoView_OWLQN(Tao tao, PetscViewer viewer)
{
  TAO_OWLQN      *lm = (TAO_OWLQN *)tao->data;
  PetscBool      isascii;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);CHKERRQ(ierr);
  if (isascii) {
    ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", lm->bfgs);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", lm->sgrad);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", lm->grad);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------- */
/*MC
  TAOOWLQN - orthant-wise limited-memory quasi-Newton algorithm

  Options Database Keys:
. -tao_owlqn_lambda - regulariser weight

  Level: beginner
M*/
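
/* A minimal usage sketch (not part of the original file): the solver is
   selected like any other Tao type. FormFunctionGradient, the user context,
   and the initial vector x are hypothetical application code.

     Tao tao;
     ierr = TaoCreate(PETSC_COMM_WORLD,&tao);CHKERRQ(ierr);
     ierr = TaoSetType(tao,TAOOWLQN);CHKERRQ(ierr);
     ierr = TaoSetInitialVector(tao,x);CHKERRQ(ierr);
     ierr = TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);CHKERRQ(ierr);
     ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);
     ierr = TaoSolve(tao);CHKERRQ(ierr);
     ierr = TaoDestroy(&tao);CHKERRQ(ierr);
*/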


PETSC_EXTERN PetscErrorCode TaoCreate_OWLQN(Tao tao)
{
  TAO_OWLQN      *lmP;
  const char     *owarmijo_type = TAOLINESEARCHOWARMIJO;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  tao->ops->setup          = TaoSetUp_OWLQN;
  tao->ops->solve          = TaoSolve_OWLQN;
  tao->ops->view           = TaoView_OWLQN;
  tao->ops->setfromoptions = TaoSetFromOptions_OWLQN;
  tao->ops->destroy        = TaoDestroy_OWLQN;

  ierr = PetscNewLog(tao,&lmP);CHKERRQ(ierr);
  lmP->D      = NULL;
  lmP->M      = NULL;
  lmP->GV     = NULL;
  lmP->Xold   = NULL;
  lmP->Gold   = NULL;
  lmP->lambda = 1.0;

  tao->data = (void*)lmP;
  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  ierr = TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);CHKERRQ(ierr);
  ierr = TaoLineSearchSetType(tao->linesearch,owarmijo_type);CHKERRQ(ierr);
  ierr = TaoLineSearchUseTaoRoutines(tao->linesearch,tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}