Actual source code: owlqn.c

#include <petsctaolinesearch.h>
#include <../src/tao/unconstrained/impls/owlqn/owlqn.h>

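/* Identifiers recording which kind of search direction produced the current step */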
#define OWLQN_BFGS                0
#define OWLQN_SCALED_GRADIENT     1
#define OWLQN_GRADIENT            2

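/* Direction-alignment step of OWL-QN: any component of the trial direction d whose
   sign disagrees with the corresponding component of the pseudo-gradient g is zeroed,
   so that (after the direction is negated in the solver below) every surviving
   component points along the negative pseudo-gradient. */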
static PetscErrorCode ProjDirect_OWLQN(Vec d, Vec g)
{
  PetscErrorCode  ierr;
  const PetscReal *gptr;
  PetscReal       *dptr;
  PetscInt        low,high,low1,high1,i;

  ierr = VecGetOwnershipRange(d,&low,&high);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(g,&low1,&high1);CHKERRQ(ierr);

  VecGetArrayRead(g,&gptr);
  VecGetArray(d,&dptr);
  for (i = 0; i < high-low; i++) {
    if (dptr[i] * gptr[i] <= 0.0) {
      dptr[i] = 0.0;
    }
  }
  VecRestoreArray(d,&dptr);
  VecRestoreArrayRead(g,&gptr);
  return(0);
}

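/* Build the pseudo-gradient of F(x) = f(x) + lambda*||x||_1 in place in gv, where gv
   enters holding the gradient g of the smooth part f:
     x_i < 0 : g_i - lambda
     x_i > 0 : g_i + lambda
     x_i = 0 : g_i + lambda  if g_i + lambda < 0
               g_i - lambda  if g_i - lambda > 0
               0             otherwise (zero lies in the subdifferential) */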
static PetscErrorCode ComputePseudoGrad_OWLQN(Vec x, Vec gv, PetscReal lambda)
{
  PetscErrorCode  ierr;
  const PetscReal *xptr;
  PetscReal       *gptr;
  PetscInt        low,high,low1,high1,i;

  ierr = VecGetOwnershipRange(x,&low,&high);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(gv,&low1,&high1);CHKERRQ(ierr);

  VecGetArrayRead(x,&xptr);
  VecGetArray(gv,&gptr);
  for (i = 0; i < high-low; i++) {
    if (xptr[i] < 0.0)               gptr[i] = gptr[i] - lambda;
    else if (xptr[i] > 0.0)          gptr[i] = gptr[i] + lambda;
    else if (gptr[i] + lambda < 0.0) gptr[i] = gptr[i] + lambda;
    else if (gptr[i] - lambda > 0.0) gptr[i] = gptr[i] - lambda;
    else                             gptr[i] = 0.0;
  }
  VecRestoreArray(gv,&gptr);
  VecRestoreArrayRead(x,&xptr);
  return(0);
}

static PetscErrorCode TaoSolve_OWLQN(Tao tao)
{
  TAO_OWLQN                    *lmP = (TAO_OWLQN *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscInt                     stepType;
  PetscInt                     iter = 0;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  if (tao->XL || tao->XU || tao->ops->computebounds) {
    PetscInfo(tao,"WARNING: Variable bounds have been set but will be ignored by owlqn algorithm\n");
  }

  /* Check convergence criteria */
  TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
  VecCopy(tao->gradient, lmP->GV);
  ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);
  VecNorm(lmP->GV,NORM_2,&gnorm);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PetscObjectComm((PetscObject)tao),PETSC_ERR_USER, "User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
  TaoMonitor(tao,iter,f,gnorm,0.0,step);
  (*tao->ops->convergencetest)(tao,tao->cnvP);
  if (tao->reason != TAO_CONTINUE_ITERATING) return(0);

  /* Set initial scaling for the function */
  delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
  MatLMVMSetJ0Scale(lmP->M, delta);

  /* Set counters for gradient/reset steps */
  lmP->bfgs = 0;
  lmP->sgrad = 0;
  lmP->grad = 0;

  /* Have not converged; continue with the quasi-Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      (*tao->ops->update)(tao, tao->niter, tao->user_update);
    }

    /* Compute direction */
    MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);
    MatSolve(lmP->M, lmP->GV, lmP->D);

    ProjDirect_OWLQN(lmP->D,lmP->GV);

    ++lmP->bfgs;

    /* Check for success (descent direction) */
    VecDot(lmP->D, lmP->GV, &gdx);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {

      /* The step is not a descent direction, or the direction produced Inf or NaN.
         We can assert bfgsUpdates > 1 in this case because the first solve produces
         the scaled gradient direction, which is guaranteed to be descent.

         Use the steepest descent direction (scaled). */
      ++lmP->grad;

      delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
      MatLMVMSetJ0Scale(lmP->M, delta);
      MatLMVMReset(lmP->M, PETSC_FALSE);
      MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
      MatSolve(lmP->M, lmP->GV, lmP->D);

      ProjDirect_OWLQN(lmP->D,lmP->GV);

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = OWLQN_SCALED_GRADIENT;
    } else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = OWLQN_BFGS;
      }
    }

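    /* Negate so that D becomes a descent direction for the pseudo-gradient */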
    VecScale(lmP->D, -1.0);

    /* Perform the linesearch */
    fold = f;
    VecCopy(tao->solution, lmP->Xold);
    VecCopy(tao->gradient, lmP->Gold);

    TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);
    TaoAddLineSearchCounts(tao);

    while (((int)ls_status < 0) && (stepType != OWLQN_GRADIENT)) {

      /* Reset factors and use scaled gradient step */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);
      VecCopy(tao->gradient, lmP->GV);

      ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);

      switch(stepType) {
      case OWLQN_BFGS:
        /* Failed to obtain an acceptable iterate with the BFGS step;
           attempt to use the scaled gradient direction */

        delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm*gnorm);
        MatLMVMSetJ0Scale(lmP->M, delta);
        MatLMVMReset(lmP->M, PETSC_FALSE);
        MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
        MatSolve(lmP->M, lmP->GV, lmP->D);

        ProjDirect_OWLQN(lmP->D,lmP->GV);

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
        break;

      case OWLQN_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        MatLMVMSetJ0Scale(lmP->M, 1.0);
        MatLMVMReset(lmP->M, PETSC_FALSE);
        MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
        MatSolve(lmP->M, lmP->GV, lmP->D);

        ProjDirect_OWLQN(lmP->D,lmP->GV);

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = OWLQN_GRADIENT;
        break;
      }
      VecScale(lmP->D, -1.0);

      /* Perform the linesearch */
      TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);
      TaoAddLineSearchCounts(tao);
    }

    if ((int)ls_status < 0) {
      /* Failed to find an improving point */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);
      VecCopy(tao->gradient, lmP->GV);
      step = 0.0;
    } else {
      /* A little hack: GV was used to store the gradient during the line search, so copy it back into tao->gradient */
      VecCopy(lmP->GV, tao->gradient);
    }

    ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);

    /* Check for termination */

    VecNorm(lmP->GV,NORM_2,&gnorm);

    iter++;
    TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
    TaoMonitor(tao,iter,f,gnorm,0.0,step);
    (*tao->ops->convergencetest)(tao,tao->cnvP);

    if ((int)ls_status < 0) break;
  }
  return(0);
}

static PetscErrorCode TaoSetUp_OWLQN(Tao tao)
{
  TAO_OWLQN      *lmP = (TAO_OWLQN *)tao->data;
  PetscInt       n,N;

  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
  if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
  if (!lmP->D) {VecDuplicate(tao->solution,&lmP->D);}
  if (!lmP->GV) {VecDuplicate(tao->solution,&lmP->GV);}
  if (!lmP->Xold) {VecDuplicate(tao->solution,&lmP->Xold);}
  if (!lmP->Gold) {VecDuplicate(tao->solution,&lmP->Gold);}

  /* Create matrix for the limited memory approximation */
  VecGetLocalSize(tao->solution,&n);
  VecGetSize(tao->solution,&N);
  MatCreateLMVMBFGS(((PetscObject)tao)->comm,n,N,&lmP->M);
  MatLMVMAllocate(lmP->M,tao->solution,tao->gradient);
  return(0);
}

/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_OWLQN(Tao tao)
{
  TAO_OWLQN      *lmP = (TAO_OWLQN *)tao->data;

  if (tao->setupcalled) {
    VecDestroy(&lmP->Xold);
    VecDestroy(&lmP->Gold);
    VecDestroy(&lmP->D);
    MatDestroy(&lmP->M);
    VecDestroy(&lmP->GV);
  }
  PetscFree(tao->data);
  return(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_OWLQN(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_OWLQN      *lmP = (TAO_OWLQN *)tao->data;

  PetscOptionsHead(PetscOptionsObject,"Orthant-Wise Limited-memory method for Quasi-Newton unconstrained optimization");
  PetscOptionsReal("-tao_owlqn_lambda", "regulariser weight","", lmP->lambda,&lmP->lambda,NULL);
  PetscOptionsTail();
  TaoLineSearchSetFromOptions(tao->linesearch);
  return(0);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoView_OWLQN(Tao tao, PetscViewer viewer)
{
  TAO_OWLQN      *lm = (TAO_OWLQN *)tao->data;
  PetscBool      isascii;

  PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
  if (isascii) {
    PetscViewerASCIIPushTab(viewer);
    PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", lm->bfgs);
    PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", lm->sgrad);
    PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", lm->grad);
    PetscViewerASCIIPopTab(viewer);
  }
  return(0);
}

/* ---------------------------------------------------------- */
/*MC
  TAOOWLQN - orthant-wise limited memory quasi-Newton algorithm

  Options Database Keys:
. -tao_owlqn_lambda - regulariser weight

  Level: beginner
M*/
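/*
  A minimal usage sketch (not part of this file): the solver is selected like any other
  Tao type.  The driver-routine names below (TaoSetInitialVector(),
  TaoSetObjectiveAndGradientRoutine(), FormFunctionGradient, user) are assumptions based
  on the classic Tao interface and may differ between PETSc releases.

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD,&tao);
     TaoSetType(tao,TAOOWLQN);                                       // select this solver
     TaoSetInitialVector(tao,x);                                     // starting point x
     TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,&user);
     TaoSetFromOptions(tao);                                         // picks up -tao_owlqn_lambda
     TaoSolve(tao);
     TaoDestroy(&tao);
*/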


PETSC_EXTERN PetscErrorCode TaoCreate_OWLQN(Tao tao)
{
  TAO_OWLQN      *lmP;
  const char     *owarmijo_type = TAOLINESEARCHOWARMIJO;

  tao->ops->setup = TaoSetUp_OWLQN;
  tao->ops->solve = TaoSolve_OWLQN;
  tao->ops->view = TaoView_OWLQN;
  tao->ops->setfromoptions = TaoSetFromOptions_OWLQN;
  tao->ops->destroy = TaoDestroy_OWLQN;

  PetscNewLog(tao,&lmP);
  lmP->D = NULL;
  lmP->M = NULL;
  lmP->GV = NULL;
  lmP->Xold = NULL;
  lmP->Gold = NULL;
  lmP->lambda = 1.0;

  tao->data = (void*)lmP;
  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
  PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
  TaoLineSearchSetType(tao->linesearch,owarmijo_type);
  TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
  TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
  return(0);
}