Actual source code: owlqn.c
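
/* TAOOWLQN: Orthant-Wise Limited-memory Quasi-Newton (OWL-QN) method for
   minimizing F(x) = f(x) + lambda * ||x||_1, where f is smooth, in the style
   of Andrew and Gao (ICML 2007). The method replaces the gradient by a
   pseudo-gradient that accounts for the L1 term, restricts each L-BFGS search
   direction to a single orthant, and pairs this with the orthant-wise Armijo
   line search (TAOLINESEARCHOWARMIJO). */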
#include <petsctaolinesearch.h>
#include <../src/tao/unconstrained/impls/owlqn/owlqn.h>

#define OWLQN_BFGS            0
#define OWLQN_SCALED_GRADIENT 1
#define OWLQN_GRADIENT        2
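
/* Zero out any component of the direction d whose sign disagrees with the
   corresponding component of the pseudo-gradient g. Because d still holds
   M^{-1} g at this point (it is negated only after the descent check), this
   restricts the eventual search direction to the orthant selected by the
   pseudo-gradient, keeping the step consistent with the L1 term. */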
static PetscErrorCode ProjDirect_OWLQN(Vec d, Vec g)
{
  const PetscReal *gptr;
  PetscReal       *dptr;
  PetscInt         low, high, low1, high1, i;

  PetscFunctionBegin;
  PetscCall(VecGetOwnershipRange(d, &low, &high));
  PetscCall(VecGetOwnershipRange(g, &low1, &high1));

  PetscCall(VecGetArrayRead(g, &gptr));
  PetscCall(VecGetArray(d, &dptr));
  for (i = 0; i < high - low; i++) {
    if (dptr[i] * gptr[i] <= 0.0) dptr[i] = 0.0;
  }
  PetscCall(VecRestoreArray(d, &dptr));
  PetscCall(VecRestoreArrayRead(g, &gptr));
  PetscFunctionReturn(PETSC_SUCCESS);
}
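
/* Compute the pseudo-gradient of F(x) = f(x) + lambda * ||x||_1 in place,
   overwriting gv (which holds the gradient of the smooth part f on entry).
   Away from zero the L1 term is differentiable, so lambda * sign(x_i) is
   added; at x_i == 0 the minimum-magnitude element of the subdifferential
   [g_i - lambda, g_i + lambda] is chosen, which is 0 whenever that interval
   straddles the origin. */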
static PetscErrorCode ComputePseudoGrad_OWLQN(Vec x, Vec gv, PetscReal lambda)
{
  const PetscReal *xptr;
  PetscReal       *gptr;
  PetscInt         low, high, low1, high1, i;

  PetscFunctionBegin;
  PetscCall(VecGetOwnershipRange(x, &low, &high));
  PetscCall(VecGetOwnershipRange(gv, &low1, &high1));

  PetscCall(VecGetArrayRead(x, &xptr));
  PetscCall(VecGetArray(gv, &gptr));
  for (i = 0; i < high - low; i++) {
    if (xptr[i] < 0.0) gptr[i] = gptr[i] - lambda;               /* differentiable: sign(x_i) = -1 */
    else if (xptr[i] > 0.0) gptr[i] = gptr[i] + lambda;          /* differentiable: sign(x_i) = +1 */
    else if (gptr[i] + lambda < 0.0) gptr[i] = gptr[i] + lambda; /* x_i == 0: right endpoint of subdifferential */
    else if (gptr[i] - lambda > 0.0) gptr[i] = gptr[i] - lambda; /* x_i == 0: left endpoint of subdifferential */
    else gptr[i] = 0.0;                                          /* x_i == 0: 0 lies in the subdifferential */
  }
  PetscCall(VecRestoreArray(gv, &gptr));
  PetscCall(VecRestoreArrayRead(x, &xptr));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode TaoSolve_OWLQN(Tao tao)
{
  TAO_OWLQN                   *lmP = (TAO_OWLQN *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscInt                     stepType;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) PetscCall(PetscInfo(tao, "WARNING: Variable bounds have been set but will be ignored by owlqn algorithm\n"));

  /* Check convergence criteria */
  PetscCall(TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient));
  PetscCall(VecCopy(tao->gradient, lmP->GV));
  PetscCall(ComputePseudoGrad_OWLQN(tao->solution, lmP->GV, lmP->lambda));
  PetscCall(VecNorm(lmP->GV, NORM_2, &gnorm));
  PetscCheck(!PetscIsInfOrNanReal(f) && !PetscIsInfOrNanReal(gnorm), PetscObjectComm((PetscObject)tao), PETSC_ERR_USER, "User provided compute function generated Inf or NaN");

  tao->reason = TAO_CONTINUE_ITERATING;
  PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
  PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step));
  PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(PETSC_SUCCESS);

  /* Set the initial scaling (J0) of the limited-memory approximation */
  delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm * gnorm);
  PetscCall(MatLMVMSetJ0Scale(lmP->M, delta));

  /* Set counter for gradient/reset steps */
  lmP->bfgs  = 0;
  lmP->sgrad = 0;
  lmP->grad  = 0;

  /* Have not converged; continue with the quasi-Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    PetscTryTypeMethod(tao, update, tao->niter, tao->user_update);

    /* Compute direction: D = M^{-1} GV, then restrict D to the orthant
       selected by the pseudo-gradient (D is negated after the descent check) */
    PetscCall(MatLMVMUpdate(lmP->M, tao->solution, tao->gradient));
    PetscCall(MatSolve(lmP->M, lmP->GV, lmP->D));

    PetscCall(ProjDirect_OWLQN(lmP->D, lmP->GV));

    ++lmP->bfgs;

    /* Check for success (descent direction) */
    PetscCall(VecDot(lmP->D, lmP->GV, &gdx));
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The direction is not a descent direction or contains Inf/NaN.
         (We can assert lmP->bfgs > 1 here, because the very first solve
         produces the scaled gradient direction, which is guaranteed to be
         a descent direction.)

         Use the (scaled) steepest descent direction instead. */
      ++lmP->grad;

      delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm * gnorm);
      PetscCall(MatLMVMSetJ0Scale(lmP->M, delta));
      PetscCall(MatLMVMReset(lmP->M, PETSC_FALSE));
      PetscCall(MatLMVMUpdate(lmP->M, tao->solution, tao->gradient));
      PetscCall(MatSolve(lmP->M, lmP->GV, lmP->D));

      PetscCall(ProjDirect_OWLQN(lmP->D, lmP->GV));

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = OWLQN_SCALED_GRADIENT;
    } else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = OWLQN_BFGS;
      }
    }

    PetscCall(VecScale(lmP->D, -1.0)); /* negate so that D becomes a descent direction */

    /* Perform the linesearch */
    fold = f;
    PetscCall(VecCopy(tao->solution, lmP->Xold));
    PetscCall(VecCopy(tao->gradient, lmP->Gold));

    PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status));
    PetscCall(TaoAddLineSearchCounts(tao));

    while (((int)ls_status < 0) && (stepType != OWLQN_GRADIENT)) {
      /* Reset factors and use scaled gradient step */
      f = fold;
      PetscCall(VecCopy(lmP->Xold, tao->solution));
      PetscCall(VecCopy(lmP->Gold, tao->gradient));
      PetscCall(VecCopy(tao->gradient, lmP->GV));

      PetscCall(ComputePseudoGrad_OWLQN(tao->solution, lmP->GV, lmP->lambda));

      switch (stepType) {
      case OWLQN_BFGS:
        /* Failed to obtain an acceptable iterate with the BFGS step;
           attempt to use the scaled gradient direction */

        delta = 2.0 * PetscMax(1.0, PetscAbsScalar(f)) / (gnorm * gnorm);
        PetscCall(MatLMVMSetJ0Scale(lmP->M, delta));
        PetscCall(MatLMVMReset(lmP->M, PETSC_FALSE));
        PetscCall(MatLMVMUpdate(lmP->M, tao->solution, tao->gradient));
        PetscCall(MatSolve(lmP->M, lmP->GV, lmP->D));

        PetscCall(ProjDirect_OWLQN(lmP->D, lmP->GV));

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
        break;

      case OWLQN_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        PetscCall(MatLMVMSetJ0Scale(lmP->M, 1.0));
        PetscCall(MatLMVMReset(lmP->M, PETSC_FALSE));
        PetscCall(MatLMVMUpdate(lmP->M, tao->solution, tao->gradient));
        PetscCall(MatSolve(lmP->M, lmP->GV, lmP->D));

        PetscCall(ProjDirect_OWLQN(lmP->D, lmP->GV));

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = OWLQN_GRADIENT;
        break;
      }
      PetscCall(VecScale(lmP->D, -1.0));

      /* Perform the linesearch */
      PetscCall(TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status));
      PetscCall(TaoAddLineSearchCounts(tao));
    }

    if ((int)ls_status < 0) {
      /* Failed to find an improving point */
      f = fold;
      PetscCall(VecCopy(lmP->Xold, tao->solution));
      PetscCall(VecCopy(lmP->Gold, tao->gradient));
      PetscCall(VecCopy(tao->gradient, lmP->GV));
      step = 0.0;
    } else {
      /* GV was used by the line search to store the true gradient at the
         new iterate, so copy it back into tao->gradient */
      PetscCall(VecCopy(lmP->GV, tao->gradient));
    }

    PetscCall(ComputePseudoGrad_OWLQN(tao->solution, lmP->GV, lmP->lambda));

    /* Check for termination */

    PetscCall(VecNorm(lmP->GV, NORM_2, &gnorm));

    tao->niter++;
    PetscCall(TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its));
    PetscCall(TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step));
    PetscUseTypeMethod(tao, convergencetest, tao->cnvP);

    if ((int)ls_status < 0) break;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode TaoSetUp_OWLQN(Tao tao)
{
  TAO_OWLQN *lmP = (TAO_OWLQN *)tao->data;
  PetscInt   n, N;

  PetscFunctionBegin;
  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) PetscCall(VecDuplicate(tao->solution, &tao->gradient));
  if (!tao->stepdirection) PetscCall(VecDuplicate(tao->solution, &tao->stepdirection));
  if (!lmP->D) PetscCall(VecDuplicate(tao->solution, &lmP->D));
  if (!lmP->GV) PetscCall(VecDuplicate(tao->solution, &lmP->GV));
  if (!lmP->Xold) PetscCall(VecDuplicate(tao->solution, &lmP->Xold));
  if (!lmP->Gold) PetscCall(VecDuplicate(tao->solution, &lmP->Gold));

  /* Create matrix for the limited memory approximation */
  PetscCall(VecGetLocalSize(tao->solution, &n));
  PetscCall(VecGetSize(tao->solution, &N));
  PetscCall(MatCreateLMVMBFGS(((PetscObject)tao)->comm, n, N, &lmP->M));
  PetscCall(MatLMVMAllocate(lmP->M, tao->solution, tao->gradient));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_OWLQN(Tao tao)
{
  TAO_OWLQN *lmP = (TAO_OWLQN *)tao->data;

  PetscFunctionBegin;
  if (tao->setupcalled) {
    PetscCall(VecDestroy(&lmP->Xold));
    PetscCall(VecDestroy(&lmP->Gold));
    PetscCall(VecDestroy(&lmP->D));
    PetscCall(MatDestroy(&lmP->M));
    PetscCall(VecDestroy(&lmP->GV));
  }
  PetscCall(PetscFree(tao->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_OWLQN(Tao tao, PetscOptionItems *PetscOptionsObject)
{
  TAO_OWLQN *lmP = (TAO_OWLQN *)tao->data;

  PetscFunctionBegin;
  PetscOptionsHeadBegin(PetscOptionsObject, "Orthant-Wise Limited-memory method for Quasi-Newton unconstrained optimization");
  /* Pass the current value as the displayed default (the hard-coded 100 did
     not match the actual initial value of 1.0 set in TaoCreate_OWLQN) */
  PetscCall(PetscOptionsReal("-tao_owlqn_lambda", "regulariser weight", "", lmP->lambda, &lmP->lambda, NULL));
  PetscOptionsHeadEnd();
  PetscCall(TaoLineSearchSetFromOptions(tao->linesearch));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*------------------------------------------------------------*/
static PetscErrorCode TaoView_OWLQN(Tao tao, PetscViewer viewer)
{
  TAO_OWLQN *lm = (TAO_OWLQN *)tao->data;
  PetscBool  isascii;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
  if (isascii) {
    PetscCall(PetscViewerASCIIPushTab(viewer));
    PetscCall(PetscViewerASCIIPrintf(viewer, "BFGS steps: %" PetscInt_FMT "\n", lm->bfgs));
    PetscCall(PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %" PetscInt_FMT "\n", lm->sgrad));
    PetscCall(PetscViewerASCIIPrintf(viewer, "Gradient steps: %" PetscInt_FMT "\n", lm->grad));
    PetscCall(PetscViewerASCIIPopTab(viewer));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* ---------------------------------------------------------- */
/*MC
  TAOOWLQN - orthant-wise limited-memory quasi-Newton algorithm

  Options Database Keys:
. -tao_owlqn_lambda - regulariser weight (default 1.0)

  Level: beginner
M*/
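
/* A minimal usage sketch (not part of this file): solve min_x f(x) + lambda*||x||_1
   with TAOOWLQN. FormFunctionGradient and user are hypothetical placeholders for a
   callback that evaluates the smooth part f and its gradient, and its context.

     Tao tao;
     PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
     PetscCall(TaoSetType(tao, TAOOWLQN));
     PetscCall(TaoSetSolution(tao, x));
     PetscCall(TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user));
     PetscCall(TaoSetFromOptions(tao));   // reads -tao_owlqn_lambda, etc.
     PetscCall(TaoSolve(tao));
     PetscCall(TaoDestroy(&tao));
*/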

PETSC_EXTERN PetscErrorCode TaoCreate_OWLQN(Tao tao)
{
  TAO_OWLQN  *lmP;
  const char *owarmijo_type = TAOLINESEARCHOWARMIJO;

  PetscFunctionBegin;
  tao->ops->setup          = TaoSetUp_OWLQN;
  tao->ops->solve          = TaoSolve_OWLQN;
  tao->ops->view           = TaoView_OWLQN;
  tao->ops->setfromoptions = TaoSetFromOptions_OWLQN;
  tao->ops->destroy        = TaoDestroy_OWLQN;

  PetscCall(PetscNew(&lmP));
  lmP->D      = NULL;
  lmP->M      = NULL;
  lmP->GV     = NULL;
  lmP->Xold   = NULL;
  lmP->Gold   = NULL;
  lmP->lambda = 1.0;

  tao->data = (void *)lmP;
  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;

  PetscCall(TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch));
  PetscCall(PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1));
  PetscCall(TaoLineSearchSetType(tao->linesearch, owarmijo_type));
  PetscCall(TaoLineSearchUseTaoRoutines(tao->linesearch, tao));
  PetscCall(TaoLineSearchSetOptionsPrefix(tao->linesearch, tao->hdr.prefix));
  PetscFunctionReturn(PETSC_SUCCESS);
}