Actual source code: blmvm.c
petsc-3.12.5 2020-03-29
1: #include <petsctaolinesearch.h>
2: #include <../src/tao/unconstrained/impls/lmvm/lmvm.h>
3: #include <../src/tao/bound/impls/blmvm/blmvm.h>
5: /*------------------------------------------------------------*/
6: static PetscErrorCode TaoSolve_BLMVM(Tao tao)
7: {
8: PetscErrorCode ierr;
9: TAO_BLMVM *blmP = (TAO_BLMVM *)tao->data;
10: TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
11: PetscReal f, fold, gdx, gnorm, gnorm2;
12: PetscReal stepsize = 1.0,delta;
15: /* Project initial point onto bounds */
16: TaoComputeVariableBounds(tao);
17: VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);
18: TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);
21: /* Check convergence criteria */
22: TaoComputeObjectiveAndGradient(tao, tao->solution,&f,blmP->unprojected_gradient);
23: VecBoundGradientProjection(blmP->unprojected_gradient,tao->solution, tao->XL,tao->XU,tao->gradient);
25: TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);
26: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
28: tao->reason = TAO_CONTINUE_ITERATING;
29: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
30: TaoMonitor(tao,tao->niter,f,gnorm,0.0,stepsize);
31: (*tao->ops->convergencetest)(tao,tao->cnvP);
32: if (tao->reason != TAO_CONTINUE_ITERATING) return(0);
34: /* Set counter for gradient/reset steps */
35: if (!blmP->recycle) {
36: blmP->grad = 0;
37: blmP->reset = 0;
38: MatLMVMReset(blmP->M, PETSC_FALSE);
39: }
41: /* Have not converged; continue with the quasi-Newton method */
42: while (tao->reason == TAO_CONTINUE_ITERATING) {
43: /* Call general purpose update function */
44: if (tao->ops->update) {
45: (*tao->ops->update)(tao, tao->niter, tao->user_update);
46: }
47: /* Compute direction */
48: gnorm2 = gnorm*gnorm;
49: if (gnorm2 == 0.0) gnorm2 = PETSC_MACHINE_EPSILON;
50: if (f == 0.0) {
51: delta = 2.0 / gnorm2;
52: } else {
53: delta = 2.0 * PetscAbsScalar(f) / gnorm2;
54: }
55: MatSymBrdnSetDelta(blmP->M, delta);
56: MatLMVMUpdate(blmP->M, tao->solution, tao->gradient);
57: MatSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);
58: VecBoundGradientProjection(tao->stepdirection,tao->solution,tao->XL,tao->XU,tao->gradient);
60: /* Check for success (descent direction) */
61: VecDot(blmP->unprojected_gradient, tao->gradient, &gdx);
62: if (gdx <= 0) {
63: /* Step is not a descent direction, or the solve was not successful;
64: use the steepest descent direction (scaled) instead */
65: ++blmP->grad;
67: MatLMVMReset(blmP->M, PETSC_FALSE);
68: MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);
69: MatSolve(blmP->M,blmP->unprojected_gradient, tao->stepdirection);
70: }
71: VecScale(tao->stepdirection,-1.0);
73: /* Perform the linesearch */
74: fold = f;
75: VecCopy(tao->solution, blmP->Xold);
76: VecCopy(blmP->unprojected_gradient, blmP->Gold);
77: TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);
78: TaoLineSearchApply(tao->linesearch, tao->solution, &f, blmP->unprojected_gradient, tao->stepdirection, &stepsize, &ls_status);
79: TaoAddLineSearchCounts(tao);
81: if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
82: /* Linesearch failed
83: Reset factors and use scaled (projected) gradient step */
84: ++blmP->reset;
86: f = fold;
87: VecCopy(blmP->Xold, tao->solution);
88: VecCopy(blmP->Gold, blmP->unprojected_gradient);
90: MatLMVMReset(blmP->M, PETSC_FALSE);
91: MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);
92: MatSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);
93: VecScale(tao->stepdirection, -1.0);
95: /* This may be incorrect; linesearch has values for stepmax and stepmin
96: that should be reset. */
97: TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);
98: TaoLineSearchApply(tao->linesearch,tao->solution,&f, blmP->unprojected_gradient, tao->stepdirection, &stepsize, &ls_status);
99: TaoAddLineSearchCounts(tao);
101: if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
102: tao->reason = TAO_DIVERGED_LS_FAILURE;
103: break;
104: }
105: }
107: /* Check for convergence */
108: VecBoundGradientProjection(blmP->unprojected_gradient, tao->solution, tao->XL, tao->XU, tao->gradient);
109: TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);
112: if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
113: tao->niter++;
114: TaoLogConvergenceHistory(tao,f,gnorm,0.0,tao->ksp_its);
115: TaoMonitor(tao,tao->niter,f,gnorm,0.0,stepsize);
116: (*tao->ops->convergencetest)(tao,tao->cnvP);
117: }
118: return(0);
119: }
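/*
   Notes on the loop above: the scaling passed to MatSymBrdnSetDelta() is

       delta = 2*|f(x)| / ||g(x)||^2    (||g||^2 floored at PETSC_MACHINE_EPSILON;
                                         delta = 2/||g||^2 when f(x) == 0),

   the quasi-Newton matrix is updated with the bound-projected gradient
   (tao->gradient), the unprojected gradient is used as the MatSolve()
   right-hand side and in the line search, and convergence is tested on the
   norm of the projected gradient.
*/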
121: static PetscErrorCode TaoSetup_BLMVM(Tao tao)
122: {
123: TAO_BLMVM *blmP = (TAO_BLMVM *)tao->data;
127: /* Existence of tao->solution checked in TaoSetup() */
128: VecDuplicate(tao->solution,&blmP->Xold);
129: VecDuplicate(tao->solution,&blmP->Gold);
130: VecDuplicate(tao->solution, &blmP->unprojected_gradient);
132: if (!tao->stepdirection) {
133: VecDuplicate(tao->solution, &tao->stepdirection);
134: }
135: if (!tao->gradient) {
136: VecDuplicate(tao->solution,&tao->gradient);
137: }
138: if (!tao->XL) {
139: VecDuplicate(tao->solution,&tao->XL);
140: VecSet(tao->XL,PETSC_NINFINITY);
141: }
142: if (!tao->XU) {
143: VecDuplicate(tao->solution,&tao->XU);
144: VecSet(tao->XU,PETSC_INFINITY);
145: }
146: /* Allocate matrix for the limited memory approximation */
147: MatLMVMAllocate(blmP->M,tao->solution,blmP->unprojected_gradient);
149: /* If the user has supplied a matrix for the initial Hessian, pass it to the LMVM matrix as its initial Jacobian J0 */
150: if (blmP->H0) {
151: MatLMVMSetJ0(blmP->M, blmP->H0);
152: }
153: return(0);
154: }
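/*
   A minimal usage sketch (not part of blmvm.c; xl and xu are vectors the
   caller has created and filled): TaoSetup_BLMVM() falls back to infinite
   bounds (PETSC_NINFINITY/PETSC_INFINITY) when none are given, so finite
   bounds have to come from the user, e.g. set directly before the solve:

       TaoSetVariableBounds(tao,xl,xu);
       TaoSolve(tao);
*/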
156: /* ---------------------------------------------------------- */
157: static PetscErrorCode TaoDestroy_BLMVM(Tao tao)
158: {
159: TAO_BLMVM *blmP = (TAO_BLMVM *)tao->data;
163: if (tao->setupcalled) {
164: VecDestroy(&blmP->unprojected_gradient);
165: VecDestroy(&blmP->Xold);
166: VecDestroy(&blmP->Gold);
167: }
168: MatDestroy(&blmP->M);
169: if (blmP->H0) {
170: PetscObjectDereference((PetscObject)blmP->H0);
171: }
172: PetscFree(tao->data);
173: return(0);
174: }
176: /*------------------------------------------------------------*/
177: static PetscErrorCode TaoSetFromOptions_BLMVM(PetscOptionItems* PetscOptionsObject,Tao tao)
178: {
179: TAO_BLMVM *blmP = (TAO_BLMVM *)tao->data;
181: PetscBool is_spd;
184: PetscOptionsHead(PetscOptionsObject,"Limited-memory variable-metric method for bound constrained optimization");
185: PetscOptionsBool("-tao_blmvm_recycle","enable recycling of the BFGS matrix between subsequent TaoSolve() calls","",blmP->recycle,&blmP->recycle,NULL);
186: PetscOptionsTail();
187: TaoLineSearchSetFromOptions(tao->linesearch);
188: MatSetFromOptions(blmP->M);
189: MatGetOption(blmP->M, MAT_SPD, &is_spd);
190: if (!is_spd) SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_INCOMP, "LMVM matrix must be symmetric positive-definite");
191: return(0);
192: }
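/*
   Usage note: the option registered above can be given at run time as
   -tao_blmvm_recycle, or equivalently set in code with TaoLMVMRecycle()
   (defined later in this file):

       TaoLMVMRecycle(tao,PETSC_TRUE);

   Options for the internal LMVM matrix are picked up by MatSetFromOptions()
   under the "tao_blmvm_" prefix assigned in TaoCreate_BLMVM().
*/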
195: /*------------------------------------------------------------*/
196: static PetscErrorCode TaoView_BLMVM(Tao tao, PetscViewer viewer)
197: {
198: TAO_BLMVM *lmP = (TAO_BLMVM *)tao->data;
199: PetscBool isascii;
203: PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
204: if (isascii) {
205: PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", lmP->grad);
206: PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);
207: MatView(lmP->M, viewer);
208: PetscViewerPopFormat(viewer);
209: }
210: return(0);
211: }
213: static PetscErrorCode TaoComputeDual_BLMVM(Tao tao, Vec DXL, Vec DXU)
214: {
215: TAO_BLMVM *blm = (TAO_BLMVM *) tao->data;
222: if (!tao->gradient || !blm->unprojected_gradient) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Dual variables don't exist yet or no longer exist.\n");
224: VecCopy(tao->gradient,DXL);
225: VecAXPY(DXL,-1.0,blm->unprojected_gradient);
226: VecSet(DXU,0.0);
227: VecPointwiseMax(DXL,DXL,DXU);
229: VecCopy(blm->unprojected_gradient,DXU);
230: VecAXPY(DXU,-1.0,tao->gradient);
231: VecAXPY(DXU,1.0,DXL);
232: return(0);
233: }
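/*
   Writing gp = tao->gradient (projected) and gu = blm->unprojected_gradient,
   the code above computes, componentwise,

       DXL = max(gp - gu, 0),    DXU = (gu - gp) + DXL,

   so both dual estimates are zero wherever the bound projection left the
   gradient unchanged, i.e. away from active bounds.
*/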
235: /* ---------------------------------------------------------- */
236: /*MC
237: TAOBLMVM - Bounded limited memory variable metric is a quasi-Newton method
238: for nonlinear minimization with bound constraints. It is an extension
239: of TAOLMVM.
241: Options Database Keys:
242: . -tao_blmvm_recycle - enable recycling of LMVM information between subsequent TaoSolve() calls
244: Level: beginner
245: M*/
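/*
   A minimal driver sketch (not part of blmvm.c; FormFunctionGradient and the
   vectors x, xl, xu are the user's own):

       Tao tao;
       TaoCreate(PETSC_COMM_WORLD,&tao);
       TaoSetType(tao,TAOBLMVM);
       TaoSetInitialVector(tao,x);
       TaoSetVariableBounds(tao,xl,xu);
       TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,NULL);
       TaoSetFromOptions(tao);
       TaoSolve(tao);
       TaoDestroy(&tao);
*/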
246: PETSC_EXTERN PetscErrorCode TaoCreate_BLMVM(Tao tao)
247: {
248: TAO_BLMVM *blmP;
249: const char *morethuente_type = TAOLINESEARCHMT;
253: tao->ops->setup = TaoSetup_BLMVM;
254: tao->ops->solve = TaoSolve_BLMVM;
255: tao->ops->view = TaoView_BLMVM;
256: tao->ops->setfromoptions = TaoSetFromOptions_BLMVM;
257: tao->ops->destroy = TaoDestroy_BLMVM;
258: tao->ops->computedual = TaoComputeDual_BLMVM;
260: PetscNewLog(tao,&blmP);
261: blmP->H0 = NULL;
262: blmP->recycle = PETSC_FALSE;
263: tao->data = (void*)blmP;
265: /* Override default settings (unless already changed) */
266: if (!tao->max_it_changed) tao->max_it = 2000;
267: if (!tao->max_funcs_changed) tao->max_funcs = 4000;
269: TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
270: PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
271: TaoLineSearchSetType(tao->linesearch, morethuente_type);
272: TaoLineSearchUseTaoRoutines(tao->linesearch,tao);
273: TaoLineSearchSetOptionsPrefix(tao->linesearch,tao->hdr.prefix);
274:
275: KSPInitializePackage();
276: MatCreate(((PetscObject)tao)->comm, &blmP->M);
277: MatSetType(blmP->M, MATLMVMBFGS);
278: PetscObjectIncrementTabLevel((PetscObject)blmP->M, (PetscObject)tao, 1);
279: MatSetOptionsPrefix(blmP->M, "tao_blmvm_");
280: return(0);
281: }
283: /*@
284: TaoLMVMRecycle - Enable/disable recycling of the QN history between subsequent TaoSolve calls.
286: Input Parameters:
287: + tao - the Tao solver context
288: - flg - Boolean flag for recycling (PETSC_TRUE or PETSC_FALSE)
290: Level: intermediate
291: @*/
292: PetscErrorCode TaoLMVMRecycle(Tao tao, PetscBool flg)
293: {
294: TAO_LMVM *lmP;
295: TAO_BLMVM *blmP;
296: TaoType type;
297: PetscBool is_lmvm, is_blmvm;
299:
301: TaoGetType(tao, &type);
302: PetscStrcmp(type, TAOLMVM, &is_lmvm);
303: PetscStrcmp(type, TAOBLMVM, &is_blmvm);
304:
305: if (is_lmvm) {
306: lmP = (TAO_LMVM *)tao->data;
307: lmP->recycle = flg;
308: } else if (is_blmvm) {
309: blmP = (TAO_BLMVM *)tao->data;
310: blmP->recycle = flg;
311: } else SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_WRONGSTATE, "This routine applies to TAO_LMVM and TAO_BLMVM.");
312: return(0);
313: }
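/*
   Example: with recycling enabled, each TaoSolve() in a sequence of related
   solves reuses the accumulated quasi-Newton history instead of resetting it
   (the loop is illustrative):

       TaoLMVMRecycle(tao,PETSC_TRUE);
       for (i=0; i<nsolves; i++) {
         TaoSolve(tao);
       }
*/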
315: /*@
316: TaoLMVMSetH0 - Set the initial Hessian for the QN approximation
318: Input Parameters:
319: + tao - the Tao solver context
320: - H0 - Mat object for the initial Hessian
322: Level: advanced
324: .seealso: TaoLMVMGetH0(), TaoLMVMGetH0KSP()
325: @*/
326: PetscErrorCode TaoLMVMSetH0(Tao tao, Mat H0)
327: {
328: TAO_LMVM *lmP;
329: TAO_BLMVM *blmP;
330: TaoType type;
331: PetscBool is_lmvm, is_blmvm;
333:
335: TaoGetType(tao, &type);
336: PetscStrcmp(type, TAOLMVM, &is_lmvm);
337: PetscStrcmp(type, TAOBLMVM, &is_blmvm);
339: if (is_lmvm) {
340: lmP = (TAO_LMVM *)tao->data;
341: PetscObjectReference((PetscObject)H0);
342: lmP->H0 = H0;
343: } else if (is_blmvm) {
344: blmP = (TAO_BLMVM *)tao->data;
345: PetscObjectReference((PetscObject)H0);
346: blmP->H0 = H0;
347: } else SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_WRONGSTATE, "This routine applies to TAO_LMVM and TAO_BLMVM.");
348: return(0);
349: }
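/*
   Example: H0 is any user-assembled symmetric positive-definite matrix (the
   name is illustrative).  TaoLMVMSetH0() takes its own reference via
   PetscObjectReference(), so the caller may destroy its handle afterwards:

       TaoLMVMSetH0(tao,H0);
       MatDestroy(&H0);
*/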
351: /*@
352: TaoLMVMGetH0 - Get the matrix object for the QN initial Hessian
354: Input Parameter:
355: . tao - the Tao solver context
357: Output Parameter:
358: . H0 - Mat object for the initial Hessian
360: Level: advanced
362: .seealso: TaoLMVMSetH0(), TaoLMVMGetH0KSP()
363: @*/
364: PetscErrorCode TaoLMVMGetH0(Tao tao, Mat *H0)
365: {
366: TAO_LMVM *lmP;
367: TAO_BLMVM *blmP;
368: TaoType type;
369: PetscBool is_lmvm, is_blmvm;
370: Mat M;
373:
375: TaoGetType(tao, &type);
376: PetscStrcmp(type, TAOLMVM, &is_lmvm);
377: PetscStrcmp(type, TAOBLMVM, &is_blmvm);
379: if (is_lmvm) {
380: lmP = (TAO_LMVM *)tao->data;
381: M = lmP->M;
382: } else if (is_blmvm) {
383: blmP = (TAO_BLMVM *)tao->data;
384: M = blmP->M;
385: } else SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_WRONGSTATE, "This routine applies to TAO_LMVM and TAO_BLMVM.");
386: MatLMVMGetJ0(M, H0);
387: return(0);
388: }
390: /*@
391: TaoLMVMGetH0KSP - Get the iterative solver for applying the inverse of the QN initial Hessian
393: Input Parameter:
394: . tao - the Tao solver context
397: Output Parameter:
397: . ksp - KSP solver context for the initial Hessian
399: Level: advanced
401: .seealso: TaoLMVMSetH0(), TaoLMVMGetH0()
402: @*/
403: PetscErrorCode TaoLMVMGetH0KSP(Tao tao, KSP *ksp)
404: {
405: TAO_LMVM *lmP;
406: TAO_BLMVM *blmP;
407: TaoType type;
408: PetscBool is_lmvm, is_blmvm;
409: Mat M;
412: TaoGetType(tao, &type);
413: PetscStrcmp(type, TAOLMVM, &is_lmvm);
414: PetscStrcmp(type, TAOBLMVM, &is_blmvm);
416: if (is_lmvm) {
417: lmP = (TAO_LMVM *)tao->data;
418: M = lmP->M;
419: } else if (is_blmvm) {
420: blmP = (TAO_BLMVM *)tao->data;
421: M = blmP->M;
422: } else SETERRQ(PetscObjectComm((PetscObject)tao), PETSC_ERR_ARG_WRONGSTATE, "This routine applies to TAO_LMVM and TAO_BLMVM.");
423: MatLMVMGetJ0KSP(M, ksp);
424: return(0);
425: }
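/*
   Example: customize how the supplied initial Hessian is inverted (KSPCG is
   only an illustration; the type is usually chosen via run-time options):

       KSP ksp;
       TaoLMVMGetH0KSP(tao,&ksp);
       KSPSetType(ksp,KSPCG);
*/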