Actual source code: almm.c
#include <../src/tao/constrained/impls/almm/almm.h>
#include <petsctao.h>
#include <petsc/private/petscimpl.h>
#include <petsc/private/vecimpl.h>

static PetscErrorCode TaoALMMCombinePrimal_Private(Tao,Vec,Vec,Vec);
static PetscErrorCode TaoALMMCombineDual_Private(Tao,Vec,Vec,Vec);
static PetscErrorCode TaoALMMSplitPrimal_Private(Tao,Vec,Vec,Vec);
static PetscErrorCode TaoALMMComputeOptimalityNorms_Private(Tao);
static PetscErrorCode TaoALMMComputeAugLagAndGradient_Private(Tao);
static PetscErrorCode TaoALMMComputePHRLagAndGradient_Private(Tao);
static PetscErrorCode TaoSolve_ALMM(Tao tao)
{
  TAO_ALMM           *auglag = (TAO_ALMM*)tao->data;
  TaoConvergedReason reason;
  PetscReal          updated;

  /* reset initial multiplier/slack guess */
  if (!tao->recycle) {
    if (tao->ineq_constrained) {
      VecZeroEntries(auglag->Ps);
      TaoALMMCombinePrimal_Private(tao, auglag->Px, auglag->Ps, auglag->P);
      VecZeroEntries(auglag->Yi);
    }
    if (tao->eq_constrained) {
      VecZeroEntries(auglag->Ye);
    }
  }

  /* compute initial nonlinear Lagrangian and its derivatives */
  (*auglag->sub_obj)(tao);
  TaoALMMComputeOptimalityNorms_Private(tao);
  /* print initial step and check convergence */
  PetscInfo(tao,"Solving with %s formulation\n",TaoALMMTypes[auglag->type]);
  TaoLogConvergenceHistory(tao, auglag->Lval, auglag->gnorm, auglag->cnorm, tao->ksp_its);
  TaoMonitor(tao, tao->niter, auglag->fval, auglag->gnorm, auglag->cnorm, 0.0);
  (*tao->ops->convergencetest)(tao, tao->cnvP);
  /* set initial penalty factor and inner solver tolerance */
  switch (auglag->type) {
  case TAO_ALMM_CLASSIC:
    auglag->mu = auglag->mu0;
    break;
  case TAO_ALMM_PHR:
    auglag->cenorm = 0.0;
    if (tao->eq_constrained) {
      VecDot(auglag->Ce, auglag->Ce, &auglag->cenorm);
    }
    auglag->cinorm = 0.0;
    if (tao->ineq_constrained) {
      VecCopy(auglag->Ci, auglag->Ciwork);
      VecScale(auglag->Ciwork, -1.0);
      VecPointwiseMax(auglag->Ciwork, auglag->Cizero, auglag->Ciwork);
      VecDot(auglag->Ciwork, auglag->Ciwork, &auglag->cinorm);
    }
    /* determine initial penalty factor based on the balance of constraint violation and objective function value */
    auglag->mu = PetscMax(1.e-6, PetscMin(10.0, 2.0*PetscAbsReal(auglag->fval)/(auglag->cenorm + auglag->cinorm)));
    break;
  default:
    break;
  }
  auglag->gtol = auglag->gtol0;
  PetscInfo(tao,"Initial penalty: %.2f\n",auglag->mu);

  /* start aug-lag outer loop */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    ++tao->niter;
    /* update subsolver tolerance */
    PetscInfo(tao,"Subsolver tolerance: ||G|| <= %e\n",auglag->gtol);
    TaoSetTolerances(auglag->subsolver, auglag->gtol, 0.0, 0.0);
    /* solve the bound-constrained or unconstrained subproblem */
    TaoSolve(auglag->subsolver);
    TaoGetConvergedReason(auglag->subsolver, &reason);
    tao->ksp_its += auglag->subsolver->ksp_its;
    if (reason != TAO_CONVERGED_GATOL) {
      PetscInfo(tao,"Subsolver failed to converge, reason: %s\n",TaoConvergedReasons[reason]);
    }
    /* evaluate solution and test convergence */
    (*auglag->sub_obj)(tao);
    TaoALMMComputeOptimalityNorms_Private(tao);
    /* decide whether to update multipliers or not */
    updated = 0.0;
    if (auglag->cnorm <= auglag->ytol) {
      PetscInfo(tao,"Multipliers updated: ||C|| <= %e\n",auglag->ytol);
      /* constraints are good, update multipliers and convergence tolerances */
      if (tao->eq_constrained) {
        VecAXPY(auglag->Ye, auglag->mu, auglag->Ce);
        VecSet(auglag->Cework, auglag->ye_max);
        VecPointwiseMin(auglag->Ye, auglag->Cework, auglag->Ye);
        VecSet(auglag->Cework, auglag->ye_min);
        VecPointwiseMax(auglag->Ye, auglag->Cework, auglag->Ye);
      }
      if (tao->ineq_constrained) {
        VecAXPY(auglag->Yi, auglag->mu, auglag->Ci);
        VecSet(auglag->Ciwork, auglag->yi_max);
        VecPointwiseMin(auglag->Yi, auglag->Ciwork, auglag->Yi);
        VecSet(auglag->Ciwork, auglag->yi_min);
        VecPointwiseMax(auglag->Yi, auglag->Ciwork, auglag->Yi);
      }
      /* tolerances are updated only for non-PHR methods */
      if (auglag->type != TAO_ALMM_PHR) {
        auglag->ytol = PetscMax(tao->catol, auglag->ytol/PetscPowReal(auglag->mu, auglag->mu_pow_good));
        auglag->gtol = PetscMax(tao->gatol, auglag->gtol/auglag->mu);
      }
      updated = 1.0;
    } else {
      /* constraints are bad, update penalty factor */
      auglag->mu = PetscMin(auglag->mu_max, auglag->mu_fac*auglag->mu);
      /* tolerances are reset only for non-PHR methods */
      if (auglag->type != TAO_ALMM_PHR) {
        auglag->ytol = PetscMax(tao->catol, 0.1/PetscPowReal(auglag->mu, auglag->mu_pow_bad));
        auglag->gtol = PetscMax(tao->gatol, 1.0/auglag->mu);
      }
      PetscInfo(tao,"Penalty increased: mu = %.2f\n",auglag->mu);
    }
    TaoLogConvergenceHistory(tao, auglag->fval, auglag->gnorm, auglag->cnorm, tao->ksp_its);
    TaoMonitor(tao, tao->niter, auglag->fval, auglag->gnorm, auglag->cnorm, updated);
    (*tao->ops->convergencetest)(tao, tao->cnvP);
  }

  return 0;
}
static PetscErrorCode TaoView_ALMM(Tao tao,PetscViewer viewer)
{
  TAO_ALMM  *auglag = (TAO_ALMM*)tao->data;
  PetscBool isascii;

  PetscViewerASCIIPushTab(viewer);
  TaoView(auglag->subsolver,viewer);
  PetscViewerASCIIPopTab(viewer);
  PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
  if (isascii) {
    PetscViewerASCIIPushTab(viewer);
    PetscViewerASCIIPrintf(viewer, "ALMM Formulation Type: %s\n", TaoALMMTypes[auglag->type]);
    PetscViewerASCIIPopTab(viewer);
  }
  return 0;
}
static PetscErrorCode TaoSetUp_ALMM(Tao tao)
{
  TAO_ALMM  *auglag = (TAO_ALMM*)tao->data;
  VecType   vec_type;
  Vec       SL, SU;
  PetscBool is_cg = PETSC_FALSE, is_lmvm = PETSC_FALSE;

  TaoComputeVariableBounds(tao);
  /* alias base vectors and create extras */
  VecGetType(tao->solution, &vec_type);
  auglag->Px = tao->solution;
  if (!tao->gradient) { /* base gradient */
    VecDuplicate(tao->solution, &tao->gradient);
  }
  auglag->LgradX = tao->gradient;
  if (!auglag->Xwork) { /* opt var work vector */
    VecDuplicate(tao->solution, &auglag->Xwork);
  }
  if (tao->eq_constrained) {
    auglag->Ce = tao->constraints_equality;
    auglag->Ae = tao->jacobian_equality;
    if (!auglag->Ye) { /* equality multipliers */
      VecDuplicate(auglag->Ce, &auglag->Ye);
    }
    if (!auglag->Cework) {
      VecDuplicate(auglag->Ce, &auglag->Cework);
    }
  }
  if (tao->ineq_constrained) {
    auglag->Ci = tao->constraints_inequality;
    auglag->Ai = tao->jacobian_inequality;
    if (!auglag->Yi) { /* inequality multipliers */
      VecDuplicate(auglag->Ci, &auglag->Yi);
    }
    if (!auglag->Ciwork) {
      VecDuplicate(auglag->Ci, &auglag->Ciwork);
    }
    if (!auglag->Cizero) {
      VecDuplicate(auglag->Ci, &auglag->Cizero);
      VecZeroEntries(auglag->Cizero);
    }
    if (!auglag->Ps) { /* slack vars */
      VecDuplicate(auglag->Ci, &auglag->Ps);
    }
    if (!auglag->LgradS) { /* slack component of Lagrangian gradient */
      VecDuplicate(auglag->Ci, &auglag->LgradS);
    }
    /* create vector for combined primal space and the associated communication objects */
    if (!auglag->P) {
      PetscMalloc1(2, &auglag->Parr);
      auglag->Parr[0] = auglag->Px; auglag->Parr[1] = auglag->Ps;
      VecConcatenate(2, auglag->Parr, &auglag->P, &auglag->Pis);
      PetscMalloc1(2, &auglag->Pscatter);
      VecScatterCreate(auglag->P, auglag->Pis[0], auglag->Px, NULL, &auglag->Pscatter[0]);
      VecScatterCreate(auglag->P, auglag->Pis[1], auglag->Ps, NULL, &auglag->Pscatter[1]);
    }
    if (tao->eq_constrained) {
      /* create vector for combined dual space and the associated communication objects */
      if (!auglag->Y) {
        PetscMalloc1(2, &auglag->Yarr);
        auglag->Yarr[0] = auglag->Ye; auglag->Yarr[1] = auglag->Yi;
        VecConcatenate(2, auglag->Yarr, &auglag->Y, &auglag->Yis);
        PetscMalloc1(2, &auglag->Yscatter);
        VecScatterCreate(auglag->Y, auglag->Yis[0], auglag->Ye, NULL, &auglag->Yscatter[0]);
        VecScatterCreate(auglag->Y, auglag->Yis[1], auglag->Yi, NULL, &auglag->Yscatter[1]);
      }
      if (!auglag->C) {
        VecDuplicate(auglag->Y, &auglag->C);
      }
    } else {
      if (!auglag->C) {
        auglag->C = auglag->Ci;
      }
      if (!auglag->Y) {
        auglag->Y = auglag->Yi;
      }
    }
  } else {
    if (!auglag->P) {
      auglag->P = auglag->Px;
    }
    if (!auglag->G) {
      auglag->G = auglag->LgradX;
    }
    if (!auglag->C) {
      auglag->C = auglag->Ce;
    }
    if (!auglag->Y) {
      auglag->Y = auglag->Ye;
    }
  }
  /* initialize parameters */
  if (auglag->type == TAO_ALMM_PHR) {
    auglag->mu_fac = 10.0;
    auglag->yi_min = 0.0;
    auglag->ytol0  = 0.5;
    auglag->gtol0  = tao->gatol;
    if (tao->gatol_changed && tao->catol_changed) {
      PetscInfo(tao,"TAOALMM with PHR: different gradient and constraint tolerances are not supported, setting catol = gatol\n");
      tao->catol = tao->gatol;
    }
  }
  /* set the Lagrangian formulation type for the subsolver */
  switch (auglag->type) {
  case TAO_ALMM_CLASSIC:
    auglag->sub_obj = TaoALMMComputeAugLagAndGradient_Private;
    break;
  case TAO_ALMM_PHR:
    auglag->sub_obj = TaoALMMComputePHRLagAndGradient_Private;
    break;
  default:
    break;
  }
  /* set up the subsolver */
  TaoSetSolution(auglag->subsolver, auglag->P);
  TaoSetObjective(auglag->subsolver, TaoALMMSubsolverObjective_Private, (void*)auglag);
  TaoSetObjectiveAndGradient(auglag->subsolver, NULL, TaoALMMSubsolverObjectiveAndGradient_Private, (void*)auglag);
  if (tao->bounded) {
    /* make sure that the subsolver is a bound-constrained method */
    PetscObjectTypeCompare((PetscObject)auglag->subsolver, TAOCG, &is_cg);
    PetscObjectTypeCompare((PetscObject)auglag->subsolver, TAOLMVM, &is_lmvm);
    if (is_cg) {
      TaoSetType(auglag->subsolver, TAOBNCG);
      PetscInfo(tao,"TAOCG detected for bound-constrained problem, switching to TAOBNCG instead.");
    }
    if (is_lmvm) {
      TaoSetType(auglag->subsolver, TAOBQNLS);
      PetscInfo(tao,"TAOLMVM detected for bound-constrained problem, switching to TAOBQNLS instead.");
    }
    /* create lower and upper bound clone vectors for subsolver */
    if (!auglag->PL) {
      VecDuplicate(auglag->P, &auglag->PL);
    }
    if (!auglag->PU) {
      VecDuplicate(auglag->P, &auglag->PU);
    }
    if (tao->ineq_constrained) {
      /* create lower and upper bounds for slack, set lower to 0 */
      VecDuplicate(auglag->Ci, &SL);
      VecSet(SL, 0.0);
      VecDuplicate(auglag->Ci, &SU);
      VecSet(SU, PETSC_INFINITY);
      /* combine opt var bounds with slack bounds */
      TaoALMMCombinePrimal_Private(tao, tao->XL, SL, auglag->PL);
      TaoALMMCombinePrimal_Private(tao, tao->XU, SU, auglag->PU);
      /* destroy work vectors */
      VecDestroy(&SL);
      VecDestroy(&SU);
    } else {
      /* no inequality constraints, just copy bounds into the subsolver */
      VecCopy(tao->XL, auglag->PL);
      VecCopy(tao->XU, auglag->PU);
    }
    TaoSetVariableBounds(auglag->subsolver, auglag->PL, auglag->PU);
  }
  TaoSetUp(auglag->subsolver);
  auglag->G = auglag->subsolver->gradient;

  return 0;
}
static PetscErrorCode TaoDestroy_ALMM(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;

  TaoDestroy(&auglag->subsolver);
  if (tao->setupcalled) {
    VecDestroy(&auglag->Xwork);      /* opt work */
    if (tao->eq_constrained) {
      VecDestroy(&auglag->Ye);       /* equality multipliers */
      VecDestroy(&auglag->Cework);   /* equality work vector */
    }
    if (tao->ineq_constrained) {
      VecDestroy(&auglag->Ps);       /* slack vars */
      auglag->Parr[0] = NULL;        /* clear pointer to tao->solution, will be destroyed by TaoDestroy() shell */
      PetscFree(auglag->Parr);       /* array of primal vectors */
      VecDestroy(&auglag->LgradS);   /* slack grad */
      VecDestroy(&auglag->Cizero);   /* zero vector for pointwise max */
      VecDestroy(&auglag->Yi);       /* inequality multipliers */
      VecDestroy(&auglag->Ciwork);   /* inequality work vector */
      VecDestroy(&auglag->P);        /* combo primal */
      ISDestroy(&auglag->Pis[0]);    /* index set for X inside P */
      ISDestroy(&auglag->Pis[1]);    /* index set for S inside P */
      PetscFree(auglag->Pis);        /* array of P index sets */
      VecScatterDestroy(&auglag->Pscatter[0]);
      VecScatterDestroy(&auglag->Pscatter[1]);
      PetscFree(auglag->Pscatter);
      if (tao->eq_constrained) {
        VecDestroy(&auglag->Y);      /* combo multipliers */
        PetscFree(auglag->Yarr);     /* array of dual vectors */
        VecDestroy(&auglag->C);      /* combo constraints */
        ISDestroy(&auglag->Yis[0]);  /* index set for Ye inside Y */
        ISDestroy(&auglag->Yis[1]);  /* index set for Yi inside Y */
        PetscFree(auglag->Yis);
        VecScatterDestroy(&auglag->Yscatter[0]);
        VecScatterDestroy(&auglag->Yscatter[1]);
        PetscFree(auglag->Yscatter);
      }
    }
    if (tao->bounded) {
      VecDestroy(&auglag->PL);       /* lower bounds for subsolver */
      VecDestroy(&auglag->PU);       /* upper bounds for subsolver */
    }
  }
  PetscFree(tao->data);
  return 0;
}
static PetscErrorCode TaoSetFromOptions_ALMM(PetscOptionItems *PetscOptionsObject,Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;
  PetscInt i;

  PetscOptionsHead(PetscOptionsObject,"Augmented Lagrangian multiplier method solves problems with general constraints by converting them into a sequence of unconstrained problems.");
  PetscOptionsReal("-tao_almm_mu_init","initial penalty parameter","",auglag->mu0,&auglag->mu0,NULL);
  PetscOptionsReal("-tao_almm_mu_factor","increase factor for the penalty parameter","",auglag->mu_fac,&auglag->mu_fac,NULL);
  PetscOptionsReal("-tao_almm_mu_power_good","exponential for penalty parameter when multiplier update is accepted","",auglag->mu_pow_good,&auglag->mu_pow_good,NULL);
  PetscOptionsReal("-tao_almm_mu_power_bad","exponential for penalty parameter when multiplier update is rejected","",auglag->mu_pow_bad,&auglag->mu_pow_bad,NULL);
  PetscOptionsReal("-tao_almm_mu_max","maximum safeguard for penalty parameter updates","",auglag->mu_max,&auglag->mu_max,NULL);
  PetscOptionsReal("-tao_almm_ye_min","minimum safeguard for equality multiplier updates","",auglag->ye_min,&auglag->ye_min,NULL);
  PetscOptionsReal("-tao_almm_ye_max","maximum safeguard for equality multipliers updates","",auglag->ye_max,&auglag->ye_max,NULL);
  PetscOptionsReal("-tao_almm_yi_min","minimum safeguard for inequality multipliers updates","",auglag->yi_min,&auglag->yi_min,NULL);
  PetscOptionsReal("-tao_almm_yi_max","maximum safeguard for inequality multipliers updates","",auglag->yi_max,&auglag->yi_max,NULL);
  PetscOptionsEnum("-tao_almm_type","augmented Lagrangian formulation type for the subproblem","TaoALMMType",TaoALMMTypes,(PetscEnum)auglag->type,(PetscEnum*)&auglag->type,NULL);
  PetscOptionsTail();
  TaoSetOptionsPrefix(auglag->subsolver,((PetscObject)tao)->prefix);
  TaoAppendOptionsPrefix(auglag->subsolver,"tao_almm_subsolver_");
  TaoSetFromOptions(auglag->subsolver);
  for (i=0; i<tao->numbermonitors; i++) {
    PetscObjectReference((PetscObject)tao->monitorcontext[i]);
    TaoSetMonitor(auglag->subsolver, tao->monitor[i], tao->monitorcontext[i], tao->monitordestroy[i]);
    if (tao->monitor[i] == TaoMonitorDefault || tao->monitor[i] == TaoDefaultCMonitor || tao->monitor[i] == TaoDefaultGMonitor || tao->monitor[i] == TaoDefaultSMonitor) {
      auglag->info = PETSC_TRUE;
    }
  }
  return 0;
}
/* -------------------------------------------------------- */

/*MC
  TaoALMM - Augmented Lagrangian multiplier method for solving nonlinear optimization problems with general constraints.

  Options Database Keys:
+ -tao_almm_mu_init <real> - initial penalty parameter (default: 10.)
. -tao_almm_mu_factor <real> - increase factor for the penalty parameter (default: 10.)
. -tao_almm_mu_max <real> - maximum safeguard for penalty parameter updates (default: 1.e20)
. -tao_almm_mu_power_good <real> - exponential for penalty parameter when multiplier update is accepted (default: 0.9)
. -tao_almm_mu_power_bad <real> - exponential for penalty parameter when multiplier update is rejected (default: 0.1)
. -tao_almm_ye_min <real> - minimum safeguard for equality multiplier updates (default: -1.e20)
. -tao_almm_ye_max <real> - maximum safeguard for equality multiplier updates (default: 1.e20)
. -tao_almm_yi_min <real> - minimum safeguard for inequality multiplier updates (default: -1.e20)
. -tao_almm_yi_max <real> - maximum safeguard for inequality multiplier updates (default: 1.e20)
- -tao_almm_type <classic,phr> - change formulation of the augmented Lagrangian merit function for the subproblem (default: classic)
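 
  For example, the penalty schedule and formulation can be adjusted entirely from the command line; the
  values below are illustrative only and not recommended settings:
.vb
  -tao_almm_mu_init 1.0 -tao_almm_mu_factor 100.0 -tao_almm_mu_max 1.e8 -tao_almm_type phr
.ve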
 
  Level: beginner

  Notes:
  This method converts a constrained problem into a sequence of unconstrained problems via the augmented
  Lagrangian merit function. Bound constraints are pushed down to the subproblem without any modifications.

  Two formulations are offered for the subproblem: canonical Hestenes-Powell augmented Lagrangian with slack
  variables for inequality constraints, and a slack-less Powell-Hestenes-Rockafellar (PHR) formulation utilizing a
  pointwise max() penalty on inequality constraints. The canonical augmented Lagrangian formulation typically
  converges faster for most problems. However, PHR may be desirable for problems featuring a large number
  of inequality constraints because it avoids inflating the size of the subproblem with slack variables.
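 
  As a schematic example (assuming tao has already been created and set to this algorithm), the PHR
  formulation can be selected programmatically before TaoSolve() with
.vb
  TaoALMMSetType(tao, TAO_ALMM_PHR);
.ve
  or from the command line with -tao_almm_type phr.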
 
  The subproblem is solved using a nested first-order TAO solver. The user can retrieve a pointer to
  the subsolver via TaoALMMGetSubsolver() or pass command line arguments to it using the
  "-tao_almm_subsolver_" prefix. Currently, TaoALMM does not support second-order methods for the
  subproblem. It is also highly recommended that the subsolver chosen by the user utilize a trust-region
  strategy for globalization (default: TAOBQNKTR), especially if the outer problem features bound constraints.
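 
  As a minimal sketch of a driver (the callbacks FormObjGrad, FormIneqCon, FormIneqJac and the vectors and
  matrices passed to them are placeholders for application-provided routines, not part of this solver):
.vb
  TaoCreate(PETSC_COMM_WORLD, &tao);
  TaoSetType(tao, TAOALMM);
  TaoSetSolution(tao, x);
  TaoSetObjectiveAndGradient(tao, NULL, FormObjGrad, &user);
  TaoSetInequalityConstraintsRoutine(tao, ci, FormIneqCon, &user);
  TaoSetJacobianInequalityRoutine(tao, Ai, Ai, FormIneqJac, &user);
  TaoSetFromOptions(tao);
  TaoALMMGetSubsolver(tao, &subsolver);
  TaoSolve(tao);
.ve
  The TaoALMMGetSubsolver() call is optional; it merely exposes the inner solver for inspection or further
  configuration. The outer augmented Lagrangian iteration carried out by this solver is summarized below.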
 
.vb
  while unconverged
    solve argmin_x L(x) s.t. l <= x <= u
    if ||c|| <= y_tol
      if ||c|| <= c_tol && ||Lgrad|| <= g_tol:
        problem converged, return solution
      else
        constraints sufficiently improved
        update multipliers and tighten tolerances
      endif
    else
      constraints did not improve
      update penalty and loosen tolerances
    endif
  endwhile
.ve

.seealso: TaoALMMGetType(), TaoALMMSetType(), TaoALMMSetSubsolver(), TaoALMMGetSubsolver(),
          TaoALMMGetMultipliers(), TaoALMMSetMultipliers(), TaoALMMGetPrimalIS(), TaoALMMGetDualIS()
M*/
PETSC_EXTERN PetscErrorCode TaoCreate_ALMM(Tao tao)
{
  TAO_ALMM *auglag;

  PetscNewLog(tao, &auglag);

  tao->ops->destroy        = TaoDestroy_ALMM;
  tao->ops->setup          = TaoSetUp_ALMM;
  tao->ops->setfromoptions = TaoSetFromOptions_ALMM;
  tao->ops->view           = TaoView_ALMM;
  tao->ops->solve          = TaoSolve_ALMM;

  tao->gatol = 1.e-5;
  tao->grtol = 0.0;
  tao->gttol = 0.0;
  tao->catol = 1.e-5;
  tao->crtol = 0.0;

  tao->data = (void*)auglag;
  auglag->parent = tao;
  auglag->mu0 = 10.0;
  auglag->mu = auglag->mu0;
  auglag->mu_fac = 10.0;
  auglag->mu_max = PETSC_INFINITY;
  auglag->mu_pow_good = 0.9;
  auglag->mu_pow_bad = 0.1;
  auglag->ye_min = PETSC_NINFINITY;
  auglag->ye_max = PETSC_INFINITY;
  auglag->yi_min = PETSC_NINFINITY;
  auglag->yi_max = PETSC_INFINITY;
  auglag->ytol0 = 0.1/PetscPowReal(auglag->mu0, auglag->mu_pow_bad);
  auglag->ytol = auglag->ytol0;
  auglag->gtol0 = 1.0/auglag->mu0;
  auglag->gtol = auglag->gtol0;

  auglag->sub_obj = TaoALMMComputeAugLagAndGradient_Private;
  auglag->type = TAO_ALMM_CLASSIC;
  auglag->info = PETSC_FALSE;

  TaoCreate(PetscObjectComm((PetscObject)tao),&auglag->subsolver);
  TaoSetType(auglag->subsolver, TAOBQNKTR);
  TaoSetTolerances(auglag->subsolver, auglag->gtol, 0.0, 0.0);
  TaoSetMaximumIterations(auglag->subsolver, 1000);
  TaoSetMaximumFunctionEvaluations(auglag->subsolver, 10000);
  TaoSetFunctionLowerBound(auglag->subsolver, PETSC_NINFINITY);
  PetscObjectIncrementTabLevel((PetscObject)auglag->subsolver,(PetscObject)tao,1);

  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetType_C", TaoALMMGetType_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetType_C", TaoALMMSetType_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetSubsolver_C", TaoALMMGetSubsolver_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetSubsolver_C", TaoALMMSetSubsolver_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetMultipliers_C", TaoALMMGetMultipliers_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetMultipliers_C", TaoALMMSetMultipliers_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetPrimalIS_C", TaoALMMGetPrimalIS_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetDualIS_C", TaoALMMGetDualIS_Private);
  return 0;
}
static PetscErrorCode TaoALMMCombinePrimal_Private(Tao tao, Vec X, Vec S, Vec P)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;

  if (tao->ineq_constrained) {
    VecScatterBegin(auglag->Pscatter[0], X, P, INSERT_VALUES, SCATTER_REVERSE);
    VecScatterEnd(auglag->Pscatter[0], X, P, INSERT_VALUES, SCATTER_REVERSE);
    VecScatterBegin(auglag->Pscatter[1], S, P, INSERT_VALUES, SCATTER_REVERSE);
    VecScatterEnd(auglag->Pscatter[1], S, P, INSERT_VALUES, SCATTER_REVERSE);
  } else {
    VecCopy(X, P);
  }
  return 0;
}
static PetscErrorCode TaoALMMCombineDual_Private(Tao tao, Vec EQ, Vec IN, Vec Y)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;

  if (tao->eq_constrained) {
    if (tao->ineq_constrained) {
      VecScatterBegin(auglag->Yscatter[0], EQ, Y, INSERT_VALUES, SCATTER_REVERSE);
      VecScatterEnd(auglag->Yscatter[0], EQ, Y, INSERT_VALUES, SCATTER_REVERSE);
      VecScatterBegin(auglag->Yscatter[1], IN, Y, INSERT_VALUES, SCATTER_REVERSE);
      VecScatterEnd(auglag->Yscatter[1], IN, Y, INSERT_VALUES, SCATTER_REVERSE);
    } else {
      VecCopy(EQ, Y);
    }
  } else {
    VecCopy(IN, Y);
  }
  return 0;
}
static PetscErrorCode TaoALMMSplitPrimal_Private(Tao tao, Vec P, Vec X, Vec S)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;

  if (tao->ineq_constrained) {
    VecScatterBegin(auglag->Pscatter[0], P, X, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(auglag->Pscatter[0], P, X, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterBegin(auglag->Pscatter[1], P, S, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(auglag->Pscatter[1], P, S, INSERT_VALUES, SCATTER_FORWARD);
  } else {
    VecCopy(P, X);
  }
  return 0;
}
/* this assumes that the latest constraints are stored in Ce and Ci, and also combined in C */
static PetscErrorCode TaoALMMComputeOptimalityNorms_Private(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;

  /* if bounded, project the gradient */
  if (tao->bounded) {
    VecBoundGradientProjection(auglag->LgradX, auglag->Px, tao->XL, tao->XU, auglag->LgradX);
  }
  if (auglag->type == TAO_ALMM_PHR) {
    VecNorm(auglag->LgradX, NORM_INFINITY, &auglag->gnorm);
    auglag->cenorm = 0.0;
    if (tao->eq_constrained) {
      VecNorm(auglag->Ce, NORM_INFINITY, &auglag->cenorm);
    }
    auglag->cinorm = 0.0;
    if (tao->ineq_constrained) {
      VecCopy(auglag->Yi, auglag->Ciwork);
      VecScale(auglag->Ciwork, -1.0/auglag->mu);
      VecPointwiseMax(auglag->Ciwork, auglag->Ci, auglag->Ciwork);
      VecNorm(auglag->Ciwork, NORM_INFINITY, &auglag->cinorm);
    }
    auglag->cnorm_old = auglag->cnorm;
    auglag->cnorm = PetscMax(auglag->cenorm, auglag->cinorm);
    auglag->ytol = auglag->ytol0 * auglag->cnorm_old;
  } else {
    VecNorm(auglag->LgradX, NORM_2, &auglag->gnorm);
    VecNorm(auglag->C, NORM_2, &auglag->cnorm);
  }
  return 0;
}
static PetscErrorCode TaoALMMEvaluateIterate_Private(Tao tao, Vec P)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;

  /* split solution into primal and slack components */
  TaoALMMSplitPrimal_Private(tao, auglag->P, auglag->Px, auglag->Ps);

  /* compute f, df/dx and the constraints */
  TaoComputeObjectiveAndGradient(tao, auglag->Px, &auglag->fval, auglag->LgradX);
  if (tao->eq_constrained) {
    TaoComputeEqualityConstraints(tao, auglag->Px, auglag->Ce);
    TaoComputeJacobianEquality(tao, auglag->Px, auglag->Ae, auglag->Ae);
  }
  if (tao->ineq_constrained) {
    TaoComputeInequalityConstraints(tao, auglag->Px, auglag->Ci);
    TaoComputeJacobianInequality(tao, auglag->Px, auglag->Ai, auglag->Ai);
    switch (auglag->type) {
    case TAO_ALMM_CLASSIC:
      /* classic formulation converts inequality to equality constraints via slack variables */
      VecAXPY(auglag->Ci, -1.0, auglag->Ps);
      break;
    case TAO_ALMM_PHR:
      /* PHR is based on Ci <= 0 while TAO defines Ci >= 0 so we hit it with a negative sign */
      VecScale(auglag->Ci, -1.0);
      MatScale(auglag->Ai, -1.0);
      break;
    default:
      break;
    }
  }
  /* combine constraints into one vector */
  TaoALMMCombineDual_Private(tao, auglag->Ce, auglag->Ci, auglag->C);
  return 0;
}
/*
Lphr = f + 0.5*mu*[ (Ce + Ye/mu)^T (Ce + Ye/mu) + pmax(0, Ci + Yi/mu)^T pmax(0, Ci + Yi/mu)]

dLphr/dX = dF/dX + mu*[ (Ce + Ye/mu)^T Ae + pmax(0, Ci + Yi/mu)^T Ai]

dLphr/dS = 0
*/
static PetscErrorCode TaoALMMComputePHRLagAndGradient_Private(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;
  PetscReal eq_norm=0.0, ineq_norm=0.0;

  TaoALMMEvaluateIterate_Private(tao, auglag->P);
  if (tao->eq_constrained) {
    /* Ce_work = mu*(Ce + Ye/mu) */
    VecWAXPY(auglag->Cework, 1.0/auglag->mu, auglag->Ye, auglag->Ce);
    VecDot(auglag->Cework, auglag->Cework, &eq_norm); /* contribution to scalar Lagrangian */
    VecScale(auglag->Cework, auglag->mu);
    /* dL/dX += mu*(Ce + Ye/mu)^T Ae */
    MatMultTransposeAdd(auglag->Ae, auglag->Cework, auglag->LgradX, auglag->LgradX);
  }
  if (tao->ineq_constrained) {
    /* Ci_work = mu * pmax(0, Ci + Yi/mu) where pmax() is pointwise max() */
    VecWAXPY(auglag->Ciwork, 1.0/auglag->mu, auglag->Yi, auglag->Ci);
    VecPointwiseMax(auglag->Ciwork, auglag->Cizero, auglag->Ciwork);
    VecDot(auglag->Ciwork, auglag->Ciwork, &ineq_norm); /* contribution to scalar Lagrangian */
    /* dL/dX += mu * pmax(0, Ci + Yi/mu)^T Ai */
    VecScale(auglag->Ciwork, auglag->mu);
    MatMultTransposeAdd(auglag->Ai, auglag->Ciwork, auglag->LgradX, auglag->LgradX);
    /* dL/dS = 0 because there are no slacks in PHR */
    VecZeroEntries(auglag->LgradS);
  }
  /* combine gradient together */
  TaoALMMCombinePrimal_Private(tao, auglag->LgradX, auglag->LgradS, auglag->G);
  /* compute L = f + 0.5 * mu * [(Ce + Ye/mu)^T (Ce + Ye/mu) + pmax(0, Ci + Yi/mu)^T pmax(0, Ci + Yi/mu)] */
  auglag->Lval = auglag->fval + 0.5*auglag->mu*(eq_norm + ineq_norm);
  return 0;
}
/*
Lc = F + Ye^TCe + Yi^T(Ci - S) + 0.5*mu*[Ce^TCe + (Ci - S)^T(Ci - S)]

dLc/dX = dF/dX + Ye^TAe + Yi^TAi + mu*[Ce^TAe + (Ci - S)^TAi]
         (the quadratic penalty 0.5*mu*c^Tc contributes mu*c^TA to the gradient)

dLc/dS = -[Yi + mu*(Ci - S)]
*/
static PetscErrorCode TaoALMMComputeAugLagAndGradient_Private(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM*)tao->data;
  PetscReal yeTce=0.0, yiTcims=0.0, ceTce=0.0, cimsTcims=0.0;

  TaoALMMEvaluateIterate_Private(tao, auglag->P);
  if (tao->eq_constrained) {
    /* compute scalar contributions */
    VecDot(auglag->Ye, auglag->Ce, &yeTce);
    VecDot(auglag->Ce, auglag->Ce, &ceTce);
    /* dL/dX += ye^T Ae */
    MatMultTransposeAdd(auglag->Ae, auglag->Ye, auglag->LgradX, auglag->LgradX);
    /* dL/dX += mu * ce^T Ae */
    MatMultTranspose(auglag->Ae, auglag->Ce, auglag->Xwork);
    VecAXPY(auglag->LgradX, auglag->mu, auglag->Xwork);
  }
  if (tao->ineq_constrained) {
    /* compute scalar contributions */
    VecDot(auglag->Yi, auglag->Ci, &yiTcims);
    VecDot(auglag->Ci, auglag->Ci, &cimsTcims);
    /* dL/dX += yi^T Ai */
    MatMultTransposeAdd(auglag->Ai, auglag->Yi, auglag->LgradX, auglag->LgradX);
    /* dL/dX += mu * (ci - s)^T Ai */
    MatMultTranspose(auglag->Ai, auglag->Ci, auglag->Xwork);
    VecAXPY(auglag->LgradX, auglag->mu, auglag->Xwork);
    /* dL/dS = -[yi + mu*(ci - s)] */
    VecWAXPY(auglag->LgradS, auglag->mu, auglag->Ci, auglag->Yi);
    VecScale(auglag->LgradS, -1.0);
  }
  /* combine gradient together */
  TaoALMMCombinePrimal_Private(tao, auglag->LgradX, auglag->LgradS, auglag->G);
  /* compute L = f + ye^T ce + yi^T (ci - s) + 0.5*mu*||ce||^2 + 0.5*mu*||ci - s||^2 */
  auglag->Lval = auglag->fval + yeTce + yiTcims + 0.5*auglag->mu*(ceTce + cimsTcims);
  return 0;
}
PetscErrorCode TaoALMMSubsolverObjective_Private(Tao tao, Vec P, PetscReal *Lval, void *ctx)
{
  TAO_ALMM *auglag = (TAO_ALMM*)ctx;

  VecCopy(P, auglag->P);
  (*auglag->sub_obj)(auglag->parent);
  *Lval = auglag->Lval;
  return 0;
}

PetscErrorCode TaoALMMSubsolverObjectiveAndGradient_Private(Tao tao, Vec P, PetscReal *Lval, Vec G, void *ctx)
{
  TAO_ALMM *auglag = (TAO_ALMM*)ctx;

  VecCopy(P, auglag->P);
  (*auglag->sub_obj)(auglag->parent);
  VecCopy(auglag->G, G);
  *Lval = auglag->Lval;
  return 0;
}