1: #include <../src/tao/complementarity/impls/ssls/ssls.h>
2: /*
3: Context for ASXLS
4: -- active-set - reduced matrices formed
5: - inherit properties of original system
6: -- semismooth (S) - function not differentiable
7: - merit function continuously differentiable
8: - Fischer-Burmeister reformulation of complementarity
9: - Billups composition for two finite bounds
10: -- infeasible (I) - iterates not guaranteed to remain within bounds
11: -- feasible (F) - iterates guaranteed to remain within bounds
12: -- linesearch (LS) - Armijo rule on direction
14: Many other reformulations are possible and combinations of
15: feasible/infeasible and linesearch/trust region are possible.
17: Basic theory
18:    Fischer-Burmeister reformulation is semismooth with a continuously
19:    differentiable merit function and strongly semismooth if F has
20:    Lipschitz continuous derivatives.
22: Every accumulation point generated by the algorithm is a stationary
23: point for the merit function. Stationary points of the merit function
24: are solutions of the complementarity problem if
25: a. the stationary point has a BD-regular subdifferential, or
26: b. the Schur complement F'/F'_ff is a P_0-matrix where ff is the
27: index set corresponding to the free variables.
29: If one of the accumulation points has a BD-regular subdifferential then
30: a. the entire sequence converges to this accumulation point at
31: a local q-superlinear rate
32: b. if in addition the reformulation is strongly semismooth near
33: this accumulation point, then the algorithm converges at a
34: local q-quadratic rate.
36: The theory for the feasible version follows from the feasible descent
37: algorithm framework.
39: References:
40: Billups, "Algorithms for Complementarity Problems and Generalized
41:      Equations," Ph.D. thesis, University of Wisconsin - Madison, 1995.
42: De Luca, Facchinei, Kanzow, "A Semismooth Equation Approach to the
43: Solution of Nonlinear Complementarity Problems," Mathematical
44: Programming, 75, pages 407-439, 1996.
45: Ferris, Kanzow, Munson, "Feasible Descent Algorithms for Mixed
46: Complementarity Problems," Mathematical Programming, 86,
47: pages 475-497, 1999.
48: Fischer, "A Special Newton-type Optimization Method," Optimization,
49: 24, pages 269-284, 1992
50: Munson, Facchinei, Ferris, Fischer, Kanzow, "The Semismooth Algorithm
51: for Large Scale Complementarity Problems," Technical Report 99-06,
52: University of Wisconsin - Madison, 1999.
53: */
58: PetscErrorCode TaoSetUp_ASILS(Tao tao) 59: {
60: TAO_SSLS *asls = (TAO_SSLS *)tao->data;
64: VecDuplicate(tao->solution,&tao->gradient);
65: VecDuplicate(tao->solution,&tao->stepdirection);
66: VecDuplicate(tao->solution,&asls->ff);
67: VecDuplicate(tao->solution,&asls->dpsi);
68: VecDuplicate(tao->solution,&asls->da);
69: VecDuplicate(tao->solution,&asls->db);
70: VecDuplicate(tao->solution,&asls->t1);
71: VecDuplicate(tao->solution,&asls->t2);
72: asls->fixed = NULL;
73: asls->free = NULL;
74: asls->J_sub = NULL;
75: asls->Jpre_sub = NULL;
76: asls->w = NULL;
77: asls->r1 = NULL;
78: asls->r2 = NULL;
79: asls->r3 = NULL;
80: asls->dxfree = NULL;
81: return(0);
82: }
86: static PetscErrorCode Tao_ASLS_FunctionGradient(TaoLineSearch ls, Vec X, PetscReal *fcn, Vec G, void *ptr) 87: {
88: Tao tao = (Tao)ptr;
89: TAO_SSLS *asls = (TAO_SSLS *)tao->data;
93: TaoComputeConstraints(tao, X, tao->constraints);
94: VecFischer(X,tao->constraints,tao->XL,tao->XU,asls->ff);
95: VecNorm(asls->ff,NORM_2,&asls->merit);
96: *fcn = 0.5*asls->merit*asls->merit;
98: TaoComputeJacobian(tao,tao->solution,tao->jacobian,tao->jacobian_pre);
99: MatDFischer(tao->jacobian, tao->solution, tao->constraints,tao->XL, tao->XU, asls->t1, asls->t2,asls->da, asls->db);
100: VecPointwiseMult(asls->t1, asls->ff, asls->db);
101: MatMultTranspose(tao->jacobian,asls->t1,G);
102: VecPointwiseMult(asls->t1, asls->ff, asls->da);
103: VecAXPY(G,1.0,asls->t1);
104: return(0);
105: }
109: static PetscErrorCode TaoDestroy_ASILS(Tao tao)110: {
111: TAO_SSLS *ssls = (TAO_SSLS *)tao->data;
115: VecDestroy(&ssls->ff);
116: VecDestroy(&ssls->dpsi);
117: VecDestroy(&ssls->da);
118: VecDestroy(&ssls->db);
119: VecDestroy(&ssls->w);
120: VecDestroy(&ssls->t1);
121: VecDestroy(&ssls->t2);
122: VecDestroy(&ssls->r1);
123: VecDestroy(&ssls->r2);
124: VecDestroy(&ssls->r3);
125: VecDestroy(&ssls->dxfree);
126: MatDestroy(&ssls->J_sub);
127: MatDestroy(&ssls->Jpre_sub);
128: ISDestroy(&ssls->fixed);
129: ISDestroy(&ssls->free);
130: PetscFree(tao->data);
131: return(0);
132: }
136: static PetscErrorCode TaoSolve_ASILS(Tao tao)137: {
138: TAO_SSLS *asls = (TAO_SSLS *)tao->data;
139: PetscReal psi,ndpsi, normd, innerd, t=0;
140: PetscInt iter=0, nf;
141: PetscErrorCode ierr;
142: TaoConvergedReason reason;
143: TaoLineSearchConvergedReason ls_reason;
146: /* Assume that Setup has been called!
147: Set the structure for the Jacobian and create a linear solver. */
149: TaoComputeVariableBounds(tao);
150: TaoLineSearchSetObjectiveAndGradientRoutine(tao->linesearch,Tao_ASLS_FunctionGradient,tao);
151: TaoLineSearchSetObjectiveRoutine(tao->linesearch,Tao_SSLS_Function,tao);
153: /* Calculate the function value and fischer function value at the
154: current iterate */
155: TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&psi,asls->dpsi);
156: VecNorm(asls->dpsi,NORM_2,&ndpsi);
158: while (1) {
159: /* Check the termination criteria */
160: PetscInfo3(tao,"iter %D, merit: %g, ||dpsi||: %g\n",iter, (double)asls->merit, (double)ndpsi);
161: TaoMonitor(tao, iter++, asls->merit, ndpsi, 0.0, t, &reason);
162: if (TAO_CONTINUE_ITERATING != reason) break;
164: /* We are going to solve a linear system of equations. We need to
165: set the tolerances for the solve so that we maintain an asymptotic
166: rate of convergence that is superlinear.
167: Note: these tolerances are for the reduced system. We really need
168: to make sure that the full system satisfies the full-space conditions.
170: This rule gives superlinear asymptotic convergence
171: asls->atol = min(0.5, asls->merit*sqrt(asls->merit));
172: asls->rtol = 0.0;
174: This rule gives quadratic asymptotic convergence
175: asls->atol = min(0.5, asls->merit*asls->merit);
176: asls->rtol = 0.0;
178: Calculate a free and fixed set of variables. The fixed set of
179: variables are those for the d_b is approximately equal to zero.
180: The definition of approximately changes as we approach the solution
181: to the problem.
183: No one rule is guaranteed to work in all cases. The following
184: definition is based on the norm of the Jacobian matrix. If the
185: norm is large, the tolerance becomes smaller. */
186: MatNorm(tao->jacobian,NORM_1,&asls->identifier);
187: asls->identifier = PetscMin(asls->merit, 1e-2) / (1 + asls->identifier);
189: VecSet(asls->t1,-asls->identifier);
190: VecSet(asls->t2, asls->identifier);
192: ISDestroy(&asls->fixed);
193: ISDestroy(&asls->free);
194: VecWhichBetweenOrEqual(asls->t1, asls->db, asls->t2, &asls->fixed);
195: ISComplementVec(asls->fixed,asls->t1, &asls->free);
197: ISGetSize(asls->fixed,&nf);
198: PetscInfo1(tao,"Number of fixed variables: %D\n", nf);
200: /* We now have our partition. Now calculate the direction in the
201: fixed variable space. */
202: TaoVecGetSubVec(asls->ff, asls->fixed, tao->subset_type, 0.0, &asls->r1);
203: TaoVecGetSubVec(asls->da, asls->fixed, tao->subset_type, 1.0, &asls->r2);
204: VecPointwiseDivide(asls->r1,asls->r1,asls->r2);
205: VecSet(tao->stepdirection,0.0);
206: VecISAXPY(tao->stepdirection, asls->fixed,1.0,asls->r1);
208: /* Our direction in the Fixed Variable Set is fixed. Calculate the
209: information needed for the step in the Free Variable Set. To
210: do this, we need to know the diagonal perturbation and the
211: right hand side. */
213: TaoVecGetSubVec(asls->da, asls->free, tao->subset_type, 0.0, &asls->r1);
214: TaoVecGetSubVec(asls->ff, asls->free, tao->subset_type, 0.0, &asls->r2);
215: TaoVecGetSubVec(asls->db, asls->free, tao->subset_type, 1.0, &asls->r3);
216: VecPointwiseDivide(asls->r1,asls->r1, asls->r3);
217: VecPointwiseDivide(asls->r2,asls->r2, asls->r3);
219: /* r1 is the diagonal perturbation
220: r2 is the right hand side
221: r3 is no longer needed
223: Now need to modify r2 for our direction choice in the fixed
224: variable set: calculate t1 = J*d, take the reduced vector
225: of t1 and modify r2. */
227: MatMult(tao->jacobian, tao->stepdirection, asls->t1);
228: TaoVecGetSubVec(asls->t1,asls->free,tao->subset_type,0.0,&asls->r3);
229: VecAXPY(asls->r2, -1.0, asls->r3);
231: /* Calculate the reduced problem matrix and the direction */
232: if (!asls->w && (tao->subset_type == TAO_SUBSET_MASK || tao->subset_type == TAO_SUBSET_MATRIXFREE)) {
233: VecDuplicate(tao->solution, &asls->w);
234: }
235: TaoMatGetSubMat(tao->jacobian, asls->free, asls->w, tao->subset_type,&asls->J_sub);
236: if (tao->jacobian != tao->jacobian_pre) {
237: TaoMatGetSubMat(tao->jacobian_pre, asls->free, asls->w, tao->subset_type, &asls->Jpre_sub);
238: } else {
239: MatDestroy(&asls->Jpre_sub);
240: asls->Jpre_sub = asls->J_sub;
241: PetscObjectReference((PetscObject)(asls->Jpre_sub));
242: }
243: MatDiagonalSet(asls->J_sub, asls->r1,ADD_VALUES);
244: TaoVecGetSubVec(tao->stepdirection, asls->free, tao->subset_type, 0.0, &asls->dxfree);
245: VecSet(asls->dxfree, 0.0);
247: /* Calculate the reduced direction. (Really negative of Newton
248: direction. Therefore, rest of the code uses -d.) */
249: KSPReset(tao->ksp);
250: KSPSetOperators(tao->ksp, asls->J_sub, asls->Jpre_sub);
251: KSPSolve(tao->ksp, asls->r2, asls->dxfree);
253: /* Add the direction in the free variables back into the real direction. */
254: VecISAXPY(tao->stepdirection, asls->free, 1.0,asls->dxfree);
256: /* Check the real direction for descent and if not, use the negative
257: gradient direction. */
258: VecNorm(tao->stepdirection, NORM_2, &normd);
259: VecDot(tao->stepdirection, asls->dpsi, &innerd);
261: if (innerd <= asls->delta*pow(normd, asls->rho)) {
262: PetscInfo1(tao,"Gradient direction: %5.4e.\n", (double)innerd);
263: PetscInfo1(tao, "Iteration %D: newton direction not descent\n", iter);
264: VecCopy(asls->dpsi, tao->stepdirection);
265: VecDot(asls->dpsi, tao->stepdirection, &innerd);
266: }
268: VecScale(tao->stepdirection, -1.0);
269: innerd = -innerd;
271: /* We now have a correct descent direction. Apply a linesearch to
272: find the new iterate. */
273: TaoLineSearchSetInitialStepLength(tao->linesearch, 1.0);
274: TaoLineSearchApply(tao->linesearch, tao->solution, &psi,asls->dpsi, tao->stepdirection, &t, &ls_reason);
275: VecNorm(asls->dpsi, NORM_2, &ndpsi);
276: }
277: return(0);
278: }
280: /* ---------------------------------------------------------- */
281: /*MC
282: TAOASILS - Active-set infeasible linesearch algorithm for solving
283: complementarity constraints
285: Options Database Keys:
286: + -tao_ssls_delta - descent test fraction
287: - -tao_ssls_rho - descent test power
289: Level: beginner
290: M*/
291: EXTERN_C_BEGIN
294: PetscErrorCode TaoCreate_ASILS(Tao tao)295: {
296: TAO_SSLS *asls;
298: const char *armijo_type = TAOLINESEARCHARMIJO;
301: PetscNewLog(tao,&asls);
302: tao->data = (void*)asls;
303: tao->ops->solve = TaoSolve_ASILS;
304: tao->ops->setup = TaoSetUp_ASILS;
305: tao->ops->view = TaoView_SSLS;
306: tao->ops->setfromoptions = TaoSetFromOptions_SSLS;
307: tao->ops->destroy = TaoDestroy_ASILS;
308: tao->subset_type = TAO_SUBSET_SUBVEC;
309: asls->delta = 1e-10;
310: asls->rho = 2.1;
311: asls->fixed = NULL;
312: asls->free = NULL;
313: asls->J_sub = NULL;
314: asls->Jpre_sub = NULL;
315: asls->w = NULL;
316: asls->r1 = NULL;
317: asls->r2 = NULL;
318: asls->r3 = NULL;
319: asls->t1 = NULL;
320: asls->t2 = NULL;
321: asls->dxfree = NULL;
323: asls->identifier = 1e-5;
325: TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
326: TaoLineSearchSetType(tao->linesearch, armijo_type);
327: TaoLineSearchSetFromOptions(tao->linesearch);
329: KSPCreate(((PetscObject)tao)->comm, &tao->ksp);
330: KSPSetFromOptions(tao->ksp);
331: tao->max_it = 2000;
332: tao->max_funcs = 4000;
333: tao->fatol = 0;
334: tao->frtol = 0;
335: tao->gttol = 0;
336: tao->grtol = 0;
337: #if defined(PETSC_USE_REAL_SINGLE)
338: tao->gatol = 1.0e-6;
339: tao->fmin = 1.0e-4;
340: #else
341: tao->gatol = 1.0e-16;
342: tao->fmin = 1.0e-8;
343: #endif
344: return(0);
345: }
346: EXTERN_C_END