Actual source code: bmrm.c

petsc-3.11.4 2019-09-28
  1:  #include <../src/tao/unconstrained/impls/bmrm/bmrm.h>

  3: static PetscErrorCode init_df_solver(TAO_DF*);
  4: static PetscErrorCode ensure_df_space(PetscInt, TAO_DF*);
  5: static PetscErrorCode destroy_df_solver(TAO_DF*);
  6: static PetscReal phi(PetscReal*,PetscInt,PetscReal,PetscReal*,PetscReal,PetscReal*,PetscReal*,PetscReal*);
  7: static PetscInt project(PetscInt,PetscReal*,PetscReal,PetscReal*,PetscReal*,PetscReal*,PetscReal*,PetscReal*,TAO_DF*);
  8: static PetscErrorCode solve(TAO_DF*);


 11: /*------------------------------------------------------------*/
 12: /* The main solver function

 14:    f = Remp(W)          This is what the user provides from the application layer,
 15:    so the objective/gradient callback should return Remp(W) and a subgradient of Remp(W)

 17:    The regularizer is assumed to be the L2 norm: lambda*0.5*W'W
 18: */
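/*
   Editorial note: BMRM minimizes the regularized risk
       J(W) = lambda*0.5*W'W + Remp(W).
   At iteration t it records the cutting plane
       Remp(W) >= <A_t, W> + b_t,   with A_t a subgradient at W_{t-1} and
       b_t = Remp(W_{t-1}) - <W_{t-1}, A_t>,
   and minimizes the piecewise-linear lower bound
       J_t(W) = lambda*0.5*W'W + max_{i<=t} (<A_i, W> + b_i)
   through the dual QP solved by the Dai-Fletcher routines below. The gap
   min_t J(W_t) - J_t(W_t) is the convergence measure epsilon in TaoSolve_BMRM.
*/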

 20: static PetscErrorCode make_grad_node(Vec X, Vec_Chain **p)
 21: {

 25:   PetscNew(p);
 26:   VecDuplicate(X, &(*p)->V);
 27:   VecCopy(X, (*p)->V);
 28:   (*p)->next = NULL;
 29:   return(0);
 30: }

 32: static PetscErrorCode destroy_grad_list(Vec_Chain *head)
 33: {
 35:   Vec_Chain      *p = head->next, *q;

 38:   while(p) {
 39:     q = p->next;
 40:     VecDestroy(&p->V);
 41:     PetscFree(p);
 42:     p = q;
 43:   }
 44:   head->next = NULL;
 45:   return(0);
 46: }


 49: static PetscErrorCode TaoSolve_BMRM(Tao tao)
 50: {
 51:   PetscErrorCode     ierr;
 52:   TAO_DF             df;
 53:   TAO_BMRM           *bmrm = (TAO_BMRM*)tao->data;

 55:   /* Values and pointers to parts of the optimization problem */
 56:   PetscReal          f = 0.0;
 57:   Vec                W = tao->solution;
 58:   Vec                G = tao->gradient;
 59:   PetscReal          lambda;
 60:   PetscReal          bt;
 61:   Vec_Chain          grad_list, *tail_glist, *pgrad;
 62:   PetscInt           i;
 63:   PetscMPIInt        rank;

 65:   /* Used in converged criteria check */
 66:   PetscReal          reg;
 67:   PetscReal          jtwt = 0.0, max_jtwt, pre_epsilon, epsilon, jw, min_jw;
 68:   PetscReal          innerSolverTol;
 69:   MPI_Comm           comm;

 72:   PetscObjectGetComm((PetscObject)tao,&comm);
 73:   MPI_Comm_rank(comm, &rank);
 74:   lambda = bmrm->lambda;

 76:   /* Check Stopping Condition */
 77:   tao->step = 1.0;
 78:   max_jtwt = -BMRM_INFTY;
 79:   min_jw = BMRM_INFTY;
 80:   innerSolverTol = 1.0;
 81:   epsilon = 0.0;

 83:   if (!rank) {
 84:     init_df_solver(&df);
 85:     grad_list.next = NULL;
 86:     tail_glist = &grad_list;
 87:   }

 89:   df.tol = 1e-6;
 90:   tao->reason = TAO_CONTINUE_ITERATING;

 92:   /*-----------------Algorithm Begins------------------------*/
 93:   /* make the scatter */
 94:   VecScatterCreateToZero(W, &bmrm->scatter, &bmrm->local_w);
 95:   VecAssemblyBegin(bmrm->local_w);
 96:   VecAssemblyEnd(bmrm->local_w);

 98:   /* NOTE: In application pass the sub-gradient of Remp(W) */
 99:   TaoComputeObjectiveAndGradient(tao, W, &f, G);
100:   TaoLogConvergenceHistory(tao,f,1.0,0.0,tao->ksp_its);
101:   TaoMonitor(tao,tao->niter,f,1.0,0.0,tao->step);
102:   (*tao->ops->convergencetest)(tao,tao->cnvP);
103: 
104:   while (tao->reason == TAO_CONTINUE_ITERATING) {
105:     /* Call general purpose update function */
106:     if (tao->ops->update) {
107:       (*tao->ops->update)(tao, tao->niter, tao->user_update);
108:     }
109: 
110:     /* compute bt = Remp(Wt-1) - <Wt-1, At> */
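    /* By convexity, Remp(W) >= <At, W> + bt for every W, so (At, bt) defines a
       valid cutting plane of Remp; At is the subgradient G computed above. */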
111:     VecDot(W, G, &bt);
112:     bt = f - bt;

114:     /* First gather the gradient to the master node */
115:     VecScatterBegin(bmrm->scatter, G, bmrm->local_w, INSERT_VALUES, SCATTER_FORWARD);
116:     VecScatterEnd(bmrm->scatter, G, bmrm->local_w, INSERT_VALUES, SCATTER_FORWARD);

118:     /* Bring up the inner solver */
119:     if (!rank) {
120:       ensure_df_space(tao->niter+1, &df);
121:       make_grad_node(bmrm->local_w, &pgrad);
122:       tail_glist->next = pgrad;
123:       tail_glist = pgrad;

125:       df.a[tao->niter] = 1.0;
126:       df.f[tao->niter] = -bt;
127:       df.u[tao->niter] = 1.0;
128:       df.l[tao->niter] = 0.0;

130:       /* set up the Q */
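      /* Q[i][j] = <A_i, A_j>/lambda is the Gram matrix of the stored subgradients
         (the Hessian of the dual bundle QP); only the new row/column belonging to
         iteration tao->niter needs to be filled in here. */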
131:       pgrad = grad_list.next;
132:       for (i=0; i<=tao->niter; i++) {
133:         if (!pgrad) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Assert that there are at least tao->niter+1 pgrad available");
134:         VecDot(pgrad->V, bmrm->local_w, &reg);
135:         df.Q[i][tao->niter] = df.Q[tao->niter][i] = reg / lambda;
136:         pgrad = pgrad->next;
137:       }

139:       if (tao->niter > 0) {
140:         df.x[tao->niter] = 0.0;
141:         solve(&df);
142:       } else
143:         df.x[0] = 1.0;

145:       /* now computing Jt*(alpha_t) which should be = Jt(wt) to check convergence */
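      /* By duality, W_t = -(1/lambda)*sum_i alpha_i A_i and
         Jt(W_t) = sum_i alpha_i b_i - lambda*0.5*||W_t||^2, which is exactly what
         jtwt accumulates below (recall df.f[i] stores -b_i). */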
146:       jtwt = 0.0;
147:       VecSet(bmrm->local_w, 0.0);
148:       pgrad = grad_list.next;
149:       for (i=0; i<=tao->niter; i++) {
150:         jtwt -= df.x[i] * df.f[i];
151:         VecAXPY(bmrm->local_w, -df.x[i] / lambda, pgrad->V);
152:         pgrad = pgrad->next;
153:       }

155:       VecNorm(bmrm->local_w, NORM_2, &reg);
156:       reg = 0.5*lambda*reg*reg;
157:       jtwt -= reg;
158:     } /* end if rank == 0 */

160:     /* scatter the new W to all nodes */
161:     VecScatterBegin(bmrm->scatter,bmrm->local_w,W,INSERT_VALUES,SCATTER_REVERSE);
162:     VecScatterEnd(bmrm->scatter,bmrm->local_w,W,INSERT_VALUES,SCATTER_REVERSE);

164:     TaoComputeObjectiveAndGradient(tao, W, &f, G);

166:     MPI_Bcast(&jtwt,1,MPIU_REAL,0,comm);
167:     MPI_Bcast(&reg,1,MPIU_REAL,0,comm);

169:     jw = reg + f;                                       /* J(w) = regularizer + Remp(w) */
170:     if (jw < min_jw) min_jw = jw;
171:     if (jtwt > max_jtwt) max_jtwt = jtwt;

173:     pre_epsilon = epsilon;
174:     epsilon = min_jw - jtwt;
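    /* (min_jw - jtwt is the BMRM gap: since J_t lower-bounds J, this quantity
       bounds the suboptimality of the best iterate seen so far.) */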

176:     if (!rank) {
177:       if (innerSolverTol > epsilon) innerSolverTol = epsilon;
178:       else if (innerSolverTol < 1e-7) innerSolverTol = 1e-7;

180:       /* if the annealing doesn't work well, lower the inner solver tolerance */
181:       if(pre_epsilon < epsilon) innerSolverTol *= 0.2;

183:       df.tol = innerSolverTol*0.5;
184:     }

186:     tao->niter++;
187:     TaoLogConvergenceHistory(tao,min_jw,epsilon,0.0,tao->ksp_its);
188:     TaoMonitor(tao,tao->niter,min_jw,epsilon,0.0,tao->step);
189:     (*tao->ops->convergencetest)(tao,tao->cnvP);
190:   }

192:   /* free all the memory */
193:   if (!rank) {
194:     destroy_grad_list(&grad_list);
195:     destroy_df_solver(&df);
196:   }

198:   VecDestroy(&bmrm->local_w);
199:   VecScatterDestroy(&bmrm->scatter);
200:   return(0);
201: }
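/*
   Editorial note on the parallel layout above: the distributed subgradient G is
   gathered onto rank 0 through the VecScatterCreateToZero scatter, rank 0 alone
   keeps the cutting-plane list and solves the dual QP, and the new iterate is
   scattered back to W on all ranks (SCATTER_REVERSE) before the next
   objective/gradient evaluation; jtwt and reg are then broadcast for the
   convergence test.
*/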


204: /* ---------------------------------------------------------- */

206: static PetscErrorCode TaoSetup_BMRM(Tao tao)
207: {


212:   /* Allocate some arrays */
213:   if (!tao->gradient) {
214:     VecDuplicate(tao->solution, &tao->gradient);
215:   }
216:   return(0);
217: }

219: /*------------------------------------------------------------*/
220: static PetscErrorCode TaoDestroy_BMRM(Tao tao)
221: {

225:   PetscFree(tao->data);
226:   return(0);
227: }

229: static PetscErrorCode TaoSetFromOptions_BMRM(PetscOptionItems *PetscOptionsObject,Tao tao)
230: {
232:   TAO_BMRM*      bmrm = (TAO_BMRM*)tao->data;

235:   PetscOptionsHead(PetscOptionsObject,"BMRM for regularized risk minimization");
236:   PetscOptionsReal("-tao_bmrm_lambda", "regulariser weight","", 100,&bmrm->lambda,NULL);
237:   PetscOptionsTail();
238:   return(0);
239: }

241: /*------------------------------------------------------------*/
242: static PetscErrorCode TaoView_BMRM(Tao tao, PetscViewer viewer)
243: {
244:   PetscBool      isascii;

248:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
249:   if (isascii) {
250:     PetscViewerASCIIPushTab(viewer);
251:     PetscViewerASCIIPopTab(viewer);
252:   }
253:   return(0);
254: }

256: /*------------------------------------------------------------*/
257: /*MC
258:   TAOBMRM - bundle method for regularized risk minimization

260:   Options Database Keys:
 261: . -tao_bmrm_lambda - regulariser weight

263:   Level: beginner
264: M*/
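/*
   Editorial usage sketch (not part of the original file; error checking with
   ierr/CHKERRQ is omitted and the callback name EvaluateRemp is a hypothetical
   placeholder). An application selects this solver and supplies Remp(W) plus a
   subgradient through the standard TAO objective-and-gradient callback:

       static PetscErrorCode EvaluateRemp(Tao tao, Vec W, PetscReal *remp, Vec G, void *ctx)
       {
         ... set *remp = Remp(W) and fill G with a subgradient of Remp at W ...
         return 0;
       }

       Tao tao;
       TaoCreate(PETSC_COMM_WORLD, &tao);
       TaoSetType(tao, TAOBMRM);
       TaoSetInitialVector(tao, W);                                W: solution Vec
       TaoSetObjectiveAndGradientRoutine(tao, EvaluateRemp, NULL);
       TaoSetFromOptions(tao);                           honours -tao_bmrm_lambda
       TaoSolve(tao);
       TaoDestroy(&tao);
*/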

266: PETSC_EXTERN PetscErrorCode TaoCreate_BMRM(Tao tao)
267: {
268:   TAO_BMRM       *bmrm;

272:   tao->ops->setup = TaoSetup_BMRM;
273:   tao->ops->solve = TaoSolve_BMRM;
274:   tao->ops->view  = TaoView_BMRM;
275:   tao->ops->setfromoptions = TaoSetFromOptions_BMRM;
276:   tao->ops->destroy = TaoDestroy_BMRM;

278:   PetscNewLog(tao,&bmrm);
279:   bmrm->lambda = 1.0;
280:   tao->data = (void*)bmrm;

282:   /* Override default settings (unless already changed) */
283:   if (!tao->max_it_changed) tao->max_it = 2000;
284:   if (!tao->max_funcs_changed) tao->max_funcs = 4000;
285:   if (!tao->gatol_changed) tao->gatol = 1.0e-12;
286:   if (!tao->grtol_changed) tao->grtol = 1.0e-12;

288:   return(0);
289: }

291: PetscErrorCode init_df_solver(TAO_DF *df)
292: {
293:   PetscInt       i, n = INCRE_DIM;

297:   /* default values */
298:   df->maxProjIter = 200;
299:   df->maxPGMIter = 300000;
300:   df->b = 1.0;

302:   /* memory space required by Dai-Fletcher */
303:   df->cur_num_cp = n;
304:   PetscMalloc1(n, &df->f);
305:   PetscMalloc1(n, &df->a);
306:   PetscMalloc1(n, &df->l);
307:   PetscMalloc1(n, &df->u);
308:   PetscMalloc1(n, &df->x);
309:   PetscMalloc1(n, &df->Q);

311:   for (i = 0; i < n; i ++) {
312:     PetscMalloc1(n, &df->Q[i]);
313:   }

315:   PetscMalloc1(n, &df->g);
316:   PetscMalloc1(n, &df->y);
317:   PetscMalloc1(n, &df->tempv);
318:   PetscMalloc1(n, &df->d);
319:   PetscMalloc1(n, &df->Qd);
320:   PetscMalloc1(n, &df->t);
321:   PetscMalloc1(n, &df->xplus);
322:   PetscMalloc1(n, &df->tplus);
323:   PetscMalloc1(n, &df->sk);
324:   PetscMalloc1(n, &df->yk);

326:   PetscMalloc1(n, &df->ipt);
327:   PetscMalloc1(n, &df->ipt2);
328:   PetscMalloc1(n, &df->uv);
329:   return(0);
330: }
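/*
   Editorial note: the TAO_DF workspace describes the dual (Dai-Fletcher) QP
       min_x 0.5*x'Qx + f'x   s.t.  a'x = b,  l <= x <= u,
   where, as filled in by TaoSolve_BMRM, Q[i][j] = <A_i,A_j>/lambda, f[i] = -b_i,
   a[i] = 1, b = 1, l[i] = 0, u[i] = 1, and x holds the current dual iterate
   alpha. The remaining arrays (g, y, tempv, d, Qd, t, xplus, tplus, sk, yk,
   ipt, ipt2, uv) are scratch storage for solve() below.
*/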

332: PetscErrorCode ensure_df_space(PetscInt dim, TAO_DF *df)
333: {
335:   PetscReal      *tmp, **tmp_Q;
336:   PetscInt       i, n, old_n;

339:   df->dim = dim;
340:   if (dim <= df->cur_num_cp) return(0);

342:   old_n = df->cur_num_cp;
343:   df->cur_num_cp += INCRE_DIM;
344:   n = df->cur_num_cp;

346:   /* memory space required by dai-fletcher */
347:   PetscMalloc1(n, &tmp);
348:   PetscMemcpy(tmp, df->f, sizeof(PetscReal)*old_n);
349:   PetscFree(df->f);
350:   df->f = tmp;

352:   PetscMalloc1(n, &tmp);
353:   PetscMemcpy(tmp, df->a, sizeof(PetscReal)*old_n);
354:   PetscFree(df->a);
355:   df->a = tmp;

357:   PetscMalloc1(n, &tmp);
358:   PetscMemcpy(tmp, df->l, sizeof(PetscReal)*old_n);
359:   PetscFree(df->l);
360:   df->l = tmp;

362:   PetscMalloc1(n, &tmp);
363:   PetscMemcpy(tmp, df->u, sizeof(PetscReal)*old_n);
364:   PetscFree(df->u);
365:   df->u = tmp;

367:   PetscMalloc1(n, &tmp);
368:   PetscMemcpy(tmp, df->x, sizeof(PetscReal)*old_n);
369:   PetscFree(df->x);
370:   df->x = tmp;

372:   PetscMalloc1(n, &tmp_Q);
373:   for (i = 0; i < n; i ++) {
374:     PetscMalloc1(n, &tmp_Q[i]);
375:     if (i < old_n) {
376:       PetscMemcpy(tmp_Q[i], df->Q[i], sizeof(PetscReal)*old_n);
377:       PetscFree(df->Q[i]);
378:     }
379:   }

381:   PetscFree(df->Q);
382:   df->Q = tmp_Q;

384:   PetscFree(df->g);
385:   PetscMalloc1(n, &df->g);

387:   PetscFree(df->y);
388:   PetscMalloc1(n, &df->y);

390:   PetscFree(df->tempv);
391:   PetscMalloc1(n, &df->tempv);

393:   PetscFree(df->d);
394:   PetscMalloc1(n, &df->d);

396:   PetscFree(df->Qd);
397:   PetscMalloc1(n, &df->Qd);

399:   PetscFree(df->t);
400:   PetscMalloc1(n, &df->t);

402:   PetscFree(df->xplus);
403:   PetscMalloc1(n, &df->xplus);

405:   PetscFree(df->tplus);
406:   PetscMalloc1(n, &df->tplus);

408:   PetscFree(df->sk);
409:   PetscMalloc1(n, &df->sk);

411:   PetscFree(df->yk);
412:   PetscMalloc1(n, &df->yk);

414:   PetscFree(df->ipt);
415:   PetscMalloc1(n, &df->ipt);

417:   PetscFree(df->ipt2);
418:   PetscMalloc1(n, &df->ipt2);

420:   PetscFree(df->uv);
421:   PetscMalloc1(n, &df->uv);
422:   return(0);
423: }

425: PetscErrorCode destroy_df_solver(TAO_DF *df)
426: {
428:   PetscInt       i;

431:   PetscFree(df->f);
432:   PetscFree(df->a);
433:   PetscFree(df->l);
434:   PetscFree(df->u);
435:   PetscFree(df->x);

437:   for (i = 0; i < df->cur_num_cp; i ++) {
438:     PetscFree(df->Q[i]);
439:   }
440:   PetscFree(df->Q);
441:   PetscFree(df->ipt);
442:   PetscFree(df->ipt2);
443:   PetscFree(df->uv);
444:   PetscFree(df->g);
445:   PetscFree(df->y);
446:   PetscFree(df->tempv);
447:   PetscFree(df->d);
448:   PetscFree(df->Qd);
449:   PetscFree(df->t);
450:   PetscFree(df->xplus);
451:   PetscFree(df->tplus);
452:   PetscFree(df->sk);
453:   PetscFree(df->yk);
454:   return(0);
455: }

457: /* Piecewise linear monotone target function for the Dai-Fletcher projector */
458: PetscReal phi(PetscReal *x,PetscInt n,PetscReal lambda,PetscReal *a,PetscReal b,PetscReal *c,PetscReal *l,PetscReal *u)
459: {
460:   PetscReal r = 0.0;
461:   PetscInt  i;

463:   for (i = 0; i < n; i++){
464:     x[i] = -c[i] + lambda*a[i];
465:     if (x[i] > u[i])     x[i] = u[i];
466:     else if(x[i] < l[i]) x[i] = l[i];
467:     r += a[i]*x[i];
468:   }
469:   return r - b;
470: }
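/*
   Worked example (editorial): for n = 2, a = (1,1), b = 1, c = (0,0),
   l = (0,0), u = (1,1), phi sets x_i = clip(lambda*a_i - c_i, l_i, u_i)
   = clip(lambda, 0, 1) and returns 2*clip(lambda, 0, 1) - 1. Its root
   lambda = 0.5 yields x = (0.5, 0.5), the projection of the origin onto
   { x : x_1 + x_2 = 1, 0 <= x <= 1 }. project() below locates this root by a
   bracketing phase followed by a secant iteration.
*/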

472: /** Modified Dai-Fletcher QP projector solves the problem:
473:  *
474:  *      minimise  0.5*x'*x - c'*x
475:  *      subj to   a'*x = b
476:  *                l \leq x \leq u
477:  *
478:  *  \param c The point to be projected onto feasible set
479:  */
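/* Sign convention (editorial note): phi() uses x = clip(-c + lambda*a, l, u),
   so the argument c must be the NEGATED point; every caller in solve() builds
   tempv = -(point) before calling project(), which then returns P(point) in x. */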
480: PetscInt project(PetscInt n,PetscReal *a,PetscReal b,PetscReal *c,PetscReal *l,PetscReal *u,PetscReal *x,PetscReal *lam_ext,TAO_DF *df)
481: {
482:   PetscReal      lambda, lambdal, lambdau, dlambda, lambda_new;
483:   PetscReal      r, rl, ru, s;
484:   PetscInt       innerIter;
485:   PetscBool      nonNegativeSlack = PETSC_FALSE;

488:   *lam_ext = 0;
489:   lambda  = 0;
490:   dlambda = 0.5;
491:   innerIter = 1;

493:   /*  \phi(x;lambda) := 0.5*x'*x + c'*x - lambda*(a'*x-b)
494:    *
495:    *  Optimality conditions for \phi:
496:    *
497:    *  1. lambda   <= 0
498:    *  2. r        <= 0
499:    *  3. r*lambda == 0
500:    */

502:   /* Bracketing Phase */
503:   r = phi(x, n, lambda, a, b, c, l, u);

505:   if(nonNegativeSlack) {
506:     /* inequality constraint, i.e., with \xi >= 0 constraint */
507:     if (r < TOL_R) return 0;
508:   } else  {
 509:     /* equality constraint, i.e., without \xi >= 0 constraint */
510:     if (PetscAbsReal(r) < TOL_R) return 0;
511:   }

513:   if (r < 0.0){
514:     lambdal = lambda;
515:     rl      = r;
516:     lambda  = lambda + dlambda;
517:     r       = phi(x, n, lambda, a, b, c, l, u);
518:     while (r < 0.0 && dlambda < BMRM_INFTY)  {
519:       lambdal = lambda;
520:       s       = rl/r - 1.0;
521:       if (s < 0.1) s = 0.1;
522:       dlambda = dlambda + dlambda/s;
523:       lambda  = lambda + dlambda;
524:       rl      = r;
525:       r       = phi(x, n, lambda, a, b, c, l, u);
526:     }
527:     lambdau = lambda;
528:     ru      = r;
529:   } else {
530:     lambdau = lambda;
531:     ru      = r;
532:     lambda  = lambda - dlambda;
533:     r       = phi(x, n, lambda, a, b, c, l, u);
534:     while (r > 0.0 && dlambda > -BMRM_INFTY) {
535:       lambdau = lambda;
536:       s       = ru/r - 1.0;
537:       if (s < 0.1) s = 0.1;
538:       dlambda = dlambda + dlambda/s;
539:       lambda  = lambda - dlambda;
540:       ru      = r;
541:       r       = phi(x, n, lambda, a, b, c, l, u);
542:     }
543:     lambdal = lambda;
544:     rl      = r;
545:   }

547:   if(PetscAbsReal(dlambda) > BMRM_INFTY) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"L2N2_DaiFletcherPGM detected Infeasible QP problem!");

549:   if(ru == 0){
550:     return innerIter;
551:   }

553:   /* Secant Phase */
554:   s       = 1.0 - rl/ru;
555:   dlambda = dlambda/s;
556:   lambda  = lambdau - dlambda;
557:   r       = phi(x, n, lambda, a, b, c, l, u);

559:   while (PetscAbsReal(r) > TOL_R
560:          && dlambda > TOL_LAM * (1.0 + PetscAbsReal(lambda))
561:          && innerIter < df->maxProjIter){
562:     innerIter++;
563:     if (r > 0.0){
564:       if (s <= 2.0){
565:         lambdau = lambda;
566:         ru      = r;
567:         s       = 1.0 - rl/ru;
568:         dlambda = (lambdau - lambdal) / s;
569:         lambda  = lambdau - dlambda;
570:       } else {
571:         s          = ru/r-1.0;
572:         if (s < 0.1) s = 0.1;
573:         dlambda    = (lambdau - lambda) / s;
574:         lambda_new = 0.75*lambdal + 0.25*lambda;
575:         if (lambda_new < (lambda - dlambda))
576:           lambda_new = lambda - dlambda;
577:         lambdau    = lambda;
578:         ru         = r;
579:         lambda     = lambda_new;
580:         s          = (lambdau - lambdal) / (lambdau - lambda);
581:       }
582:     } else {
583:       if (s >= 2.0){
584:         lambdal = lambda;
585:         rl      = r;
586:         s       = 1.0 - rl/ru;
587:         dlambda = (lambdau - lambdal) / s;
588:         lambda  = lambdau - dlambda;
589:       } else {
590:         s          = rl/r - 1.0;
591:         if (s < 0.1) s = 0.1;
592:         dlambda    = (lambda-lambdal) / s;
593:         lambda_new = 0.75*lambdau + 0.25*lambda;
594:         if (lambda_new > (lambda + dlambda))
595:           lambda_new = lambda + dlambda;
596:         lambdal    = lambda;
597:         rl         = r;
598:         lambda     = lambda_new;
599:         s          = (lambdau - lambdal) / (lambdau-lambda);
600:       }
601:     }
602:     r = phi(x, n, lambda, a, b, c, l, u);
603:   }

605:   *lam_ext = lambda;
606:   if(innerIter >= df->maxProjIter) {
607:     PetscInfo(NULL,"WARNING: DaiFletcher max iterations\n");
608:   }
609:   return innerIter;
610: }
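/*
   Editorial note on solve() below: it is a Dai-Fletcher-type projected gradient
   method for the box/equality constrained QP stored in TAO_DF. Each iteration
   projects x - alpha*g with project(), accepts the step through an adaptive
   nonmonotone test driven by fr/fbest/fc (memory length L = 2), and updates the
   steplength alpha from Barzilai-Borwein-type ratios built from sk = x_{k+1}-x_k
   and yk = Q*sk. The loop exits once an approximate KKT test on the multiplier
   estimate kktlam is satisfied.
*/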


613: PetscErrorCode solve(TAO_DF *df)
614: {
616:   PetscInt       i, j, innerIter, it, it2, luv, info, lscount = 0, projcount = 0;
617:   PetscReal      gd, max, ak, bk, akold, bkold, lamnew, alpha, kktlam=0.0, lam_ext;
618:   PetscReal      DELTAsv, ProdDELTAsv;
619:   PetscReal      c, *tempQ;
620:   PetscReal      *x = df->x, *a = df->a, b = df->b, *l = df->l, *u = df->u, tol = df->tol;
621:   PetscReal      *tempv = df->tempv, *y = df->y, *g = df->g, *d = df->d, *Qd = df->Qd;
622:   PetscReal      *xplus = df->xplus, *tplus = df->tplus, *sk = df->sk, *yk = df->yk;
623:   PetscReal      **Q = df->Q, *f = df->f, *t = df->t;
624:   PetscInt       dim = df->dim, *ipt = df->ipt, *ipt2 = df->ipt2, *uv = df->uv;

626:   /*** variables for the adaptive nonmonotone linesearch ***/
627:   PetscInt    L, llast;
628:   PetscReal   fr, fbest, fv, fc, fv0;

630:   c = BMRM_INFTY;

632:   DELTAsv = EPS_SV;
633:   if (tol <= 1.0e-5 || dim <= 20) ProdDELTAsv = 0.0F;
634:   else  ProdDELTAsv = EPS_SV;

636:   for (i = 0; i < dim; i++)  tempv[i] = -x[i];

638:   lam_ext = 0.0;

640:   /* Project the initial solution */
641:   projcount += project(dim, a, b, tempv, l, u, x, &lam_ext, df);

643:   /* Compute gradient
644:      g = Q*x + f; */

646:   it = 0;
647:   for (i = 0; i < dim; i++) {
648:     if (PetscAbsReal(x[i]) > ProdDELTAsv) ipt[it++] = i;
649:   }

651:   PetscMemzero(t, dim*sizeof(PetscReal));
652:   for (i = 0; i < it; i++){
653:     tempQ = Q[ipt[i]];
654:     for (j = 0; j < dim; j++) t[j] += (tempQ[j]*x[ipt[i]]);
655:   }
656:   for (i = 0; i < dim; i++){
657:     g[i] = t[i] + f[i];
658:   }


661:   /* y = -(x_{k} - g_{k}) */
662:   for (i = 0; i < dim; i++){
663:     y[i] = g[i] - x[i];
664:   }

666:   /* Project x_{k} - g_{k} */
667:   projcount += project(dim, a, b, y, l, u, tempv, &lam_ext, df);

669:   /* y = P(x_{k} - g_{k}) - x_{k} */
670:   max = ALPHA_MIN;
671:   for (i = 0; i < dim; i++){
672:     y[i] = tempv[i] - x[i];
673:     if (PetscAbsReal(y[i]) > max) max = PetscAbsReal(y[i]);
674:   }

676:   if (max < tol*1e-3){
677:     return 0;
678:   }

680:   alpha = 1.0 / max;

682:   /* fv0 = f(x_{0}). Recall t = Q x_{k}  */
683:   fv0   = 0.0;
684:   for (i = 0; i < dim; i++) fv0 += x[i] * (0.5*t[i] + f[i]);

686:   /*** adaptive nonmonotone linesearch ***/
687:   L     = 2;
688:   fr    = ALPHA_MAX;
689:   fbest = fv0;
690:   fc    = fv0;
691:   llast = 0;
692:   akold = bkold = 0.0;

694:   /***      Iterator begins     ***/
695:   for (innerIter = 1; innerIter <= df->maxPGMIter; innerIter++) {

697:     /* tempv = -(x_{k} - alpha*g_{k}) */
698:     for (i = 0; i < dim; i++)  tempv[i] = alpha*g[i] - x[i];

700:     /* Project x_{k} - alpha*g_{k} */
701:     projcount += project(dim, a, b, tempv, l, u, y, &lam_ext, df);


704:     /* gd = \inner{d_{k}}{g_{k}}
705:         d = P(x_{k} - alpha*g_{k}) - x_{k}
706:     */
707:     gd = 0.0;
708:     for (i = 0; i < dim; i++){
709:       d[i] = y[i] - x[i];
710:       gd  += d[i] * g[i];
711:     }

713:     /* Gradient computation  */

715:     /* compute Qd = Q*d  or  Qd = Q*y - t depending on their sparsity */

717:     it = it2 = 0;
718:     for (i = 0; i < dim; i++){
719:       if (PetscAbsReal(d[i]) > (ProdDELTAsv*1.0e-2)) ipt[it++]   = i;
720:     }
721:     for (i = 0; i < dim; i++) {
722:       if (PetscAbsReal(y[i]) > ProdDELTAsv) ipt2[it2++] = i;
723:     }

725:     PetscMemzero(Qd, dim*sizeof(PetscReal));
726:     /* compute Qd = Q*d */
727:     if (it < it2){
728:       for (i = 0; i < it; i++){
729:         tempQ = Q[ipt[i]];
730:         for (j = 0; j < dim; j++) Qd[j] += (tempQ[j] * d[ipt[i]]);
731:       }
732:     } else { /* compute Qd = Q*y-t */
733:       for (i = 0; i < it2; i++){
734:         tempQ = Q[ipt2[i]];
735:         for (j = 0; j < dim; j++) Qd[j] += (tempQ[j] * y[ipt2[i]]);
736:       }
737:       for (j = 0; j < dim; j++) Qd[j] -= t[j];
738:     }

740:     /* ak = inner{d_{k}}{d_{k}} */
741:     ak = 0.0;
742:     for (i = 0; i < dim; i++) ak += d[i] * d[i];

744:     bk = 0.0;
745:     for (i = 0; i < dim; i++) bk += d[i]*Qd[i];

747:     if (bk > EPS*ak && gd < 0.0)  lamnew = -gd/bk;
748:     else lamnew = 1.0;

750:     /* fv is computing f(x_{k} + d_{k}) */
751:     fv = 0.0;
752:     for (i = 0; i < dim; i++){
753:       xplus[i] = x[i] + d[i];
754:       tplus[i] = t[i] + Qd[i];
755:       fv      += xplus[i] * (0.5*tplus[i] + f[i]);
756:     }

758:     /* fr is fref */
759:     if ((innerIter == 1 && fv >= fv0) || (innerIter > 1 && fv >= fr)){
760:       lscount++;
761:       fv = 0.0;
762:       for (i = 0; i < dim; i++){
763:         xplus[i] = x[i] + lamnew*d[i];
764:         tplus[i] = t[i] + lamnew*Qd[i];
765:         fv      += xplus[i] * (0.5*tplus[i] + f[i]);
766:       }
767:     }

769:     for (i = 0; i < dim; i++){
770:       sk[i] = xplus[i] - x[i];
771:       yk[i] = tplus[i] - t[i];
772:       x[i]  = xplus[i];
773:       t[i]  = tplus[i];
774:       g[i]  = t[i] + f[i];
775:     }

777:     /* update the line search control parameters */
778:     if (fv < fbest){
779:       fbest = fv;
780:       fc    = fv;
781:       llast = 0;
782:     } else {
783:       fc = (fc > fv ? fc : fv);
784:       llast++;
785:       if (llast == L){
786:         fr    = fc;
787:         fc    = fv;
788:         llast = 0;
789:       }
790:     }

792:     ak = bk = 0.0;
793:     for (i = 0; i < dim; i++){
794:       ak += sk[i] * sk[i];
795:       bk += sk[i] * yk[i];
796:     }

798:     if (bk <= EPS*ak) alpha = ALPHA_MAX;
799:     else {
800:       if (bkold < EPS*akold) alpha = ak/bk;
801:       else alpha = (akold+ak)/(bkold+bk);

803:       if (alpha > ALPHA_MAX) alpha = ALPHA_MAX;
804:       else if (alpha < ALPHA_MIN) alpha = ALPHA_MIN;
805:     }

807:     akold = ak;
808:     bkold = bk;

810:     /*** stopping criterion based on KKT conditions ***/
811:     /* at optimal, gradient of lagrangian w.r.t. x is zero */
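    /* Editorial expansion: for free variables (l_i < x_i < u_i) stationarity gives
       g_i + kktlam*a_i = 0, so kktlam is estimated by averaging -a_i*g_i over the
       free set; variables at the lower (upper) bound only require
       g_i + kktlam*a_i >= -tol (<= tol), which is what the loops below verify. */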

813:     bk = 0.0;
814:     for (i = 0; i < dim; i++) bk +=  x[i] * x[i];

816:     if (PetscSqrtReal(ak) < tol*10 * PetscSqrtReal(bk)){
817:       it     = 0;
818:       luv    = 0;
819:       kktlam = 0.0;
820:       for (i = 0; i < dim; i++){
 821:         /* x[i] is strictly inside the box (a free variable), hence the Lagrange
 822:                 multipliers for the box constraints are zero. The Lagrange multiplier
 823:                 for the linear constraint a'x = b is then estimated as below
 824:         */
825:         if ((x[i] > DELTAsv) && (x[i] < c-DELTAsv)){
826:           ipt[it++] = i;
827:           kktlam    = kktlam - a[i]*g[i];
828:         } else  uv[luv++] = i;
829:       }

831:       if (it == 0 && PetscSqrtReal(ak) < tol*0.5 * PetscSqrtReal(bk)) return 0;
832:       else {
833:         kktlam = kktlam/it;
834:         info   = 1;
835:         for (i = 0; i < it; i++) {
836:           if (PetscAbsReal(a[ipt[i]] * g[ipt[i]] + kktlam) > tol) {
837:             info = 0;
838:             break;
839:           }
840:         }
841:         if (info == 1)  {
842:           for (i = 0; i < luv; i++)  {
843:             if (x[uv[i]] <= DELTAsv){
 844:               /* x[i] is at the lower bound, so the Lagrange multiplier (say, beta) for
 845:                      the lower bound may be nonzero; the gradient without beta must be >= 0
 846:               */
847:               if (g[uv[i]] + kktlam*a[uv[i]] < -tol){
848:                 info = 0;
849:                 break;
850:               }
851:             } else {
 852:               /* x[i] is at the upper bound, so the Lagrange multiplier (say, eta) for
 853:                      the upper bound may be nonzero; the gradient without eta must be <= 0
 854:               */
855:               if (g[uv[i]] + kktlam*a[uv[i]] > tol) {
856:                 info = 0;
857:                 break;
858:               }
859:             }
860:           }
861:         }

863:         if (info == 1) return 0;
864:       }
865:     }
866:   }
867:   return 0;
868: }