/* $Id: CouenneTNLP.cpp 1147 2015-05-04 14:01:51Z stefan $
 *
 * Name:    CouenneTNLP.cpp
 * Authors: Pietro Belotti
 * Purpose: Implementation of an NLP interface with gradient/Jacobian/etc
 *
 * This file is licensed under the Eclipse Public License (EPL)
 */

#include "IpTNLP.hpp"
#include "IpIpoptApplication.hpp"

#include "CouenneSparseMatrix.hpp"
#include "CouenneProblem.hpp"
#include "CouenneProblemElem.hpp"
#include "CouenneExprVar.hpp"
#include "CouenneExprJac.hpp"
#include "CouenneExprHess.hpp"
#include "CouenneTNLP.hpp"

#include <stdio.h>

#include "CoinHelperFunctions.hpp"
#include "CoinFinite.hpp"

//#define DEBUG

using namespace Ipopt;
using namespace Couenne;

/// Empty constructor
CouenneTNLP::CouenneTNLP ():

  problem_        (NULL),
  sol0_           (NULL),
  sol_            (NULL),
  HLa_            (NULL),

  optHessian_     (NULL),
  saveOptHessian_ (false) {}


/// Destructor
CouenneTNLP::~CouenneTNLP () {

  if (sol0_)       delete [] sol0_;
  if (sol_)        delete [] sol_;
  if (HLa_)        delete HLa_;
  if (optHessian_) delete optHessian_;

  for (std::vector <std::pair <int, expression *> >::iterator i = gradient_. begin ();
       i != gradient_. end (); ++i)
    delete (*i). second;
}

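// PSDize () is defined elsewhere in Couenne; it projects the dense,
// symmetric n x n matrix A onto the cone of positive semidefinite
// matrices, writes the result into B, and returns its number of
// nonzeros (used in finalize_solution () below)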
int PSDize (int n, double *A, double *B, bool doSqrRoot);


/// Constructor
CouenneTNLP::CouenneTNLP (CouenneProblem *p):

  problem_        (p),
  sol0_           (NULL),
  sol_            (NULL),
  bestZ_          (COIN_DBL_MAX),
  Jac_            (p),
  HLa_            (new ExprHess (p)),
  optHessian_     (NULL),
  saveOptHessian_ (false) {

  std::set <int> objDep;

  expression *obj = problem_ -> Obj (0) -> Body ();

  // objective of entering problem is reformulated, no need to go
  // further
  obj -> DepList (objDep, STOP_AT_AUX);

  for (std::set <int>::iterator i = objDep.begin (); i != objDep. end (); ++i) {

    expression *gradcomp = obj -> differentiate (*i);
    gradcomp -> realign (problem_);
    gradient_ . push_back (std::pair <int, expression *> (*i, gradcomp));
  }
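
  // gradient_ now holds the objective gradient as a packed sparse
  // vector of (variable index, derivative expression) pairs; these
  // expressions are evaluated in eval_grad_f () below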

  // create data structures for nonlinear variables (see
  // get_[number|list]_of_nonlinear_variables () below)

  // constraints

  for (int i = 0; i < problem_ -> nCons (); i++) {

    expression *e = problem_ -> Con (i) -> Body ();

    // if constraint is single variable, don't treat it as constraint
    // but rather as variable bound

    if (e -> Type () == AUX ||
        e -> Type () == VAR ||
        e -> Linearity () <= LINEAR)
      continue;

    // constraint is nonlinear, get all variables its left-hand side
    // depends on and make them nonlinear

    e -> DepList (nonLinVars_, STOP_AT_AUX);
  }

  // auxiliaries

  for (int i = 0; i < problem_ -> nVars (); i++) {

    exprVar *e = problem_ -> Var (i);

    if ((e -> Type () != AUX) ||
        (e -> Multiplicity () <= 0) ||
        (e -> Linearity () <= LINEAR))
      continue;

    e -> Image () -> DepList (nonLinVars_, STOP_AT_AUX);
  }
}


/// Copy constructor
CouenneTNLP::CouenneTNLP (const CouenneTNLP &rhs)
{operator= (rhs);}


/// Assignment operator
CouenneTNLP &CouenneTNLP::operator= (const CouenneTNLP &rhs) {

  problem_    = rhs.problem_;

  sol0_       = rhs.sol0_ && problem_ ? CoinCopyOfArray (rhs.sol0_, problem_ -> nVars ()) : NULL;
  sol_        = rhs.sol_  && problem_ ? CoinCopyOfArray (rhs.sol_,  problem_ -> nVars ()) : NULL;

  bestZ_      = rhs.bestZ_;
  gradient_   = rhs.gradient_;
  nonLinVars_ = rhs.nonLinVars_;

  Jac_        = rhs.Jac_;
  HLa_        = rhs.HLa_ ? new ExprHess (*(rhs.HLa_)) : NULL;

  optHessian_     = rhs.optHessian_ ? new CouenneSparseMatrix (*(rhs.optHessian_)) : NULL;
  saveOptHessian_ = rhs.saveOptHessian_;

  return *this;
}

/// Clone
CouenneTNLP *CouenneTNLP::clone ()
{return new CouenneTNLP (*this);}


// overload this method to return the number of variables and
// constraints, and the number of non-zeros in the jacobian and the
// hessian. The index_style parameter lets you specify C or Fortran
// style indexing for the sparse matrix iRow and jCol parameters.
// C_STYLE is 0-based, and FORTRAN_STYLE is 1-based.
bool CouenneTNLP::get_nlp_info (Index& n,
                                Index& m,
                                Index& nnz_jac_g,
                                Index& nnz_h_lag,
                                IndexStyleEnum& index_style) {
  n = problem_ -> nVars ();
  m = Jac_. nRows ();

  nnz_jac_g = Jac_ . nnz ();
  nnz_h_lag = HLa_ -> nnz ();

  index_style = C_STYLE; // what else? ;-)

  return true;
}


/// set initial solution
void CouenneTNLP::setInitSol (const double *sol) {

  if (sol) {
    if (!sol0_)
      sol0_ = new CouNumber [problem_ -> nVars ()];
    CoinCopyN (sol, problem_ -> nVars (), sol0_);
  }
}

// overload this method to return the information about the bound on
// the variables and constraints. The value that indicates that a
// bound does not exist is specified in the parameters
// nlp_lower_bound_inf and nlp_upper_bound_inf. By default,
// nlp_lower_bound_inf is -1e19 and nlp_upper_bound_inf is 1e19. (see
// TNLPAdapter)
bool CouenneTNLP::get_bounds_info (Index n, Number* x_l, Number* x_u,
                                   Index m, Number* g_l, Number* g_u) {

  // constraints

#ifdef DEBUG
  printf ("get_bounds_info on %d cons, %d vars\n", m, n);
#endif

  for (int i = 0; i < problem_ -> nCons (); i++) {

    CouenneConstraint *c = problem_ -> Con (i);

    if (c -> Body () -> Type () == AUX ||
        c -> Body () -> Type () == VAR)
      continue;

    CouNumber
      clb = (*c -> Lb ()) (),
      cub = (*c -> Ub ()) ();

    // prevent ipopt from exiting on inconsistent bounds
    if (clb <= cub) {*g_l++ = clb; *g_u++ = cub;}
    else            {*g_l++ = cub; *g_u++ = clb;}
  }

  // auxiliaries

  for (int i = 0; i < problem_ -> nVars (); i++) {

    exprVar *e = problem_ -> Var (i);

    if (e -> Multiplicity () <= 0)
      *x_l++ = *x_u++ = 0.;
    else {

      CouNumber
        lb = e -> lb (),
        ub = e -> ub ();

      // prevent ipopt from exiting on inconsistent bounds
      if (lb <= ub) {*x_l++ = lb; *x_u++ = ub;}
      else          {*x_l++ = ub; *x_u++ = lb;}
    }

    if ((e -> Type () != AUX) ||
        (e -> Multiplicity () <= 0))
      continue;

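    // each auxiliary in play adds one more row, evaluated in eval_g ()
    // as (defining expression minus auxiliary); depending on the
    // auxiliary's sign, that row is an equality (both bounds zero) or
    // a one-sided inequality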
    *g_l = (e -> sign () != expression::AUX_GEQ) ? 0. : -COIN_DBL_MAX;
    *g_u = (e -> sign () != expression::AUX_LEQ) ? 0. :  COIN_DBL_MAX;

    ++g_l;
    ++g_u;
  }

  return true;
}


// overload this method to return the variables linearity
// (TNLP::LINEAR or TNLP::NON_LINEAR). The var_types array should be
// allocated with length at least n. (default implementation just
// returns false and does not fill the array).
bool CouenneTNLP::get_variables_linearity (Index n, Ipopt::TNLP::LinearityType* var_types) {

  CoinFillN (var_types, n, Ipopt::TNLP::LINEAR);

  for (std::set <int>:: iterator i = nonLinVars_. begin (); i != nonLinVars_. end (); ++i)
    var_types [*i] = Ipopt::TNLP::NON_LINEAR;

  return true;
}

// overload this method to return the constraint linearity. array
// should be allocated with length at least m. (default implementation
// just returns false and does not fill the array).
bool CouenneTNLP::get_constraints_linearity (Index m, Ipopt::TNLP::LinearityType* const_types) {

  // constraints

  for (int i = 0; i < problem_ -> nCons (); i++) {

    expression *b = problem_ -> Con (i) -> Body ();

    if (b -> Type () == AUX ||
        b -> Type () == VAR)
      continue;

    *const_types++ =
      (b -> Linearity () > LINEAR) ?
      Ipopt::TNLP::NON_LINEAR :
      Ipopt::TNLP::LINEAR;
  }

  // auxiliaries

  for (int i = 0; i < problem_ -> nVars (); i++) {

    exprVar *e = problem_ -> Var (i);

    if ((e -> Type () != AUX) ||
        (e -> Multiplicity () <= 0))
      continue;

    *const_types++ =
      (e -> Image () -> Linearity () > LINEAR) ?
      Ipopt::TNLP::NON_LINEAR :
      Ipopt::TNLP::LINEAR;
  }

  return true;
}


// overload this method to return the starting point. The bool
// variables indicate whether the algorithm wants you to initialize x,
// z_L/z_u, and lambda, respectively. If, for some reason, the
// algorithm wants you to initialize these and you cannot, return
// false, which will cause Ipopt to stop. You will have to run Ipopt
// with different options then.
bool CouenneTNLP::get_starting_point (Index n,
                                      bool init_x, Number* x,
                                      bool init_z, Number* z_L, Number* z_U,
                                      Index m,
                                      bool init_lambda, Number* lambda) {
  if (init_x)
    CoinCopyN (sol0_, n, x);

  assert (!init_z);      // can't initialize bound multipliers
  assert (!init_lambda); // can't initialize Lagrangian multipliers

  return true;
}


// overload this method to return the value of the objective function
bool CouenneTNLP::eval_f (Index n, const Number* x, bool new_x,
                          Number& obj_value) {
  if (new_x)
    CoinCopyN (x, n, problem_ -> X ()); // can't push domain as we
                                        // don't know when to pop

  obj_value = (*(problem_ -> Obj (0) -> Body ())) ();
  return true;
}


// overload this method to return the vector of the gradient of
// the objective w.r.t. x
bool CouenneTNLP::eval_grad_f (Index n, const Number* x, bool new_x,
                               Number* grad_f) {

#ifdef DEBUG
  printf ("eval_grad_f: [");
  for (int i=0; i<n; i++)
    printf ("%.2g ", x [i]);
  printf ("] --> [");
#endif

  if (new_x)
    CoinCopyN (x, n, problem_ -> X ()); // can't push domain as we
                                        // don't know when to pop

  CoinFillN (grad_f, n, 0.);

  for (std::vector <std::pair <int, expression *> >::iterator i = gradient_. begin ();
       i != gradient_. end (); ++i)
    grad_f [i -> first] = (*(i -> second)) ();

#ifdef DEBUG
  for (int i=0; i<n; i++)
    printf ("%.2g ", grad_f [i]);
  printf ("]\n");
#endif

  return true;
}


// overload this method to return the vector of constraint values
bool CouenneTNLP::eval_g (Index n, const Number* x, bool new_x,
                          Index m, Number* g) {

  if (new_x)
    CoinCopyN (x, n, problem_ -> X ()); // can't push domain as we
                                        // don't know when to pop

#ifdef DEBUG
  if (x) {
    printf ("eval_g: [");
    for (int i=0; i<n; i++)
      printf ("%.2g ", x [i]);
    printf ("] --> [");
  }
#endif

  int nEntries = 0; // FIXME: needs to go

  for (int i = 0; i < problem_ -> nCons (); i++) {

    expression *b = problem_ -> Con (i) -> Body ();

    if (b -> Type () == AUX ||
        b -> Type () == VAR)
      continue;

    nEntries ++;

    *g++ = (*b) (); // this element of g is the evaluation of the constraint
  }

  // auxiliaries

  assert (n == problem_ -> nVars ());

  for (int i = 0; i < problem_ -> nVars (); i++) {

    exprVar *e = problem_ -> Var (i);

    if ((e -> Type () != AUX) ||
        (e -> Multiplicity () <= 0))
      continue;

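    // auxiliary row: value of the defining expression minus the
    // auxiliary itself; its bounds were set in get_bounds_info () above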
    *g++ = (*(e -> Image ())) () - (*e) ();

    nEntries ++;
  }

#ifdef DEBUG
  if (x) {
    for (int i=0; i<nEntries; i++)
      printf ("%.2g ", *(g - nEntries + i));
    printf ("]\n");
  }
#endif

  return true;
}


// overload this method to return the jacobian of the constraints. The
// vectors iRow and jCol only need to be set once. The first call is
// used to set the structure only (iRow and jCol will be non-NULL, and
// values will be NULL). For subsequent calls, iRow and jCol will be
// NULL.
bool CouenneTNLP::eval_jac_g (Index n, const Number* x, bool new_x,
                              Index m, Index nele_jac, Index* iRow,
                              Index *jCol, Number* values) {
  if (new_x)
    CoinCopyN (x, n, problem_ -> X ()); // can't push domain as we
                                        // don't know when to pop

#ifdef DEBUG
  if (x) {
    printf ("eval_jac_g: ["); fflush (stdout);
    for (int i=0; i<n; i++)
      {printf ("%.2g ", x [i]); fflush (stdout);}
    printf ("] --> ["); fflush (stdout);
  }
#endif

  if (values == NULL &&
      iRow   != NULL &&
      jCol   != NULL) {

    // initialization of the Jacobian's structure. This has been
    // already prepared by the constructor, so simply copy it

    CoinCopyN (Jac_.iRow (), nele_jac, iRow);
    CoinCopyN (Jac_.jCol (), nele_jac, jCol);

  } else {

    // fill in Jacobian's values. Evaluate each member using the
    // domain modified above by the new value of x

    register expression **e = Jac_. expr ();

    for (register int i=nele_jac; i--;)
      *values++ = (**(e++)) ();
  }

#ifdef DEBUG
  if (values) {
    for (int i=0; i<nele_jac; i++)
      {printf ("%.2g ", *(values - nele_jac + i)); fflush (stdout);}
    printf ("]\n");
  } else printf ("empty\n");
#endif

  return true;
}


// Overload this method to return the hessian of the lagrangian. The
// vectors iRow and jCol only need to be set once (during the first
// call). The first call is used to set the structure only (iRow and
// jCol will be non-NULL, and values will be NULL). For subsequent
// calls, iRow and jCol will be NULL.
//
// This matrix is symmetric - specify the lower diagonal only.
//
// A default implementation is provided, in case the user wants to use
// quasi-Newton approximations to estimate the second derivatives and
// doesn't need to implement this method.
bool CouenneTNLP::eval_h (Index n, const Number* x, bool new_x, Number obj_factor,
                          Index m, const Number* lambda, bool new_lambda,
                          Index nele_hess,
                          Index* iRow, Index* jCol, Number* values) {

  if (new_x)
    CoinCopyN (x, n, problem_ -> X ()); // can't push domain as we
                                        // don't know when to pop

#ifdef DEBUG
  if (x) {
    printf ("eval_h: ["); fflush (stdout);
    for (int i=0; i<n; i++)
      {printf ("%.2g ", x [i]); fflush (stdout);}
    printf ("], lambda: ["); fflush (stdout);
    for (int i=0; i<m; i++)
      {printf ("%.2g ", lambda [i]); fflush (stdout);}
    printf ("] --> ["); fflush (stdout);
  }
#endif

  if (values == NULL &&
      iRow   != NULL &&
      jCol   != NULL) {

    // initialization of the Hessian's structure. This has been
    // already prepared by the constructor, so simply copy it

    CoinCopyN (HLa_ -> iRow (), nele_hess, iRow);
    CoinCopyN (HLa_ -> jCol (), nele_hess, jCol);

  } else {

    // fill in the Hessian's values. Evaluate each member using the
    // domain modified above by the new value of x

    CoinZeroN (values, nele_hess);

    for (int i=0; i<nele_hess; i++, values++) {

      int
        numL  = HLa_ -> numL () [i],
        *lamI = HLa_ -> lamI () [i];

      expression
        **expr = HLa_ -> expr () [i];

#ifdef DEBUG
      printf ("[%d %d] %d lambdas: ", HLa_ -> iRow () [i], HLa_ -> jCol () [i], numL); fflush (stdout);
      for (int k=0; k<numL; k++) {
        printf ("%d ", lamI [k]);
        fflush (stdout);
        expr [k] -> print ();
        printf ("\n");
      }
#endif

      // the objective's part of the Hessian can only have level index 0, avoid check

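      // lamI stores index 0 for the objective and 1 + j for the j-th
      // constraint row, hence the "- 1" when indexing lambda below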
      if (0 == *lamI) {*values += obj_factor * (*(*expr++)) (); --numL; ++lamI;}
      while (numL--)   *values += lambda [*lamI++ - 1] * (*(*expr++)) ();
    }
  }

#ifdef DEBUG
  if (values) {
    for (int i=0; i<nele_hess; i++)
      {printf ("%.2g ", *(values - nele_hess + i)); fflush (stdout);}
    printf ("]\n");
  } else printf ("empty\n");
#endif

  return true;
}

// Change objective function and modify gradient expressions
// accordingly
void CouenneTNLP::setObjective (expression *newObj) {

  if (HLa_)
    delete HLa_;

  // change the Hessian accordingly

  HLa_ = new ExprHess (problem_);

  std::set <int> objDep;

  // objective of entering problem is reformulated, no need to go
  // further
  newObj -> DepList (objDep, STOP_AT_AUX);

  for (std::vector <std::pair <int, expression *> >::iterator i = gradient_. begin ();
       i != gradient_. end (); ++i)
    delete (*i). second;

  gradient_ . erase (gradient_ . begin (), gradient_ . end ());

  for (std::set <int>::iterator i = objDep.begin (); i != objDep. end (); ++i) {

    expression *gradcomp = Simplified (newObj -> differentiate (*i));
    //*gsimp = gradcomp -> simplify ();

    // if (gsimp) {
    //   delete gradcomp;
    //   gradcomp = gsimp;
    // }

    gradcomp -> realign (problem_);
    gradient_ . push_back (std::pair <int, expression *> (*i, gradcomp));
  }
}

// This method is called when the algorithm is complete so the TNLP
// can store/write the solution
void CouenneTNLP::finalize_solution (SolverReturn status,
                                     Index n, const Number* x, const Number* z_L, const Number* z_U,
                                     Index m, const Number* g, const Number* lambda,
                                     Number obj_value,
                                     const IpoptData* ip_data,
                                     IpoptCalculatedQuantities* ip_cq) {

  //printf ("Ipopt[FP] solution (card %d): %12e\n", n, obj_value);

  bestZ_ = obj_value;

  if (sol_) CoinCopyN (x, n, sol_);
  else      sol_ = CoinCopyOfArray (x, n);

  // if a save-flag was set, save this solution's lagrangian hessian
  // for later use by the FP

  if (!saveOptHessian_)
    return;

  {
    if (!optHessian_)
      optHessian_ = new CouenneSparseMatrix;

    problem_ -> domain () -> push (n, x, problem_ -> domain () -> current () -> lb (),
                                         problem_ -> domain () -> current () -> ub ());
    int nnz = HLa_ -> nnz ();

    // resize them to full size (and realloc them to optHessianNum_ later)

    double *&optHessianVal = optHessian_ -> val ();
    int    *&optHessianRow = optHessian_ -> row ();
    int    *&optHessianCol = optHessian_ -> col ();

    int &optHessianNum = optHessian_ -> num ();

    optHessianVal = (double *) realloc (optHessianVal, nnz * sizeof (double));
    optHessianRow = (int    *) realloc (optHessianRow, nnz * sizeof (int));
    optHessianCol = (int    *) realloc (optHessianCol, nnz * sizeof (int));

    optHessianNum = 0;

    for (int i=0; i < HLa_ -> nnz (); ++i) {

      double hessMember = 0.;
      expression **elist = HLa_ -> expr () [i];

      for (int j=0; j < HLa_ -> numL () [i]; ++j) {

        int indLam = HLa_ -> lamI () [i][j];

        hessMember += (indLam == 0) ?
          (*(elist [j])) () :                    // this is the objective
          (*(elist [j])) () * lambda [indLam-1]; // this is a constraint
      }

      if (fabs (hessMember) > COUENNE_EPS) {

        // printf ("saving: %d, %d --> %g\n",
        //         HLa_ -> iRow () [i],
        //         HLa_ -> jCol () [i], hessMember);

        optHessianVal [optHessianNum]   = hessMember;
        optHessianRow [optHessianNum]   = HLa_ -> iRow () [i];
        optHessianCol [optHessianNum++] = HLa_ -> jCol () [i];
      }
    }

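    // expand the sparse Hessian into a dense n x n matrix, project it
    // onto the PSD cone with PSDize (), then gather the nonzeros of
    // the projected matrix back into optHessian_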
    double *H = new double [n*n];
    CoinZeroN (H, n*n);

    double *H_PSD = new double [n*n];

    for (int i=0; i < optHessianNum; ++i)
      H [*optHessianRow++ * n + *optHessianCol++] = *optHessianVal++;

    optHessianRow -= optHessianNum;
    optHessianCol -= optHessianNum;
    optHessianVal -= optHessianNum;

    // transform matrix into a PSD one by eliminating the contribution
    // of negative eigenvalues, return number of nonzeros
    optHessianNum = PSDize (n, H, H_PSD, false);

    optHessianVal = (double *) realloc (optHessianVal, optHessianNum * sizeof (double));
    optHessianRow = (int    *) realloc (optHessianRow, optHessianNum * sizeof (int));
    optHessianCol = (int    *) realloc (optHessianCol, optHessianNum * sizeof (int));

    nnz = 0;
    double val;

    for   (int i=0; i<n; ++i)
      for (int j=0; j<n; ++j)
        if (fabs (val = *H_PSD++) > COUENNE_EPS) {
          *optHessianRow++ = i;
          *optHessianCol++ = j;
          *optHessianVal++ = val;
          ++nnz;
        }

    H_PSD -= n*n;
    optHessianNum = nnz;

    optHessianRow -= nnz;
    optHessianCol -= nnz;
    optHessianVal -= nnz;

    problem_ -> domain () -> pop ();

    delete [] H;
    delete [] H_PSD;
  }
}


// Intermediate Callback method for the user. Providing dummy default
// implementation. For details see IntermediateCallBack in IpNLP.hpp.
bool CouenneTNLP::intermediate_callback (AlgorithmMode mode,
                                         Index iter, Number obj_value,
                                         Number inf_pr, Number inf_du,
                                         Number mu, Number d_norm,
                                         Number regularization_size,
                                         Number alpha_du, Number alpha_pr,
                                         Index ls_trials,
                                         const IpoptData* ip_data,
                                         IpoptCalculatedQuantities* ip_cq) {

  //printf ("Ipopt FP: iter %4d obj %12e %12e %12e\n", iter, obj_value, inf_pr, inf_du);
  return true;
}


// Methods for quasi-Newton approximation. If the second derivatives
// are approximated by Ipopt, it is better to do this only in the
// space of nonlinear variables. The following methods are called by
// Ipopt if the quasi-Newton approximation is selected. If -1 is
// returned as number of nonlinear variables, Ipopt assumes that all
// variables are nonlinear.
Index CouenneTNLP::get_number_of_nonlinear_variables ()
{return nonLinVars_. size ();}


// Otherwise, it calls get_list_of_nonlinear_variables with an array
// into which the indices of the nonlinear variables should be written
// - the array has length num_nonlin_vars, which is identical with the
// return value of get_number_of_nonlinear_variables(). It is assumed
// that the indices are counted starting with 1 in the FORTRAN_STYLE,
// and 0 for the C_STYLE.
bool CouenneTNLP::get_list_of_nonlinear_variables (Index num_nonlin_vars,
                                                   Index* pos_nonlin_vars) {

  for (std::set <int>:: iterator i = nonLinVars_. begin (); i != nonLinVars_. end (); ++i)
    *pos_nonlin_vars++ = *i;

  return true;
}
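

// Usage sketch (illustrative only, not part of the original file): a
// CouenneTNLP is handed to Ipopt through the standard TNLP interface.
// "problem" below stands for a hypothetical, already reformulated
// CouenneProblem *:
//
//   SmartPtr <TNLP> tnlp = new CouenneTNLP (problem);
//
//   SmartPtr <IpoptApplication> app = IpoptApplicationFactory ();
//   if (app -> Initialize () == Solve_Succeeded)
//     app -> OptimizeTNLP (tnlp);             // calls the methods above
//
// setInitSol () can be called on the CouenneTNLP beforehand to pass a
// starting point; finalize_solution () then stores the NLP optimum in
// sol_ / bestZ_ and, if saveOptHessian_ is set, the PSD-projected
// Lagrangian Hessian in optHessian_.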