#include "IpIpoptApplication.hpp"
#include "CoinHelperFunctions.hpp"
#include "CoinFinite.hpp"

using namespace Ipopt;
using namespace Couenne;
CouenneTNLP::CouenneTNLP ():

  saveOptHessian_ (false) {}
for (std::vector <std::pair <int, expression *> >::iterator i = gradient_.begin ();
     i != gradient_.end (); ++i)
int PSDize (int n, double *A, double *B, bool doSqrRoot);
  bestZ_          (COIN_DBL_MAX),

  saveOptHessian_ (false) {
std::set <int> objDep;

for (std::set <int>::iterator i = objDep.begin (); i != objDep.end (); ++i) {

  expression *gradcomp = obj -> differentiate (*i);

  gradient_.push_back (std::pair <int, expression *> (*i, gradcomp));
for (int i = 0; i < problem_ -> nCons (); i++) {

  if (e -> Type () == AUX ||
      e -> Type () == VAR ||
      e -> Linearity () <= LINEAR)
for (int i = 0; i < problem_ -> nVars (); i++) {

  if ((e -> Type () != AUX) ||
      (e -> Multiplicity () <= 0) ||
      (e -> Linearity () <= LINEAR))
                                IndexStyleEnum& index_style) {

  index_style = C_STYLE;
                                   Index m, Number* g_l, Number* g_u) {

  printf ("get_bounds_info on %d cons, %d vars\n", m, n);
for (int i = 0; i < problem_ -> nCons (); i++) {

  if (c -> Body () -> Type () == AUX ||
      c -> Body () -> Type () == VAR)

    clb = (*c -> Lb ()) (),
    cub = (*c -> Ub ()) ();

  // store the bounds in the right order in case they were saved inverted
  if (clb <= cub) {*g_l++ = clb; *g_u++ = cub;}
  else            {*g_l++ = cub; *g_u++ = clb;}
for (int i = 0; i < problem_ -> nVars (); i++) {

  if (e -> Multiplicity () <= 0)
    *x_l++ = *x_u++ = 0.;

  if (lb <= ub) {*x_l++ = lb; *x_u++ = ub;}
  else          {*x_l++ = ub; *x_u++ = lb;}
  if ((e -> Type () != AUX) ||
      (e -> Multiplicity () <= 0))

var_types [*i] = Ipopt::TNLP::NON_LINEAR;
for (int i = 0; i < problem_ -> nCons (); i++) {

  if (b -> Type () == AUX ||

  (b -> Linearity () > LINEAR) ?
    Ipopt::TNLP::NON_LINEAR :
    Ipopt::TNLP::LINEAR;
for (int i = 0; i < problem_ -> nVars (); i++) {

  if ((e -> Type () != AUX) ||
      (e -> Multiplicity () <= 0))

  (e -> Image () -> Linearity () > LINEAR) ?
    Ipopt::TNLP::NON_LINEAR :
    Ipopt::TNLP::LINEAR;
                                     bool init_x, Number* x,
                                     bool init_z, Number* z_L, Number* z_U,
                                     bool init_lambda, Number* lambda) {

  CoinCopyN (sol0_, n, x);

  assert (!init_lambda);
obj_value = (*(problem_ -> Obj (0) -> Body ())) ();
printf ("eval_grad_f: [");
for (int i=0; i<n; i++)
  printf ("%.2g ", x [i]);
CoinFillN (grad_f, n, 0.);

for (std::vector <std::pair <int, expression *> >::iterator i = gradient_.begin ();
     i != gradient_.end (); ++i)

  grad_f [i -> first] = (*(i -> second)) ();
for (int i=0; i<n; i++)
  printf ("%.2g ", grad_f [i]);
                          Index m, Number* g) {

  printf ("eval_g: [");
  for (int i=0; i<n; i++)
    printf ("%.2g ", x [i]);
for (int i = 0; i < problem_ -> nCons (); i++) {

  if (b -> Type () == AUX ||
for (int i = 0; i < problem_ -> nVars (); i++) {

  if ((e -> Type () != AUX) ||
      (e -> Multiplicity () <= 0))

  // defining constraint of an auxiliary, w = f(x), returned as f(x) - w
  *g++ = (*(e -> Image ())) () - (*e) ();
for (int i=0; i<nEntries; i++)
  printf ("%.2g ", *(g - nEntries + i));
                              Index m, Index nele_jac, Index* iRow,
                              Index *jCol, Number* values) {

  printf ("eval_jac_g: ["); fflush (stdout);
  for (int i=0; i<n; i++)
    {printf ("%.2g ", x [i]); fflush (stdout);}
  printf ("] --> ["); fflush (stdout);
if (values == NULL &&

  CoinCopyN (Jac_.iRow (), nele_jac, iRow);
  CoinCopyN (Jac_.jCol (), nele_jac, jCol);

// evaluate every Jacobian entry from its derivative expression
for (register int i=nele_jac; i--;)
  *values++ = (**(e++)) ();
for (int i=0; i<nele_jac; i++)
  {printf ("%.2g ", *(values - nele_jac + i)); fflush (stdout);}

}
else printf ("empty\n");
                      Index m, const Number* lambda, bool new_lambda,
                      Index* iRow, Index* jCol, Number* values) {

  printf ("eval_h: ["); fflush (stdout);
  for (int i=0; i<n; i++)
    {printf ("%.2g ", x [i]); fflush (stdout);}
  printf ("], lambda: ["); fflush (stdout);
  for (int i=0; i<m; i++)
    {printf ("%.2g ", lambda [i]); fflush (stdout);}
  printf ("] --> ["); fflush (stdout);
if (values == NULL &&

  CoinCopyN (HLa_ -> iRow (), nele_hess, iRow);
  CoinCopyN (HLa_ -> jCol (), nele_hess, jCol);

CoinZeroN (values, nele_hess);
for (int i=0; i<nele_hess; i++, values++) {

  int  numL = HLa_ -> numL () [i],
      *lamI = HLa_ -> lamI () [i];

  expression **expr = HLa_ -> expr () [i];
  printf ("[%d %d] %d lambdas: ", HLa_ -> iRow () [i], HLa_ -> jCol () [i], numL); fflush (stdout);
  for (int k=0; k<numL; k++) {
    printf ("%d ", lamI [k]);
    expr [k] -> print ();
  // lamI holds 1-based constraint indices; the special index 0 refers to the objective
  if (0 == *lamI) {*values += obj_factor * (*(*expr++)) (); --numL; ++lamI;}
  while (numL--)   *values += lambda [*lamI++ - 1] * (*(*expr++)) ();
for (int i=0; i<nele_hess; i++)
  {printf ("%.2g ", *(values - nele_hess + i)); fflush (stdout);}

}
else printf ("empty\n");
std::set <int> objDep;

for (std::vector <std::pair <int, expression *> >::iterator i = gradient_.begin ();
     i != gradient_.end (); ++i)

for (std::set <int>::iterator i = objDep.begin (); i != objDep.end (); ++i) {

  gradient_.push_back (std::pair <int, expression *> (*i, gradcomp));
                                       Index n, const Number* x, const Number* z_L, const Number* z_U,
                                       Index m, const Number* g, const Number* lambda,
                                       Number obj_value,
                                       const IpoptData* ip_data,
                                       IpoptCalculatedQuantities* ip_cq) {
else sol_ = CoinCopyOfArray (x, n);

problem_ -> domain () -> push (n, x,
                               problem_ -> domain () -> current () -> lb (),
                               problem_ -> domain () -> current () -> ub ());
optHessianVal = (double *) realloc (optHessianVal, nnz * sizeof (double));
optHessianRow = (int    *) realloc (optHessianRow, nnz * sizeof (int));
optHessianCol = (int    *) realloc (optHessianCol, nnz * sizeof (int));
for (int i=0; i < HLa_ -> nnz (); ++i) {

  double hessMember = 0.;

  for (int j=0; j < HLa_ -> numL () [i]; ++j) {

    int indLam = HLa_ -> lamI () [i][j];

    hessMember += (indLam == 0) ?
      (*(elist [j])) () :                      // objective term
      (*(elist [j])) () * lambda [indLam-1];   // constraint term, weighted by its multiplier
    optHessianVal [optHessianNum]   = hessMember;
    optHessianRow [optHessianNum]   = HLa_ -> iRow () [i];
    optHessianCol [optHessianNum++] = HLa_ -> jCol () [i];
double *H     = new double [n*n];

double *H_PSD = new double [n*n];

// scatter the (row, col, val) triplets into the dense matrix H
for (int i=0; i < optHessianNum; ++i)
  H [*optHessianRow++ * n + *optHessianCol++] = *optHessianVal++;
optHessianRow -= optHessianNum;
optHessianCol -= optHessianNum;
optHessianVal -= optHessianNum;

optHessianNum = PSDize (n, H, H_PSD, false);
optHessianVal = (double *) realloc (optHessianVal, optHessianNum * sizeof (double));
optHessianRow = (int    *) realloc (optHessianRow, optHessianNum * sizeof (int));
optHessianCol = (int    *) realloc (optHessianCol, optHessianNum * sizeof (int));
// repack the entries of the dense PSD matrix into (row, col, val) triplets
for   (int i=0; i<n; ++i)
  for (int j=0; j<n; ++j)

    *optHessianRow++ = i;
    *optHessianCol++ = j;
    *optHessianVal++ = val;
optHessianRow -= nnz;
optHessianCol -= nnz;
optHessianVal -= nnz;
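The finalize_solution fragments above perform a dense round trip before the Hessian is stored for later use; the following summary is a reading aid matching the code shown, not additional source:

// 1. scatter the stored (row, col, val) triplets into the dense n x n matrix H;
// 2. project H onto the PSD cone:  optHessianNum = PSDize (n, H, H_PSD, false);
// 3. walk H_PSD and emit its entries as (row, col, val) triplets again;
// 4. rewind the running optHessianRow/Col/Val pointers to the start of the arrays.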
                                       Index iter, Number obj_value,
                                       Number inf_pr, Number inf_du,
                                       Number mu, Number d_norm,
                                       Number regularization_size,
                                       Number alpha_du, Number alpha_pr,
                                       Index ls_trials,
                                       const IpoptData* ip_data,
                                       IpoptCalculatedQuantities* ip_cq) {
                                                     Index* pos_nonlin_vars) {

  *pos_nonlin_vars++ = *i;
CouNumber * sol_
Optimal solution.
virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda)
return the starting point.
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
return the jacobian of the constraints.
virtual void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, const Ipopt::Number *g, const Ipopt::Number *lambda, Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
This method is called when the algorithm is complete so the TNLP can store/write the solution.
virtual ~CouenneTNLP()
Destructor.
ExprHess * HLa_
Hessian: there are 1+m of them (objective plus constraints), but all are squeezed into a single object.
virtual Ipopt::Index get_number_of_nonlinear_variables()
return the number of nonlinear variables.
CouenneTNLP & operator=(const CouenneTNLP &rhs)
Assignment.
CouenneProblem * problem_
Pointer to the object containing all info.
CouenneTNLP()
Empty constructor.
CouenneSparseMatrix * optHessian_
Stores the values of the Hessian of the Lagrangian at optimum for later use.
Class for sparse matrices (used in modifying distances in the FP).
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
return the value of the objective function
CouNumber * sol0_
Initial solution.
std::vector< std::pair< int, expression * > > gradient_
expression gradient (packed sparse vector)
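To illustrate the "packed sparse vector" layout (a sketch mirroring eval_grad_f above, not additional source code): only the variables the objective actually depends on get an entry, so evaluating the gradient touches just those components after zeroing the dense output.

CoinFillN (grad_f, n, 0.);                                 // dense output starts at zero

for (std::vector <std::pair <int, expression *> >::iterator i = gradient_.begin ();
     i != gradient_.end (); ++i)
  grad_f [i -> first] = (*(i -> second)) ();               // write only the stored components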
Class to represent nonlinear constraints.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
return the vector of the gradient of the objective w.r.t. x
CouNumber bestZ_
Value of the optimal solution.
virtual bool get_variables_linearity(Ipopt::Index n, Ipopt::TNLP::LinearityType *var_types)
return the variables' linearity (TNLP::LINEAR or TNLP::NON_LINEAR).
Class for MINLP problems with symbolic information.
virtual void setObjective(expression *newObj)
Change objective function and modify gradient expressions accordingly.
virtual bool intermediate_callback(Ipopt::AlgorithmMode mode, Ipopt::Index iter, Ipopt::Number obj_value, Ipopt::Number inf_pr, Ipopt::Number inf_du, Ipopt::Number mu, Ipopt::Number d_norm, Ipopt::Number regularization_size, Ipopt::Number alpha_du, Ipopt::Number alpha_pr, Ipopt::Index ls_trials, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
Intermediate Callback method for the user.
double CouNumber
main number type in Couenne
bool saveOptHessian_
Flag to be set to save this solution's Lagrangian Hessian in the structure above.
expression * Simplified(expression *complicated)
Macro to return an already simplified expression without having to do the "if" part every time simplify () is called.
virtual bool get_list_of_nonlinear_variables(Ipopt::Index num_nonlin_vars, Ipopt::Index *pos_nonlin_vars)
return the list of indices of nonlinear variables.
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
return the vector of constraint values
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, enum Ipopt::TNLP::IndexStyleEnum &index_style)
return the number of variables and constraints, and the number of non-zeros in the jacobian and the hessian.
CouenneTNLP * clone()
Clone.
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
return information about the bounds on the variables and constraints.
virtual bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
return the hessian of the lagrangian.
virtual bool get_constraints_linearity(Ipopt::Index m, Ipopt::TNLP::LinearityType *const_types)
return the constraint linearity.
Class for handling NLPs using CouenneProblem.
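To make the role of this class concrete, here is a minimal usage sketch. It assumes a fully set-up CouenneProblem pointer p and a CouenneTNLP constructor taking such a pointer; the hypothetical helper name solveWithIpopt is ours, while the IpoptApplication calls are standard Ipopt API.

#include "IpIpoptApplication.hpp"
#include "CouenneTNLP.hpp"

using namespace Ipopt;
using namespace Couenne;

// hypothetical helper: solve the continuous relaxation held by an existing
// CouenneProblem with Ipopt, through a CouenneTNLP
ApplicationReturnStatus solveWithIpopt (CouenneProblem *p) {

  SmartPtr <TNLP> tnlp = new CouenneTNLP (p);    // assumption: constructor taking CouenneProblem *

  SmartPtr <IpoptApplication> app = new IpoptApplication ();
  app -> Initialize ();                          // default options (reads ipopt.opt if present)

  return app -> OptimizeTNLP (tnlp);             // results come back through finalize_solution ()
}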
int nnz
ATTENTION: Filter expects the Jacobian to be ordered by row.
int PSDize(int n, double *A, double *B, bool doSqrRoot)
project a matrix onto the cone of positive semidefinite matrices (possibly taking the square roots of the eigenvalues).
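A sketch of the projection idea, assuming a LAPACK dsyev eigendecomposition: this illustrates the technique, not Couenne's actual PSDize; the name PSDize_sketch is ours, and the return value mimics the call site in finalize_solution above (a count of nonzero entries of the projected matrix).

#include <vector>
#include <algorithm>
#include <cmath>

// LAPACK symmetric eigensolver (Fortran calling convention)
extern "C" void dsyev_ (const char *jobz, const char *uplo, const int *n,
                        double *a, const int *lda, double *w,
                        double *work, const int *lwork, int *info);

// Illustrative projection: B = V diag (max (lambda_k, 0)) V^T, where A = V diag (lambda_k) V^T.
// If doSqrRoot is true, the clipped eigenvalues are replaced by their square roots.
int PSDize_sketch (int n, double *A, double *B, bool doSqrRoot) {

  std::vector <double> V (A, A + n*n), lambda (n);

  int lwork = (3*n - 1 > 1) ? 3*n - 1 : 1, info = 0;
  std::vector <double> work (lwork);

  dsyev_ ("V", "U", &n, &V [0], &n, &lambda [0], &work [0], &lwork, &info);
  if (info != 0) return 0;                       // eigendecomposition failed

  for (int k=0; k<n; ++k) {
    double l = std::max (lambda [k], 0.);        // clip negative eigenvalues
    lambda [k] = doSqrRoot ? sqrt (l) : l;
  }

  int nnzB = 0;

  for   (int i=0; i<n; ++i)
    for (int j=0; j<n; ++j) {
      double v = 0.;
      for (int k=0; k<n; ++k)                    // B [i][j] = sum_k lambda_k v_k [i] v_k [j]
        v += V [k*n + i] * lambda [k] * V [k*n + j];
      B [i*n + j] = v;
      if (fabs (v) > 1e-15) ++nnzB;
    }

  return nnzB;                                   // number of (numerically) nonzero entries of B
}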
std::set< int > nonLinVars_
list of nonlinear variables
void setInitSol(const double *sol)
set initial solution