using namespace Ipopt;
// Excerpt from the constructor: register every nonzero of the original hessian
// in H_, keyed by (column, row); the flag -1 marks entries owned by the original problem.
int * jCol = new int[nnz_h];
int * iRow = new int[nnz_h];
     nnz_h, jCol, iRow, NULL);   // tail of the call retrieving the original hessian structure
for(int i = 0 ; i < nnz_h ; i++){
  bool inserted = H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                           std::make_pair(i, -1))).second;
  assert(inserted == true);
}
assert(nnz_h == (int) H_.size());
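The loop above stores every nonzero of the original hessian under the key (jCol[i], iRow[i]) together with the pair (i, -1), where i is the entry's position in Ipopt's values array and -1 flags it as belonging to the original problem. A minimal standalone sketch of that pattern, assuming AdjustableMat is a std::map from a (col, row) pair to a (position, flag) pair (the actual typedef is not shown in this listing):

#include <cassert>
#include <cstddef>
#include <map>
#include <utility>
#include <vector>

// Assumed shape of the map: (col, row) -> (position in the values array, flag).
// A flag of -1 marks an entry coming from the original hessian.
typedef std::map<std::pair<int, int>, std::pair<int, int> > AdjustableMat;

AdjustableMat register_original_hessian(const std::vector<int>& iRow,
                                        const std::vector<int>& jCol) {
  AdjustableMat H;
  for (std::size_t i = 0; i < iRow.size(); ++i) {
    bool inserted = H.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                            std::make_pair((int)i, -1))).second;
    assert(inserted);  // the original structure must not contain duplicate entries
  }
  return H;
}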
// Excerpt from the copy constructor's initializer list:
quadRows_(other.quadRows_),
curr_nnz_jac_(other.curr_nnz_jac_),
// The copy constructor rebuilds H_ from the copied structure in the same way:
int * jCol = new int[nnz_h];
int * iRow = new int[nnz_h];
     (int) nnz_h, jCol, iRow, NULL);
for(size_t i = 0 ; i < nnz_h ; i++){
  bool inserted = H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                           std::make_pair(i, -1))).second;
  assert(inserted == true);
}
assert(nnz_h == H_.size());
// Loop headers over the stored quadratic rows, taken from several members of the class:
for(size_t i = 0 ; i < quadRows_.size() ; i++){
for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
// Excerpt from get_nlp_info: the hessian nonzero count reported to Ipopt is the
// current size of H_.
     TNLP::IndexStyleEnum& index_style){
  nnz_h_lag = (int) H_.size();
// Excerpt from get_bounds_info (tail of the signature):
     Index m, Number* g_l, Number* g_u){
// Excerpt from get_constraints_linearity: every quadratic cut row is reported as nonlinear.
for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
  const_types[i] = TNLP::NON_LINEAR;
// Excerpt from get_starting_point (part of the signature):
     bool init_z, Number* z_L, Number* z_U,
     Index m, bool init_lambda,
// Excerpt from get_scaling_parameters: the quadratic cut rows are given a scaling factor of 1.
     bool& use_x_scaling, Index n,
     bool& use_g_scaling, Index m,
  CoinFillN(g_scaling, (int) quadRows_.size(), 1.);}
// Excerpt from eval_f: when a linear objective is stored in obj_, it is evaluated directly.
assert(n == (int) obj_.size());
for(int i = 0 ; i < n ; i++){
  obj_value += obj_[i] * x[i];
// Excerpt from eval_grad_f, with the same check on the stored linear objective:
assert(n == (int) obj_.size());
for(int i = 0 ; i < n ; i++){
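Together with the set_linear_objective excerpt at the end of this listing and the obj_factor = 0 line in eval_h, these loops suggest that once a linear objective is installed in obj_, eval_f and eval_grad_f ignore the original nonlinear objective. A hedged sketch of that behaviour; the handling of the constant term c_ is an assumption taken from the member list below, only the sum of obj_[i] * x[i] appears in the excerpt:

#include <cstddef>
#include <vector>

// Sketch of the linear-objective override: f(x) = c + sum_i obj[i] * x[i],
// grad f = obj. Names are illustrative, not the class's actual interface.
double linear_obj_value(const std::vector<double>& obj, double c, const double* x) {
  double value = c;
  for (std::size_t i = 0; i < obj.size(); ++i) value += obj[i] * x[i];
  return value;
}

void linear_obj_gradient(const std::vector<double>& obj, double* grad_f) {
  for (std::size_t i = 0; i < obj.size(); ++i) grad_f[i] = obj[i];
}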
// Excerpt, likely from eval_g: the quadratic cut rows append their values after the
// original constraints.
for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
// Excerpts from eval_jac_g. Structure pass: the parent class fills the entries of the
// original jacobian, then each quadratic cut writes one row after them.
     Index m, Index nele_jac, Index* iRow,
     Index *jCol, Number* values){
     n_ele_orig, iRow, jCol, values);   // tail of the call filling the original entries
  assert(iRow != NULL);
  assert(jCol != NULL);
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    Ipopt::Index mi = m_orig + i + offset;   // constraint index of cut i
    CoinFillN(iRow, nnz, mi);                // the whole block shares this row index
    quadRows_[i]->gradiant_struct(nnz, jCol, offset);
// Values pass: iRow and jCol are NULL and the cuts append their gradients after the
// original entries.
  assert(iRow == NULL);
  assert(jCol == NULL);
  values += n_ele_orig;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i]->eval_grad(nnz, x, new_x, values);
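The structure pass above gives each quadratic cut one constraint row placed after the original m_orig rows: iRow is filled with that row index for the whole block, and the cut writes its column indices through gradiant_struct. A self-contained sketch of the same layout, using an illustrative CutRow type rather than Bonmin's QuadRow:

#include <cstddef>
#include <vector>

// Illustrative representation of one cut's gradient structure.
struct CutRow {
  std::vector<int> columns;   // variables appearing in the cut's gradient
};

// Append the jacobian structure of the cut rows after the original entries.
void append_cut_jacobian_structure(std::vector<int>& iRow, std::vector<int>& jCol,
                                   const std::vector<CutRow>& cuts,
                                   int m_orig, int offset /* 0- or 1-based indexing */) {
  for (std::size_t i = 0; i < cuts.size(); ++i) {
    int row = m_orig + (int)i + offset;           // constraint index of cut i
    for (std::size_t k = 0; k < cuts[i].columns.size(); ++k) {
      iRow.push_back(row);                        // the whole block shares one row index
      jCol.push_back(cuts[i].columns[k] + offset);
    }
  }
}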
// Excerpts from eval_grad_gi: structure pass (jCol non-NULL), then values pass.
     Index i, Index& nele_grad_gi, Index* jCol,
  assert(jCol != NULL);
  quadRows_[i]->gradiant_struct(nele_grad_gi, jCol, offset);
  assert(jCol == NULL);
  quadRows_[i]->eval_grad(nele_grad_gi, x, new_x, values);
// Excerpts from eval_h. With a linear objective installed, the original objective's
// hessian contribution is switched off:
     Number obj_factor, Index m, const Number* lambda,
     bool new_lambda, Index nele_hess,
     Index* iRow, Index* jCol, Number* values){
  if(!obj_.empty()) obj_factor = 0;
// Structure pass: entries of the original hessian keep their position, entries created
// by quadratic cuts are renumbered after the original block.
  assert(iRow != NULL);
  assert(jCol != NULL);
  std::cout<<"Hessian structure"<<std::endl;
  int nnz_sup = nnz_h_lag_orig;
  for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
    if(i->second.second == -1){                  // entry from the original hessian
      assert(i->second.first < nnz_h_lag_orig);
    }
    else {                                       // entry created by quadratic cuts
      assert(i->second.second > 0);
      assert(i->second.first >= nnz_h_lag_orig);
      i->second.first = nnz_sup;                 // next free slot after the original block
    }
    iRow[i->second.first] = i->first.first;
    jCol[i->second.first] = i->first.second;
    printf("iRow %i, jCol %i : nnz %i\n",
           i->first.second, i->first.first,
  assert(nnz == (int) H_.size());
// Values pass: the parent class evaluates the original hessian, the extra entries are
// zeroed, and each quadratic cut adds its multiplier-weighted hessian.
  std::cout<<"Computing hessian"<<std::endl;
  assert(iRow == NULL);
  assert(jCol == NULL);
     nnz_h_lag_orig, iRow, jCol, values);   // tail of the call evaluating the original hessian
  CoinZeroN(values + nnz_h_lag_orig, (int) H_.size() - nnz_h_lag_orig);
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i]->eval_hessian(lambda[i + m_orig], values);
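The values pass of eval_h follows a simple layout: the parent class fills the first nnz_h_lag_orig entries, the entries appended for the quadratic cuts are zeroed, and each cut then adds its multiplier-weighted hessian into the shared array. A hedged, self-contained sketch of that accumulation; the triplet-style CutHessian type is an illustration, not Bonmin's QuadRow interface:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// One cut's hessian contribution as (position in values, coefficient) pairs.
struct CutHessian {
  std::vector<std::pair<int, double> > entries;
};

void fill_extended_hessian(std::vector<double>& values,
                           int nnz_orig,                        // entries owned by the original problem
                           const std::vector<CutHessian>& cuts,
                           const std::vector<double>& lambda) { // one multiplier per cut row
  // Entries created for the cuts live after the original block; zero them first.
  std::fill(values.begin() + nnz_orig, values.end(), 0.);
  for (std::size_t i = 0; i < cuts.size(); ++i)
    for (std::size_t k = 0; k < cuts[i].entries.size(); ++k)
      values[cuts[i].entries[k].first] += lambda[i] * cuts[i].entries[k].second;
}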
// Excerpt from a cut-adding member that receives an OsiCuts collection; column cuts
// are not supported.
assert(cuts.sizeColCuts() == 0);
printf("Adding %i cuts\n", cuts.sizeRowCuts());
for(int i = 0 ; i < n ; i++){
// Excerpt from a cut-adding member that receives an array of OsiRowCut pointers:
// bounds are appended for every cut and a dynamic_cast detects the quadratic ones.
     const OsiRowCut ** cuts){
  printf("Adding %i cuts\n", numcuts);
  g_l_.reserve(g_l_.size() + numcuts);
  g_u_.reserve(g_u_.size() + numcuts);
  for(unsigned int i = 0 ; i < numcuts ; i++){
    g_l_.push_back(cuts[i]->lb());
    g_u_.push_back(cuts[i]->ub());
    const QuadCut * quadCut = dynamic_cast<const QuadCut *> (cuts[i]);
// Excerpt, likely from addRowCuts: the OsiCuts collection may actually be a Bonmin Cuts
// object carrying quadratic cuts, which is detected with a dynamic_cast.
assert(cuts.sizeColCuts() == 0);
printf("Adding %i cuts\n", cuts.sizeRowCuts());
const Cuts * quadCuts = dynamic_cast<const Cuts *>(&cuts);
int n = cuts.sizeRowCuts();
for(int i = 0 ; i < n ; i++){
  g_l_.push_back(cuts.rowCut(i).lb());
  g_u_.push_back(cuts.rowCut(i).ub());
  assert(dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i)) == NULL);
  const QuadCut * cut = dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i));
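Both cut-adding excerpts rely on run-time type information: each incoming OsiRowCut (or the whole OsiCuts collection) is probed with dynamic_cast to decide whether it carries quadratic data. A minimal sketch of that dispatch with a hypothetical two-class hierarchy standing in for OsiRowCut/QuadCut:

#include <cstddef>
#include <cstdio>
#include <vector>

struct RowCutBase { virtual ~RowCutBase() {} };   // stands in for OsiRowCut
struct QuadRowCut : RowCutBase {};                // stands in for Bonmin's QuadCut

void add_cuts(const std::vector<const RowCutBase*>& cuts) {
  for (std::size_t i = 0; i < cuts.size(); ++i) {
    const QuadRowCut* q = dynamic_cast<const QuadRowCut*>(cuts[i]);
    if (q != NULL)
      std::printf("cut %u is quadratic\n", (unsigned)i);   // would become a QuadRow
    else
      std::printf("cut %u is linear\n", (unsigned)i);      // only extends the linear jacobian
  }
}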
// Excerpts from removeCuts: the constraint indices to drop are translated to positions
// among the cut rows, marked with INT_MAX in an order array, and the surviving bounds
// are compacted toward the front.
for(unsigned int k = 0; k < n ; k++){
  int idx = idxs[k] - m_tminlp ;
for(unsigned int i = 0 ; i < order.size() ; i++){
for(unsigned int i = 0 ; i < n ; i++){
  assert(idxs[i] - m_tminlp >= 0);
  order[ idxs[i] - m_tminlp ] = INT_MAX;
std::sort(order.begin(), order.end());
double * g_l = g_l_() + m_tminlp;
double * g_u = g_u_() + m_tminlp;
for(i = 0 ; order[i] < INT_MAX ; i++){
  assert(order[i] >= i);
  g_l[i] = g_l[order[i]];
  g_u[i] = g_u[order[i]];
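The removeCuts excerpt marks the constraint positions to be dropped with INT_MAX in an order array, sorts it so the surviving positions come first in increasing order, and then compacts the bound arrays toward the front. A self-contained sketch of that compaction scheme (vector-based for brevity; the class itself works on g_l_/g_u_ with an m_tminlp offset):

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <vector>

// Remove the bounds whose positions are listed in toRemove, keeping the rest in order.
void compact_bounds(std::vector<double>& g_l, std::vector<double>& g_u,
                    const std::vector<int>& toRemove) {
  std::vector<int> order(g_l.size());
  for (std::size_t i = 0; i < order.size(); ++i) order[i] = (int)i;
  for (std::size_t k = 0; k < toRemove.size(); ++k) order[toRemove[k]] = INT_MAX;
  std::sort(order.begin(), order.end());      // surviving positions first, removed ones last
  std::size_t i = 0;
  for (; i < order.size() && order[i] < INT_MAX; ++i) {
    assert(order[i] >= (int)i);               // entries only move toward the front
    g_l[i] = g_l[order[i]];
    g_u[i] = g_u[order[i]];
  }
  g_l.resize(i);
  g_u.resize(i);
}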
// Excerpt from printH, the debugging dump of H_:
for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
  std::cout<<"nnz: "<<nnz
           <<"jCol: "<<i->first.first
           <<", iRow "<<i->first.second<<std::endl;
// Excerpt from set_linear_objective: the user-passed linear objective is copied into obj_.
CoinCopyN(obj, n_var, obj_());
void printH()
Print H_ for debugging.
vector< Ipopt::Number > g_l_
Lower bounds on constraints values.
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Returns the jacobian of the constraints.
virtual bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Return the hessian of the lagrangian.
vector< Ipopt::Number > x_init_
Initial primal point.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
Returns the vector of the gradient of the objective w.r.t. x.
int curr_nnz_jac_
Current number of entries in the jacobian.
vector< Ipopt::Number > x_l_
Current lower bounds on variables.
QuadCut & quadCut(int i)
Access to a quadratic cut by reference.
double c_
Constant term in the objective function.
virtual bool eval_grad_gi(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index i, Ipopt::Index &nele_grad_gi, Ipopt::Index *jCol, Ipopt::Number *values)
Compute the structure or values of the gradient for one constraint.
TMINLP2TNLPQuadCuts()
Default Constructor.
virtual bool eval_gi(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index i, Ipopt::Number &gi)
Compute the value of a single constraint.
Ipopt::Number * duals_init_
Initial values for all dual multipliers (constraints then lower bounds then upper bounds) ...
Ipopt::Index nnz_h_lag()
Get the number of nonzeros in the hessian.
Generalizes OsiCuts to handle quadratic cuts.
vector< double > obj_
Stores the user-passed linear objective.
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
This call is just passed on to the parent class and adds the bounds of the quadratic cuts.
vector< QuadRow * > quadRows_
Some storage for quadratic cuts.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, Ipopt::TNLP::IndexStyleEnum &index_style)
This call is just passed on to the parent class and adds the number of quadratic cuts.
AdjustableMat H_
Storage for the original hessian of the problem.
Stores a quadratic row of the form l < c + ax + x^T Q x < u.
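Written out, with a the vector of linear coefficients and Q the matrix of quadratic coefficients, such a row constrains

  l <= c + a^T x + x^T Q x <= u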
virtual bool get_constraints_linearity(Ipopt::Index m, LinearityType *const_types)
Returns the constraint linearity.
vector< Ipopt::Number > g_u_
Upper bounds on constraints values.
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
The caller is allowed to modify the bounds, so this method returns the internal bounds information...
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
Returns the vector of constraint values in x.
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
Returns the value of the objective function in x.
Ipopt::Index num_constraints() const
Get the number of constraints.
void addCuts(const Cuts &cuts, bool safe)
Add some linear or quadratic cuts to the problem formulation if some of the OsiRowCuts are quadratic ...
virtual bool get_constraints_linearity(Ipopt::Index m, Ipopt::TNLP::LinearityType *const_types)
This call is just passed on to the parent class and adds the number of quadratic cuts.
void addRowCuts(const OsiCuts &cuts, bool safe)
Add some cuts to the problem formulation (handles quadratics).
const Ipopt::Number * g_l()
Get the current values for constraints lower bounds.
virtual bool get_scaling_parameters(Ipopt::Number &obj_scaling, bool &use_x_scaling, Ipopt::Index n, Ipopt::Number *x_scaling, bool &use_g_scaling, Ipopt::Index m, Ipopt::Number *g_scaling)
Method that returns scaling parameters (passed on to the parent class; the quadratic cuts are not scaled).
This is a class derived from TMINLP2TNLP to handle adding quadratic cuts.
virtual ~TMINLP2TNLPQuadCuts()
Destructor.
void removeCuts(unsigned int number, const int *toRemove)
Remove some cuts from the formulation.
int sizeQuadCuts() const
Number of quadratic cuts in the collection.
TNLP::IndexStyleEnum index_style() const
Access index_style.
Ipopt::Index num_variables() const
Get the number of variables.
vector< Ipopt::Number > duals_sol_
Dual multipliers of constraints and bounds.
int nnz
ATTENTION: Filter expects the jacobian to be ordered by row.
This is an adapter class that converts a TMINLP to a TNLP to be solved by Ipopt.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, TNLP::IndexStyleEnum &index_style)
This call is just passed on to the TMINLP object.
void set_linear_objective(int n_var, const double *obj, double c_0)
Change the objective to a linear one with the given objective function.
virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda)
This call is just passed on to the parent class and adds the multipliers for the quadratic cuts.
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
Returns the vector of constraint values in x (appends constraint values for quadratics).
const Ipopt::Number * g_u()
Get the current values for constraints upper bounds.
Ipopt::Index nnz_jac_g() const
Access the number of entries in the tminlp_ jacobian.
virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda)
Method called by Ipopt to get the starting point.