BonTMINLP2Quad.cpp
// (C) Copyright International Business Machines Corporation 2007
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// Authors :
// Pierre Bonami, International Business Machines Corporation
//
// Date : 10/06/2007

#include "BonTMINLP2Quad.hpp"
#include <climits>

using namespace Ipopt;

//#define DEBUG
namespace Bonmin {

  TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const SmartPtr<Bonmin::TMINLP> tminlp):
    TMINLP2TNLP(tminlp)
  {
    // Fill the locally stored hessian matrix.

    // Get the number of nonzeroes in the matrix.
    const int nnz_h = TMINLP2TNLP::nnz_h_lag();
    if(nnz_h > 0){
      int * jCol = new int [nnz_h];
      int * iRow = new int [nnz_h];

      TMINLP2TNLP::eval_h(num_variables(), NULL, false,
                          0., TMINLP2TNLP::num_constraints(), NULL, false,
                          nnz_h, jCol, iRow, NULL);

      for(int i = 0 ; i < nnz_h ; i++){
#ifndef NDEBUG
        bool inserted =
#endif
        H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                 std::make_pair(i, -1))).second;
        assert(inserted == true);
      }
      delete [] jCol;
      delete [] iRow;
    }
    assert(nnz_h == (int) H_.size());
  }
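
  /* The bookkeeping above and below relies on AdjustableMat, whose exact
   * definition lives in the headers; a minimal sketch of the layout assumed
   * throughout this file:
   *
   *   // key   : (jCol, iRow) coordinates of a nonzero hessian entry
   *   // value : (position in the values array passed to eval_h,
   *   //          -1 for entries of the original hessian, otherwise the
   *   //          number of quadratic rows referencing the entry)
   *   typedef std::map<std::pair<int, int>, std::pair<int, int> >
   *           AdjustableMat;
   *
   * Entries of the original hessian keep their position; entries added by
   * quadratic cuts are renumbered on each structure query (see eval_h).
   */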

  /** Copy constructor: deep-copies the quadratic rows and rebuilds H_. */
  TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const TMINLP2TNLPQuadCuts & other):
    TMINLP2TNLP(other),
    quadRows_(other.quadRows_),
    H_(),
    curr_nnz_jac_(other.curr_nnz_jac_),
    obj_(other.obj_)
  {
    // Get the number of nonzeroes in the matrix.
    const size_t nnz_h = TMINLP2TNLP::nnz_h_lag();

    if(nnz_h > 0){
      int * jCol = new int [nnz_h];
      int * iRow = new int [nnz_h];
      int m = TMINLP2TNLP::num_constraints() - (int)quadRows_.size();
      TMINLP2TNLP::eval_h(num_variables(), NULL, false,
                          0., m, NULL, false,
                          (int)nnz_h, jCol, iRow, NULL);

      for(size_t i = 0 ; i < nnz_h ; i++){
#ifndef NDEBUG
        bool inserted =
#endif
        H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                 std::make_pair(i, -1))).second;
        assert(inserted == true);
      }
      delete [] jCol;
      delete [] iRow;
    }
    assert(nnz_h == H_.size());

    // Properly create quadRows_ (the copied pointers are replaced by clones).
    for(size_t i = 0 ; i < quadRows_.size() ; i++){
      quadRows_[i] = new QuadRow(*quadRows_[i]);
    }

    int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      quadRows_[i]->add_to_hessian(H_, offset);
    }
  }

  /** Destructor. */
  TMINLP2TNLPQuadCuts::~TMINLP2TNLPQuadCuts(){
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      delete quadRows_[i];
    }
  }

  /** This call is just passed onto the parent class, then the counts are
      adjusted for the hessian and jacobian entries of the quadratic cuts. */
  bool TMINLP2TNLPQuadCuts::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
                                         Index& nnz_h_lag,
                                         TNLP::IndexStyleEnum& index_style){
    bool ret_val = TMINLP2TNLP::get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
    nnz_h_lag = (int)H_.size();
    nnz_jac_g = curr_nnz_jac_;
    //printf("Dimensions in TMINLP2Quad are %i\n", curr_nnz_jac_);
    return ret_val;
  }

  /** This call is just passed onto the parent class, which also returns the
      bounds of the quadratic cuts. */
  bool TMINLP2TNLPQuadCuts::get_bounds_info(Index n, Number* x_l, Number* x_u,
                                            Index m, Number* g_l, Number* g_u){
    return TMINLP2TNLP::get_bounds_info(n, x_l, x_u,
                                        m, g_l, g_u);
  }

  bool
  TMINLP2TNLPQuadCuts::get_constraints_linearity(Index m, LinearityType* const_types)
  {
    bool ret_val = TMINLP2TNLP::get_constraints_linearity(m - (int)quadRows_.size(), const_types);
    const_types += m - (int)quadRows_.size();
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      if(quadRows_[i]->isLinear())
        const_types[i] = TNLP::LINEAR;
      else
        const_types[i] = TNLP::NON_LINEAR;
    }
    return ret_val;
  }

  /** This call is just passed onto the parent class; the multipliers of the
      quadratic cuts are part of lambda. */
  bool TMINLP2TNLPQuadCuts::get_starting_point(Index n, bool init_x, Number* x,
                                               bool init_z, Number* z_L, Number* z_U,
                                               Index m, bool init_lambda,
                                               Number* lambda){
    return TMINLP2TNLP::get_starting_point(n, init_x, x, init_z, z_L, z_U, m, init_lambda, lambda);
  }

  /** Method that returns scaling parameters.  The request is passed onto the
      parent class; the quadratic cuts are not scaled. */
  bool TMINLP2TNLPQuadCuts::get_scaling_parameters(Number& obj_scaling,
                                                   bool& use_x_scaling, Index n,
                                                   Number* x_scaling,
                                                   bool& use_g_scaling, Index m,
                                                   Number* g_scaling){
    assert(num_constraints() == m);
    // The parent class must be called explicitly: an unqualified call would
    // recurse into this method.
    bool retval = TMINLP2TNLP::get_scaling_parameters(obj_scaling, use_x_scaling, n,
                                                      x_scaling, use_g_scaling,
                                                      m - (int)quadRows_.size(), g_scaling);
    if(use_g_scaling){
      g_scaling += m - (int)quadRows_.size();
      CoinFillN(g_scaling, (int)quadRows_.size(), 1.);
    }
    return retval;
  }

  /** Returns the value of the objective function in x.  If a linear
      objective has been set with set_linear_objective, it overrides the
      original one. */
  bool
  TMINLP2TNLPQuadCuts::eval_f(Index n, const Number* x, bool new_x,
                              Number& obj_value){
    if(obj_.empty()){
      return TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
    }
    if(new_x){
      // Propagate the new point to the underlying TMINLP; the value is
      // overwritten below.
      TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
    }
    obj_value = c_;
    assert(n == (int) obj_.size());
    for(int i = 0 ; i < n ; i++){
      obj_value += obj_[i] * x[i];
    }
    return true;
  }

  /** Returns the gradient of the objective in x (constant when a linear
      objective has been set). */
  bool
  TMINLP2TNLPQuadCuts::eval_grad_f(Index n, const Number* x, bool new_x,
                                   Number* grad_f){
    if(obj_.empty()){
      return TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
    }
    if(new_x){
      TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
    }
    assert(n == (int) obj_.size());
    for(int i = 0 ; i < n ; i++){
      grad_f[i] = obj_[i];
    }
    return true;
  }

  /** Compute the value of a single constraint. */
  bool TMINLP2TNLPQuadCuts::eval_gi(Index n, const Number* x, bool new_x,
                                    Index i, Number& gi)
  {
    int m_orig = num_constraints() - (int)quadRows_.size();
    if(i < m_orig){
      return TMINLP2TNLP::eval_gi(n, x, new_x, i, gi);
    }
    i -= m_orig;
    gi = quadRows_[i]->eval_f(x, new_x);
    return true;  // the quadratic row was evaluated successfully
  }

  /** Returns the vector of constraint values in x (appends the values of the
      quadratic cuts). */
  bool TMINLP2TNLPQuadCuts::eval_g(Index n, const Number* x, bool new_x,
                                   Index m, Number* g){
    int m_tminlp = m - (int)quadRows_.size();
    bool retval = TMINLP2TNLP::eval_g(n, x, new_x, m_tminlp, g);
    g += m_tminlp;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      g[i] = quadRows_[i]->eval_f(x, new_x);
    }
    return retval;
  }

  /** Returns the jacobian of the constraints: the entries of the original
      jacobian come first, followed by one block per quadratic cut, ordered
      by row. */
  bool TMINLP2TNLPQuadCuts::eval_jac_g(Index n, const Number* x, bool new_x,
                                       Index m, Index nele_jac, Index* iRow,
                                       Index *jCol, Number* values){
    int n_ele_orig = TMINLP2TNLP::nnz_jac_g();
    int m_orig = m - (int)quadRows_.size();
    int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

    bool retval = TMINLP2TNLP::eval_jac_g(n, x, new_x, m_orig,
                                          n_ele_orig, iRow, jCol, values);
    if(values == NULL){
      // Structure query: append the sparsity pattern of each quadratic row.
      assert(iRow != NULL);
      assert(jCol != NULL);
      iRow += n_ele_orig;
      jCol += n_ele_orig;
      for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
        const int & nnz = quadRows_[i]->nnz_grad();
        Ipopt::Index mi = m_orig + i + offset;
        CoinFillN(iRow, nnz, mi);
        quadRows_[i]->gradiant_struct(nnz, jCol, offset);
        iRow += nnz;
        jCol += nnz;
      }
    }
    else {
      // Value query: append the gradient values of each quadratic row.
      assert(iRow == NULL);
      assert(jCol == NULL);
      values += n_ele_orig;
      for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
        const int & nnz = quadRows_[i]->nnz_grad();
        quadRows_[i]->eval_grad(nnz, x, new_x, values);
        values += nnz;
      }
    }
    return retval;
  }
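
  /* To make the jacobian layout concrete: with n_ele_orig original entries
   * and two quadratic cuts whose gradients have 3 and 2 nonzeroes, the
   * arrays returned to Ipopt look like (C-style indexing, illustrative
   * values only):
   *
   *   position : 0 .. n_ele_orig-1 | n_ele_orig .. +2        | n_ele_orig+3 .. +4
   *   iRow     : original rows     | m_orig, m_orig, m_orig  | m_orig+1, m_orig+1
   *   jCol     : original columns  | columns of cut 0        | columns of cut 1
   *
   * This is why curr_nnz_jac_ is maintained as TMINLP2TNLP::nnz_jac_g()
   * plus the sum of quadRows_[i]->nnz_grad() over all cuts.
   */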

  /** Compute the structure or the values of the gradient for one
      constraint. */
  bool TMINLP2TNLPQuadCuts::eval_grad_gi(Index n, const Number* x, bool new_x,
                                         Index i, Index& nele_grad_gi, Index* jCol,
                                         Number* values)
  {
    int m_orig = num_constraints() - (int)quadRows_.size();
    if(i < m_orig){
      return TMINLP2TNLP::eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
    }
    i -= m_orig;
    int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
    if(values == NULL){
      assert(jCol != NULL);
      nele_grad_gi = quadRows_[i]->nnz_grad();
      quadRows_[i]->gradiant_struct(nele_grad_gi, jCol, offset);
    }
    else{
      assert(jCol == NULL);
      quadRows_[i]->eval_grad(nele_grad_gi, x, new_x, values);
    }
    return true;  // the quadratic row was handled successfully
  }

  /** Return the hessian of the lagrangian.  Entries of the original hessian
      keep their positions; entries contributed by the quadratic cuts are
      renumbered on every structure query.  When a linear objective has been
      set, the objective part of the hessian is dropped. */
  bool TMINLP2TNLPQuadCuts::eval_h(Index n, const Number* x, bool new_x,
                                   Number obj_factor, Index m, const Number* lambda,
                                   bool new_lambda, Index nele_hess,
                                   Index* iRow, Index* jCol, Number* values){
    if(!obj_.empty()) obj_factor = 0;
    if(values == NULL){
      assert(iRow != NULL);
      assert(jCol != NULL);
#ifdef DEBUG
      std::cout<<"Hessian structure"<<std::endl;
#endif
      int nnz = 0;
      int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
      int nnz_sup = nnz_h_lag_orig;
      for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
        if(i->second.second == -1){ // entry of the original hessian
          assert(i->second.first < nnz_h_lag_orig);
        }
        else { // entry contributed by quadratic cuts: assign the next slot
          assert(i->second.second > 0);
          assert(i->second.first >= nnz_h_lag_orig);
          i->second.first = nnz_sup;
          nnz_sup++;
        }
        iRow[i->second.first] = i->first.first;
        jCol[i->second.first] = i->first.second;
#ifdef DEBUG
        printf("iRow %i, jCol %i : nnz %i\n",
               i->first.second, i->first.first,
               i->second.first);
#endif
        //assert(*jCol >= *iRow);
        nnz++;
      }
      assert(nnz == (int) H_.size());
      return true;
    }
    else {
#ifdef DEBUG
      std::cout<<"Computing hessian"<<std::endl;
#endif
      assert(iRow == NULL);
      assert(jCol == NULL);
      int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
      int m_orig = m - (int)quadRows_.size();
      bool ret_val = TMINLP2TNLP::eval_h(n, x, new_x, obj_factor, m_orig, lambda, new_lambda,
                                         nnz_h_lag_orig, iRow, jCol, values);
      // Zero the slots reserved for the cuts, then accumulate each row's
      // contribution weighted by its multiplier.
      CoinZeroN(values + nnz_h_lag_orig, (int)H_.size() - nnz_h_lag_orig);
      for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
        quadRows_[i]->eval_hessian(lambda[i + m_orig], values);
      }
      return ret_val;
    }
  }
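
  /* Sketch of the calling sequence an Ipopt-style solver performs against
   * eval_h (the NULL-pointer convention is part of the Ipopt::TNLP
   * contract; the names below are illustrative):
   *
   *   Index nnz;                              // nnz_h_lag from get_nlp_info
   *   std::vector<Index> ir(nnz), jc(nnz);
   *   eval_h(n, NULL, false, 0., m, NULL, false,
   *          nnz, &ir[0], &jc[0], NULL);      // structure pass
   *   std::vector<Number> vals(nnz);
   *   eval_h(n, x, true, sigma, m, lambda, true,
   *          nnz, NULL, NULL, &vals[0]);      // value pass
   *
   * Since cut entries are renumbered during the structure pass, the
   * structure has to be queried again after addCuts/removeCuts change the
   * problem dimensions.
   */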

  /** Add some linear or quadratic cuts to the problem formulation; the
      quadratic cuts in the collection are recognized and handled as such. */
  void
  TMINLP2TNLPQuadCuts::addCuts(const Cuts & cuts, bool safe){
    assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
    printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif
    int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

    g_l_.reserve(g_l_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
    g_u_.reserve(g_u_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
    quadRows_.reserve(quadRows_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());

    int n = cuts.sizeQuadCuts();
    for(int i = 0 ; i < n ; i++){
      g_l_.push_back(cuts.quadCut(i).lb());
      g_u_.push_back(cuts.quadCut(i).ub());
      quadRows_.push_back(new QuadRow(cuts.quadCut(i)));
      quadRows_.back()->add_to_hessian(H_, offset);
      curr_nnz_jac_ += quadRows_.back()->nnz_grad();
    }
    // Slice down to the OsiCuts base so that only the row cuts are added.
    addRowCuts((OsiCuts) cuts, safe);
    duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
    x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
    duals_init_ = x_init_() + x_l_.size();
  }

  /** Add cuts to the problem formulation from an array of pointers;
      quadratic cuts are recognized with a dynamic_cast. */
  void TMINLP2TNLPQuadCuts::addCuts(unsigned int numcuts,
                                    const OsiRowCut ** cuts){
#ifdef DEBUG
    printf("Adding %u cuts\n", numcuts);
#endif
    int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
    g_l_.reserve(g_l_.size() + numcuts);
    g_u_.reserve(g_u_.size() + numcuts);
    quadRows_.reserve(quadRows_.size() + numcuts);
    for(unsigned int i = 0 ; i < numcuts ; i++){
      g_l_.push_back(cuts[i]->lb());
      g_u_.push_back(cuts[i]->ub());

      const QuadCut * quadCut = dynamic_cast<const QuadCut *> (cuts[i]);
      if(quadCut){
        quadRows_.push_back(new QuadRow(*quadCut));
        quadRows_.back()->add_to_hessian(H_, offset);
      }
      else
        quadRows_.push_back(new QuadRow(*cuts[i]));
      curr_nnz_jac_ += quadRows_.back()->nnz_grad();
    }
    duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
    x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
    duals_init_ = x_init_() + x_l_.size();
  }
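
  /* Minimal usage sketch for the overload above, adding the linear cut
   * 1 <= x0 + 2 x1 (OsiRowCut is the standard Osi class; `problem` stands
   * for some pointer to a TMINLP2TNLPQuadCuts instance and is hypothetical):
   *
   *   OsiRowCut cut;
   *   int    idx[2] = {0, 1};
   *   double val[2] = {1., 2.};
   *   cut.setRow(2, idx, val);       // sparse coefficient vector
   *   cut.setLb(1.);
   *   cut.setUb(COIN_DBL_MAX);       // no upper bound
   *   const OsiRowCut * cuts[1] = {&cut};
   *   problem->addCuts(1, cuts);
   *
   * Passing a QuadCut pointer instead would be recognized by the
   * dynamic_cast and its quadratic part merged into H_.
   */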

  /** Add cuts from an OsiCuts collection; if the collection actually is a
      Cuts object, dispatch to the overload that knows about quadratic
      cuts. */
  void TMINLP2TNLPQuadCuts::addCuts(const OsiCuts& cuts){
    assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
    printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif

    const Cuts * quadCuts = dynamic_cast<const Cuts *>(&cuts);
    if(quadCuts) {
      addCuts(*quadCuts, true);
      return;
    }

    addRowCuts(cuts, true);
  }

  /** Add row cuts to the problem formulation.  When safe is true, check
      which rowCuts are in fact quadratic and treat them as quadratic cuts. */
  void TMINLP2TNLPQuadCuts::addRowCuts(const OsiCuts& cuts, bool safe){
    int n = cuts.sizeRowCuts();
    g_l_.reserve(g_l_.size() + n);
    g_u_.reserve(g_u_.size() + n);
    quadRows_.reserve(quadRows_.size() + n);

    int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

    for(int i = 0 ; i < n ; i++){
      g_l_.push_back(cuts.rowCut(i).lb());
      g_u_.push_back(cuts.rowCut(i).ub());
      if(safe == false){
        assert(dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i)) == NULL);
      }
      else {
        const QuadCut * cut = dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i));
        if(cut){
          quadRows_.push_back(new QuadRow(*cut));
          quadRows_.back()->add_to_hessian(H_, offset);
          curr_nnz_jac_ += quadRows_.back()->nnz_grad();
          continue;
        }
      }
      quadRows_.push_back(new QuadRow(cuts.rowCut(i)));
      curr_nnz_jac_ += quadRows_.back()->nnz_grad();
    }
    duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
    x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
    duals_init_ = x_init_() + x_l_.size();
  }

  /** Remove some cuts from the formulation. */
  void TMINLP2TNLPQuadCuts::removeCuts(unsigned int n, const int * idxs){
    if(n == 0) return;
    vector< int > order(quadRows_.size());
    int m_tminlp = num_constraints() - (int)quadRows_.size();
    // Delete the removed rows and release their hessian and jacobian
    // entries.
    for(unsigned int k = 0 ; k < n ; k++){
      int idx = idxs[k] - m_tminlp;
      quadRows_[idx]->remove_from_hessian(H_);
      curr_nnz_jac_ -= quadRows_[idx]->nnz_grad();
      delete quadRows_[idx];
      quadRows_[idx] = NULL;
    }

    // Mark the removed positions with INT_MAX, then sort so that the
    // surviving rows come first in their original order.
    for(unsigned int i = 0 ; i < order.size() ; i++){
      order[i] = i;
    }
    for(unsigned int i = 0 ; i < n ; i++){
      assert(idxs[i] - m_tminlp >= 0);
      order[ idxs[i] - m_tminlp ] = INT_MAX;
    }

    std::sort(order.begin(), order.end());

    // Compact quadRows_ and the cut bounds in place.
    int i;
    double * g_l = g_l_() + m_tminlp;
    double * g_u = g_u_() + m_tminlp;
    for(i = 0 ; order[i] < INT_MAX ; i++){
      assert(order[i] >= i);
      quadRows_[i] = quadRows_[order[i]];
      g_l[i] = g_l[order[i]];
      g_u[i] = g_u[order[i]];
    }
    quadRows_.erase(quadRows_.begin() + i, quadRows_.end());
    g_l_.erase(g_l_.begin() + m_tminlp + i, g_l_.end());
    g_u_.erase(g_u_.begin() + m_tminlp + i, g_u_.end());
  }
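
  /* Worked example of the compaction above: with five quadratic rows and
   * idxs selecting rows 1 and 3 (relative to m_tminlp), order starts as
   * [0,1,2,3,4], becomes [0,INT_MAX,2,INT_MAX,4] after marking, and
   * [0,2,4,INT_MAX,INT_MAX] after the sort.  Rows 0, 2 and 4 are then moved
   * into slots 0, 1 and 2, and the tail is erased.  The assert
   * order[i] >= i holds because the surviving indices are sorted, so every
   * move reads from a slot that has not been overwritten yet.
   */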

  /** Print H_ for debugging. */
  void
  TMINLP2TNLPQuadCuts::printH(){
    int nnz = 0;
    for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
      std::cout<<"nnz: "<<nnz
               <<" jCol: "<<i->first.first
               <<", iRow: "<<i->first.second<<std::endl;
      nnz++;
    }
  }

  /** Change the objective to a linear one with the given coefficients and
      constant term. */
  void
  TMINLP2TNLPQuadCuts::set_linear_objective(int n_var, const double * obj, double c_0){
    assert(n_var == TMINLP2TNLP::num_variables());
    obj_.resize(n_var);
    CoinCopyN(obj, n_var, obj_());
    c_ = c_0;
  }
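
  /* Usage sketch for the objective override (illustrative only; `problem`
   * is a hypothetical pointer to a TMINLP2TNLPQuadCuts with two variables):
   *
   *   double coefs[2] = {1., -1.};
   *   problem->set_linear_objective(2, coefs, 0.5);  // f(x) = 0.5 + x0 - x1
   *
   * After this call eval_f and eval_grad_f report the linear objective, and
   * eval_h zeroes obj_factor so the objective part of the hessian drops out.
   */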
} // Ends Bonmin namespace