BonTMINLP2TNLP.cpp
Implementation of the TMINLP2TNLP adapter class (source listing).
1 // (C) Copyright International Business Machines Corporation and Carnegie Mellon University 2004, 2006
2 // All Rights Reserved.
3 // This code is published under the Eclipse Public License.
4 //
5 // Authors :
6 // Carl D. Laird, Carnegie Mellon University,
7 // Andreas Waechter, International Business Machines Corporation
8 // Pierre Bonami, Carnegie Mellon University,
9 //
10 // Date : 12/01/2004
11 
12 
13 #include "BonTMINLP2TNLP.hpp"
14 #include "IpBlas.hpp"
15 #include "IpAlgTypes.hpp"
16 #include "IpIpoptCalculatedQuantities.hpp"
17 #include <climits>
18 #include <string>
19 #include <fstream>
20 #include <sstream>
22 #include "OsiBranchingObject.hpp"
23 
24 using namespace Ipopt;
25 
26 extern bool BonminAbortAll;
27 class OsiObject;
28 namespace Bonmin
29 {
30 
31  TMINLP2TNLP::TMINLP2TNLP(const SmartPtr<TMINLP> tminlp
32 #ifdef WARM_STARTER
33  ,
34  const OptionsList& options
35 #endif
36  )
37  :
38  var_types_(),
39  x_l_(),
40  x_u_(),
41  orig_x_l_(),
42  orig_x_u_(),
43  g_l_(),
44  g_u_(),
45  x_init_(),
46  duals_init_(NULL),
47  x_init_user_(),
48  x_sol_(),
49  g_sol_(),
50  duals_sol_(),
51  tminlp_(tminlp),
52  nnz_jac_g_(0),
53  nnz_h_lag_(0),
54  index_style_(TNLP::FORTRAN_STYLE),
55  obj_value_(1e100),
56  curr_warm_starter_(),
57  nlp_lower_bound_inf_(-DBL_MAX),
58  nlp_upper_bound_inf_(DBL_MAX),
59  warm_start_entire_iterate_(true),
60  need_new_warm_starter_(true)
61  {
62  // read the nlp size and bounds information from
63  // the TMINLP and keep an internal copy. This way the
64  // caller can modify the bounds that are sent to Ipopt;
65  assert(IsValid(tminlp_));
66  Index n,m;
67  bool retval =
68  tminlp_->get_nlp_info(n, m, nnz_jac_g_, nnz_h_lag_, index_style_);
69 
70  ASSERT_EXCEPTION(retval, TMINLP_INVALID,
71  "get_nlp_info of TMINLP returns false.");
72 
73  // Allocate space for the variable types vector
74  var_types_.resize(n);
75 
76  // retrieve the variable types
77  tminlp_->get_variables_types(n, var_types_());
78 
79  // Allocate space for the internal copy of the variable bounds
80  x_l_.resize(n);
81  x_u_.resize(n);
82  orig_x_l_.resize(n);
83  orig_x_u_.resize(n);
84 
85  g_l_.resize(m);
86  g_u_.resize(m);
87 
88  // retrieve the variable bounds
89  if(m){
90  tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, g_l_(), g_u_());
91  }
92  else {
93  tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, NULL, NULL);
94  }
95  IpBlasDcopy(n, x_l_(), 1, orig_x_l_(), 1);
96  IpBlasDcopy(n, x_u_(), 1, orig_x_u_(), 1);
97 
98 
99  // Allocate space for the initial point
100  x_init_user_.resize(n);
101  tminlp_->get_starting_point(n, true, x_init_user_(), false, NULL, NULL,
102  m, false, NULL);
103 
104 #ifdef WARM_STARTER
105  // Get values for parameters
106  options.GetNumericValue("nlp_lower_bound_inf", nlp_lower_bound_inf_, "");
107  options.GetNumericValue("nlp_upper_bound_inf", nlp_upper_bound_inf_, "");
108  options.GetBoolValue("warm_start_entire_iterate",
110 #endif
111  }
112 
114  :
115  var_types_(),
116  x_l_(),
117  x_u_(),
118  orig_x_l_(),
119  orig_x_u_(),
120  g_l_(),
121  g_u_(),
122  x_init_(),
123  duals_init_(NULL),
124  x_init_user_(),
125  x_sol_(),
126  g_sol_(),
127  duals_sol_(),
128  tminlp_(other.tminlp_),
129  nnz_jac_g_(other.nnz_jac_g_),
130  nnz_h_lag_(other.nnz_h_lag_),
131  index_style_(other.index_style_),
132  return_status_(other.return_status_),
133  obj_value_(other.obj_value_),
134  curr_warm_starter_(other.curr_warm_starter_),
135  nlp_lower_bound_inf_(other.nlp_lower_bound_inf_),
136  nlp_upper_bound_inf_(other.nlp_upper_bound_inf_),
137  warm_start_entire_iterate_(other.warm_start_entire_iterate_),
138  need_new_warm_starter_(other.need_new_warm_starter_)
139  {
140  gutsOfCopy(other);
141  }
142 
144  TMINLP2TNLP &
146  if(this != &rhs){
147  tminlp_ = rhs.tminlp_;
148  nnz_jac_g_ = rhs.nnz_jac_g_;
149  nnz_h_lag_ = rhs.nnz_h_lag_;
152  obj_value_ = rhs.obj_value_;
158 
159  gutsOfDelete();
160  gutsOfCopy(rhs);
161 
162  }
163  return (*this);
164  }
165 
167  {
168  gutsOfDelete();
169  }
170 
171  void
173  }
174 
180  void
182  {
183  Index n = other.num_variables();
184  Index m = other.num_constraints();
185 
186  if(n > 0){//Copies all the arrays in n_
187  var_types_ = other.var_types_;
188 
189  x_l_.resize(n);
190  x_u_.resize(n); // Those are copied in copyUserModification
191  IpBlasDcopy(n, other.x_l_(), 1, x_l_(), 1);
192  IpBlasDcopy(n, other.x_u_(), 1, x_u_(), 1);
193 
194  orig_x_l_.resize(n);
195  orig_x_u_.resize(n);
196  IpBlasDcopy(n, other.orig_x_l_(), 1, orig_x_l_(), 1);
197  IpBlasDcopy(n, other.orig_x_u_(), 1, orig_x_u_(), 1);
198  x_init_user_.resize(n);
199  IpBlasDcopy(n, other.x_init_user_(), 1, x_init_user_(), 1);
200  if(!other.x_sol_.empty()) {
201  Set_x_sol(n,other.x_sol_());
202  }
203  }
204 
205  if(!other.g_l_.empty()){
206  const size_t& size = other.g_l_.size();
207  g_l_.resize(size);
208  g_u_.resize(size);
209  }
210 
211  if(m > 0){//Copies all the arrays in m_
212  IpBlasDcopy(m, other.g_l_(), 1, g_l_(), 1);
213  IpBlasDcopy(m, other.g_u_(), 1, g_u_(), 1);
214  if(!other.g_sol_.empty()) {
215  g_sol_.resize(m);
216  IpBlasDcopy(m, other.g_sol_(), 1, g_sol_(), 1);
217  }
218  }
219 
220 
221  x_init_ = other.x_init_;
222 
223  if(other.duals_init_) {
224  duals_init_ = x_init_() + n;
225  }
226  else
227  duals_init_ = NULL;
228 
229 
230  if(!other.duals_sol_.empty()) {
231  duals_sol_.resize(m + 2*n);
232  IpBlasDcopy((int) duals_sol_.size(), other.duals_sol_(), 1, duals_sol_(), 1);
233  }
234 
235 }
236 
238  const Number * x_l,
239  const Number * x_u)
240  {
241  assert(n==num_variables());
242  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
243  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
244  }
245 
247  const Number * x_l)
248  {
249  assert(n==num_variables());
250  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
251  }
252 
254  const Number * x_u)
255  {
256  assert(n==num_variables());
257  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
258  }
259 
260  void TMINLP2TNLP::SetVariableBounds(Index var_no, Number x_l, Number x_u)
261  {
262  assert(var_no >= 0 && var_no < num_variables());
263  x_l_[var_no] = x_l;
264  x_u_[var_no] = x_u;
265  }
266 
  /** Overwrite only the lower bound of variable var_no. */
  void TMINLP2TNLP::SetVariableLowerBound(Index var_no, Number x_l)
  {
    assert(var_no >= 0 && var_no < num_variables());
    x_l_[var_no] = x_l;
  }
272 
  /** Overwrite only the upper bound of variable var_no. */
  void TMINLP2TNLP::SetVariableUpperBound(Index var_no, Number x_u)
  {
    assert(var_no >= 0 && var_no < num_variables());
    x_u_[var_no] = x_u;
  }
278 
280  {
281  curr_warm_starter_ = NULL;
282  x_init_.clear();
283  }
284 
285  void TMINLP2TNLP::setxInit(Index n,const Number* x_init)
286  {
287  assert(n == num_variables());
288  if((int)x_init_.size() < n)
289  x_init_.resize(n);
290  IpBlasDcopy(n, x_init, 1, x_init_(), 1);
291  }
292 
293  void TMINLP2TNLP::setDualsInit(Index m, const Number* duals_init)
294  {
295  assert(m == num_variables() * 2 + num_constraints() );
296  x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
298 
299  if(m >0)
300  IpBlasDcopy(m, duals_init, 1, duals_init_, 1);
301 
302  }
303 
  /** Record the continuous primal solution (n values). */
  void TMINLP2TNLP::Set_x_sol(Index n, const Number* x_sol)
  {
    assert(n == num_variables());
    // Lazily allocate the solution vector on first use.
    if (x_sol_.empty()) {
      x_sol_.resize(n);
    }
    assert(n == (int) x_sol_.size());
    IpBlasDcopy(n, x_sol, 1, x_sol_(), 1);
  }
314 
  /** Record the continuous dual solution.
   *  @param n must equal 2*num_variables() + num_constraints(); same
   *         [z_L | z_U | lambda] layout that finalize_solution stores. */
  void TMINLP2TNLP::Set_dual_sol(Index n, const Number* dual_sol)
  {
    assert(n == num_variables() *2 + num_constraints());
    // Lazily allocate the dual vector on first use.
    if (duals_sol_.empty()) {
      duals_sol_.resize(n);
    }
    assert(n == (int) duals_sol_.size());
    IpBlasDcopy(n, dual_sol, 1, duals_sol_(), 1);
  }
325 
328  {
329  assert(n >= 0 && n < num_variables());
330  var_types_[n] = type;
331  }
332 
333  bool TMINLP2TNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
334  Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
335  {
336  n = num_variables();
337  m = num_constraints();
338  nnz_jac_g = nnz_jac_g_;
339  nnz_h_lag = nnz_h_lag_;
340  index_style = index_style_;
341  //printf("Been there and said %i\n", nnz_jac_g_);
342  return true;
343  }
344 
345  bool TMINLP2TNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
346  Index m, Number* g_l, Number* g_u)
347  {
348  assert(n==num_variables());
349  assert(m==num_constraints());
350  IpBlasDcopy(n, x_l_(), 1, x_l, 1);
351  IpBlasDcopy(n, x_u_(), 1, x_u, 1);
352  if (m > 0){
353  IpBlasDcopy(m, g_l_(), 1, g_l, 1);
354  IpBlasDcopy(m, g_u_(), 1, g_u, 1);
355  }
356  return true;
357  }
358 
  /** Hand the starting point to Ipopt.
   *  Primal values come from x_init_ when one was installed (setxInit /
   *  setDualsInit), otherwise from the user's original point x_init_user_.
   *  Dual values are available only after setDualsInit stored them in the
   *  tail of x_init_; the layout there is [x (n) | z_L (n) | z_U (n) |
   *  lambda (m)].  Returns false when duals are requested but absent. */
  bool TMINLP2TNLP::get_starting_point(Index n, bool init_x, Number* x,
      bool init_z, Number* z_L, Number* z_U,
      Index m, bool init_lambda,
      Number* lambda)
  {
    assert(m==num_constraints());
    assert(n==num_variables());
#if 0
    x_init_.resize(3*n + m, 0.);
    duals_init_ = x_init_() + n;
#endif
    if (init_x == true) {
      // Prefer an explicitly installed start over the user's original one.
      if(x_init_.empty()){
        assert(x_init_user_.size() >= n);
        IpBlasDcopy(n, x_init_user_(), 1, x, 1);
      }
      else
        IpBlasDcopy(n, x_init_(), 1, x, 1);
    }
    if (init_z == true) {
      // Bound multipliers can only be served when a dual start was stored.
      if(duals_init_ == NULL)
        return false;
      assert(x_init_.size() == 3*n + m && duals_init_ == x_init_() + n);
      IpBlasDcopy(n, duals_init_, 1, z_L, 1);
      IpBlasDcopy(n, duals_init_ + n, 1, z_U, 1);

    }
    if(init_lambda == true) {
      if(duals_init_ == NULL)
        return false;
      assert(x_init_.size() == 3*n + m && duals_init_ == x_init_() + n);
      // Constraint multipliers sit after the two bound-multiplier blocks.
      if(m > 0)
        IpBlasDcopy(m, duals_init_ + 2*n , 1, lambda, 1);
    }

    // A start has been handed out; any stored warm-start iterates are stale.
    need_new_warm_starter_ = true;
    return true;
  }
397 
398  bool TMINLP2TNLP::get_warm_start_iterate(IteratesVector& warm_start_iterate)
399  {
400  if (IsNull(curr_warm_starter_)) {
401  return false;
402  }
403 
404  bool retval = curr_warm_starter_->WarmStartIterate(num_variables(), x_l_(), x_u_(),
405  warm_start_iterate);
406 
407  need_new_warm_starter_ = true;
408  return retval;
409  }
410 
411  bool TMINLP2TNLP::eval_f(Index n, const Number* x, bool new_x,
412  Number& obj_value)
413  {
414  return tminlp_->eval_f(n, x, new_x, obj_value);
415  }
416 
  /** Evaluate the objective gradient by delegating to the TMINLP. */
  bool TMINLP2TNLP::eval_grad_f(Index n, const Number* x, bool new_x,
      Number* grad_f)
  {
    // NOTE(review): the last entry is zeroed before delegating -- presumably
    // so a TMINLP that never writes the final component leaves 0 rather
    // than garbage there; confirm the intent against the TMINLP contract.
    grad_f[n-1] = 0;
    return tminlp_->eval_grad_f(n, x, new_x, grad_f);
  }
423 
424  bool TMINLP2TNLP::eval_g(Index n, const Number* x, bool new_x,
425  Index m, Number* g)
426  {
427  int return_code = tminlp_->eval_g(n, x, new_x, m, g);
428  return return_code;
429  }
430 
431  bool TMINLP2TNLP::eval_jac_g(Index n, const Number* x, bool new_x,
432  Index m, Index nele_jac, Index* iRow,
433  Index *jCol, Number* values)
434  {
435  bool return_code =
436  tminlp_->eval_jac_g(n, x, new_x, m, nele_jac,
437  iRow, jCol, values);
438  if(iRow != NULL){
439  Index buf;
440  for(Index k = 0; k < nele_jac ; k++){
441  buf = iRow[k];
442  iRow[k] = -1;
443  iRow[k] = buf;
444  }
445  }
446  return return_code;
447  }
448 
449  bool TMINLP2TNLP::eval_h(Index n, const Number* x, bool new_x,
450  Number obj_factor, Index m, const Number* lambda,
451  bool new_lambda, Index nele_hess,
452  Index* iRow, Index* jCol, Number* values)
453  {
454  return tminlp_->eval_h(n, x, new_x, obj_factor, m, lambda,
455  new_lambda, nele_hess,
456  iRow, jCol, values);
457  }
458 
459 
460  bool TMINLP2TNLP::eval_gi(Index n, const Number* x, bool new_x,
461  Index i, Number& gi)
462  {
463  return tminlp_->eval_gi(n, x, new_x, i, gi);
464  }
465 
466  bool TMINLP2TNLP::eval_grad_gi(Index n, const Number* x, bool new_x,
467  Index i, Index& nele_grad_gi, Index* jCol,
468  Number* values)
469  {
470  return tminlp_->eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
471  }
472 
473  void TMINLP2TNLP::finalize_solution(SolverReturn status,
474  Index n, const Number* x, const Number* z_L, const Number* z_U,
475  Index m, const Number* g, const Number* lambda,
476  Number obj_value,
477  const IpoptData* ip_data,
478  IpoptCalculatedQuantities* ip_cq)
479  {
480  assert(n == (Index) num_variables());
481  assert(m == (Index) num_constraints());
482  x_sol_.resize(n);
483 
484  IpBlasDcopy(n, x, 1, x_sol_(), 1);
485 
486  if(m > 0){
487  g_sol_.resize(m);
488  IpBlasDcopy(m, g, 1, g_sol_(), 1);
489  }
490  duals_sol_.resize(m + 2*n);
491  if(lambda){
492  if(m > 0)
493  IpBlasDcopy(m, lambda, 1, duals_sol_() + 2*n, 1);
494 
495  IpBlasDcopy(n, z_L, 1 , duals_sol_() , 1);
496  IpBlasDcopy(n, z_U, 1 , duals_sol_() + n, 1);
497  }
498 
499  return_status_ = status;
501 
502  if(status == Ipopt::LOCAL_INFEASIBILITY && ip_cq != NULL){
503  obj_value_ = ip_cq->curr_nlp_constraint_violation(NORM_MAX);
504  }
506  curr_warm_starter_->Finalize();
507  }
508  }
509 
510 
  /** Per-iteration callback from Ipopt.
   *  Returning false aborts the optimization; this is used to honor the
   *  global BonminAbortAll flag.  When built with WARM_STARTER the callback
   *  also accumulates warm-start information. */
  bool TMINLP2TNLP::intermediate_callback(AlgorithmMode mode,
      Index iter, Number obj_value,
      Number inf_pr, Number inf_du,
      Number mu, Number d_norm,
      Number regularization_size,
      Number alpha_du, Number alpha_pr,
      Index ls_trials,
      const IpoptData* ip_data,
      IpoptCalculatedQuantities* ip_cq)
  {
    if (BonminAbortAll) return false;
#if WARM_STARTER
    // If we don't have this switched on, we assume that also the
    // "warm_start" option for bonmin is set not to refer to the
    // interior warm start object
    // NOTE(review): several lines of this WARM_STARTER branch were lost in
    // the extracted source (the guard conditions and the construction of
    // curr_warm_starter_); the surviving fragment is kept unchanged since
    // the branch is compiled out unless WARM_STARTER is defined.
      return true;
    }
    // Create a new object for later warm start information
      need_new_warm_starter_ = false;
    }

    return curr_warm_starter_->UpdateStoredIterates(mode, *ip_data, *ip_cq);
#else
    return true;
#endif
  }
543 
544 
  /** Write out the information needed to reproduce this sub-problem:
   *  - <probName>.bounds : every bound that differs from the original model
   *    ("LO"/"UP", column index, value, tab-separated);
   *  - <probName>.mod    : the same differences as AMPL-style constraints,
   *    only when variable names were supplied;
   *  - <probName>.start  : the current primal (and, if set, dual) start.
   *  @param probName base name for the three output files.
   *  @param varNames optional array of variable names (may be NULL). */
  void
  TMINLP2TNLP::outputDiffs(const std::string& probName, const std::string * varNames)
  {
    const int &numcols = num_variables();
    const int &numrows = num_constraints();

    const double * currentLower = x_l();
    const double * currentUpper = x_u();

    const double * originalLower = orig_x_l();
    const double * originalUpper = orig_x_u();
    // Tolerance-based float comparison: only genuinely changed bounds are
    // reported.
    CoinRelFltEq eq;
    std::string fBoundsName = probName;
    std::ostringstream os;
    fBoundsName+=".bounds";
    std::string fModName = probName;
    fModName+= ".mod";
    std::ofstream fBounds;
    std::ofstream fMod;
    bool hasVarNames = 0;

    if(varNames!=NULL )
      hasVarNames=1;
    // The .mod file is only produced when names are available.
    if(hasVarNames)
      fMod.open(fModName.c_str());
    fBounds.open(fBoundsName.c_str());

    for(int i = 0 ; i < numcols ; i++) {
      if(!eq(currentLower[i],originalLower[i])) {
        if(hasVarNames)
          fMod<<"bounds"<<i<<": "
          <<varNames[i]<<" >= "
          <<currentLower[i]<<";\n";


        fBounds<<"LO"<<"\t"<<i<<"\t"<<currentLower[i]<<std::endl;
      }
      if(!eq(currentUpper[i],originalUpper[i])) {
        if(hasVarNames)
          fMod<<"bounds"<<i<<": "
          <<varNames[i]<<" <= "
          <<currentUpper[i]<<";\n";

        fBounds<<"UP"<<"\t"<<i<<"\t"<<currentUpper[i]<<std::endl;
      }
    }

    //write a file with starting point
    std::string fStartPointName=probName;
    fStartPointName+=".start";

    std::ofstream fStartPoint(fStartPointName.c_str());
    const double * primals = x_init();
    const double * duals = duals_init();
    // Full double precision so the sub-problem can be reproduced exactly.
    fStartPoint.precision(17);
    fStartPoint<<numcols<<"\t"<<2*numcols+numrows<<std::endl;
    for(int i = 0 ; i < numcols ; i++)
      fStartPoint<<primals[i]<<std::endl;
    int end = 2*numcols + numrows;
    if(duals) {
      for(int i = 0 ; i < end; i++)
        fStartPoint<<duals[i]<<std::endl;
    }

  }
615 
617  void
619  {
620  for(int i=0 ; i < num_variables() ; i++) {
621  if( ( var_types_[i] == TMINLP::INTEGER ||
622  var_types_[i] == TMINLP::BINARY )&&
623  x_l_[i] < x_u_[i] + 0.5)//not fixed
624  {
625  x_sol_[i] = ceil(x_l_[i]) + 0.5;//make it integer infeasible
626  }
627  }
628  }
629 
630  bool
632  bool& use_x_scaling, Index n,
633  Number* x_scaling,
634  bool& use_g_scaling, Index m,
635  Number* g_scaling)
636  {
637  return tminlp_->get_scaling_parameters(obj_scaling, use_x_scaling, n,
638  x_scaling,
639  use_g_scaling, m, g_scaling);
640  }
641 
642 
647  void
649  {
650  curr_warm_starter_ = warm_starter;
651  }
654  {
655  return curr_warm_starter_;
656  }
657 
658 
660  double
662  Number help;
663  tminlp_->eval_upper_bound_f(num_variables(), x, help);
664  return help;
665  }
666 
667  double
668  TMINLP2TNLP::check_solution(OsiObject ** objects, int nObjects){
669  assert(x_sol_.size() == num_variables());
670  assert(g_sol_.size() == num_constraints());
671  if (objects) {
672  for (int i = 0 ; i < nObjects ; i++) {
673  OsiSimpleInteger * obj = dynamic_cast<OsiSimpleInteger *>(objects[i]);
674  if(obj){
675  int colNumber = obj->columnNumber();
676  x_sol_[colNumber] = floor(x_sol_[colNumber]+0.5);
677  }
678  }
679  }
680  else {
681  for (unsigned int i = 0; i < x_sol_.size() ; i++) {
683  x_sol_[i] = floor(x_sol_[i]+0.5);
684  }
685  }
686  }
687  eval_g((int)x_sol_.size(), x_sol_(), true, (int)g_sol_.size(), g_sol_());
688  eval_f((int)x_sol_.size(), x_sol_(), false, obj_value_);
689  double error = 0;
690  for(unsigned int i = 0 ; i < g_sol_.size() ; i++){
691  error = std::max(error, std::max(0., g_l_[i] - g_sol_[i]));
692  error = std::max(error, std::max(0., - g_u_[i] + g_sol_[i]));
693  }
694  return error;
695  }
696 
697 }// namespace Bonmin
698 
double * values
TNLP::IndexStyleEnum index_style_
index style (fortran or C)
void Set_dual_sol(Ipopt::Index n, const Ipopt::Number *dual_sol)
Set the continuous dual solution.
void outputDiffs(const std::string &probName, const std::string *varNames)
Procedure to output relevant information to reproduce a sub-problem.
vector< Ipopt::Number > g_l_
Lower bounds on constraints values.
void SetVariableType(Ipopt::Index n, TMINLP::VariableType type)
Change the type of the variable.
virtual bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Return the hessian of the lagrangian.
vector< Ipopt::Number > x_init_
Initial primal point.
void resetStartingPoint()
reset the starting point to original one.
vector< Ipopt::Number > x_l_
Current lower bounds on variables.
const Ipopt::Number * x_init() const
get the starting primal point
void SetVariablesUpperBounds(Ipopt::Index n, const Ipopt::Number *x_u)
Change the upper bound on the variable.
void SetVariablesLowerBounds(Ipopt::Index n, const Ipopt::Number *x_l)
Change the lower bound on the variables.
virtual void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, const Ipopt::Number *g, const Ipopt::Number *lambda, Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
This method is called when the algorithm is complete so the TNLP can store/write the solution...
virtual ~TMINLP2TNLP()
Default destructor.
virtual bool eval_grad_gi(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index i, Ipopt::Index &nele_grad_gi, Ipopt::Index *jCol, Ipopt::Number *values)
compute the structure or values of the gradient for one constraint
void Set_x_sol(Ipopt::Index n, const Ipopt::Number *x_sol)
Set the continuous solution.
vector< Ipopt::Number > orig_x_u_
Original upper bounds on variables.
Ipopt::Number * duals_init_
Initial values for all dual multipliers (constraints then lower bounds then upper bounds) ...
virtual bool eval_gi(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index i, Ipopt::Number &gi)
compute the value of a single constraint
virtual bool get_scaling_parameters(Ipopt::Number &obj_scaling, bool &use_x_scaling, Ipopt::Index n, Ipopt::Number *x_scaling, bool &use_g_scaling, Ipopt::Index m, Ipopt::Number *g_scaling)
Method that returns scaling parameters.
bool IsValid(const OSSmartPtr< U > &smart_ptr)
Definition: OSSmartPtr.hpp:465
vector< TMINLP::VariableType > var_types_
Types of the variable (TMINLP::CONTINUOUS, TMINLP::INTEGER, TMINLP::BINARY).
void SetVariableLowerBound(Ipopt::Index var_no, Ipopt::Number x_l)
Change the lower bound on the variable.
Ipopt::SmartPtr< IpoptInteriorWarmStarter > curr_warm_starter_
Pointer to object that holds warmstart information.
bool warm_start_entire_iterate_
Option from Ipopt - we currently use it to see if we want to use some clever warm start or just the l...
void force_fractionnal_sol()
Force the solution to be fractional.
vector< Ipopt::Number > x_sol_
Optimal solution.
TMINLP2TNLP & operator=(const TMINLP2TNLP &)
Overloaded Equals Operator.
vector< Ipopt::Number > g_sol_
Activities of constraint g( x_sol_)
void SetVariableUpperBound(Ipopt::Index var_no, Ipopt::Number x_u)
Change the upper bound on the variable.
Ipopt::SmartPtr< TMINLP > tminlp_
pointer to the tminlp that is being adapted
double evaluateUpperBoundingFunction(const double *x)
Evaluate the upper bounding function at given point and store the result.
Ipopt::Number obj_value() const
Get the objective value.
const Ipopt::Number * x_l()
Get the current values for the lower bounds.
void SetVariablesBounds(Ipopt::Index n, const Ipopt::Number *x_l, const Ipopt::Number *x_u)
Change the bounds on the variables.
vector< Ipopt::Number > g_u_
Upper bounds on constraints values.
bool IsNull(const OSSmartPtr< U > &smart_ptr)
Definition: OSSmartPtr.hpp:471
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
The caller is allowed to modify the bounds, so this method returns the internal bounds information...
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
Returns the vector of constraint values in x.
vector< Ipopt::Number > orig_x_l_
Original lower bounds on variables.
bool need_new_warm_starter_
Do we need a new warm starter object.
fint end
vector< Ipopt::Number > x_u_
Current upper bounds on variables.
const Ipopt::Number * x_u()
Get the current values for the upper bounds.
Ipopt::Number nlp_lower_bound_inf_
Value for a lower bound that denotes -infinity.
void SetWarmStarter(Ipopt::SmartPtr< IpoptInteriorWarmStarter > warm_starter)
vector< Ipopt::Number > x_init_user_
User-provided initial primal point.
Ipopt::Index nnz_jac_g_
Number of non-zeroes in the constraints jacobian.
Ipopt::Index num_constraints() const
Get the number of constraints.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
Returns the vector of the gradient of the objective w.r.t.
void fint fint * k
const Ipopt::Number * duals_init() const
get the starting dual point
const Ipopt::Number * orig_x_l() const
Get the original values for the lower bounds.
static int
Definition: OSdtoa.cpp:2173
double check_solution(OsiObject **objects=0, int nObjects=-1)
Round and check the current solution, return norm inf of constraint violation.
const Ipopt::Number * orig_x_u() const
Get the original values for the upper bounds.
Ipopt::Number obj_value_
Value of the optimal solution found by Ipopt.
Ipopt::SolverReturn return_status_
Return status of the optimization process.
void setDualsInit(Ipopt::Index n, const Ipopt::Number *duals_init)
set the dual starting point to duals_init
Ipopt::Number nlp_upper_bound_inf_
Value for a upper bound that denotes infinity.
TMINLP2TNLP()
Default Constructor.
bool BonminAbortAll
Definition: BonCbc.cpp:37
void gutsOfCopy(const TMINLP2TNLP &source)
Copies all the arrays.
Ipopt::Index nnz_h_lag_
Number of non-zeroes in the lagrangian hessian.
void fint fint fint real fint real real real real real real * g
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
Returns the value of the objective function in x.
Ipopt::Index num_variables() const
Get the number of variables.
void fint * m
vector< Ipopt::Number > duals_sol_
Dual multipliers of constraints and bounds.
This is an adapter class that converts a TMINLP to a TNLP to be solved by Ipopt.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, TNLP::IndexStyleEnum &index_style)
This call is just passed onto the TMINLP object.
void setxInit(Ipopt::Index n, const Ipopt::Number *x_init)
set the starting point to x_init
Ipopt::SmartPtr< IpoptInteriorWarmStarter > GetWarmStarter()
virtual bool intermediate_callback(Ipopt::AlgorithmMode mode, Ipopt::Index iter, Ipopt::Number obj_value, Ipopt::Number inf_pr, Ipopt::Number inf_du, Ipopt::Number mu, Ipopt::Number d_norm, Ipopt::Number regularization_size, Ipopt::Number alpha_du, Ipopt::Number alpha_pr, Ipopt::Index ls_trials, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
Intermediate Callback method for the user.
void fint * n
void SetVariableBounds(Ipopt::Index var_no, Ipopt::Number x_l, Ipopt::Number x_u)
Change the bounds on the variable.
virtual bool get_warm_start_iterate(Ipopt::IteratesVector &warm_start_iterate)
Method that returns an Ipopt IteratesVector that has the starting point for all internal variables...
VariableType
Type of the variables.
Definition: BonTMINLP.hpp:192
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Returns the jacobian of the constraints.
virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda)
Method called by Ipopt to get the starting point.
void fint fint fint real fint real * x