BonTNLP2FPNLP.cpp
Go to the documentation of this file.
1 // (C) Copyright Carnegie Mellon University 2005
2 // All Rights Reserved.
3 // This code is published under the Eclipse Public License.
4 //
5 // Authors :
6 // Pierre Bonami, Carnegie Mellon University,
7 //
8 // Date : 05/25/2005
9 
10 
11 #include "BonTNLP2FPNLP.hpp"
12 
13 using namespace Ipopt;
14 
15 namespace Bonmin
16 {
17  TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
18  tnlp_(tnlp),
19  inds_(),
20  vals_(),
21  lambda_(1.),
22  sigma_(1.),
23  norm_(2),
24  objectiveScalingFactor_(objectiveScalingFactor),
25  use_feasibility_pump_objective_(false),
26  use_cutoff_constraint_(false),
27  use_local_branching_constraint_(false),
28  cutoff_(COIN_DBL_MAX),
29  rhs_local_branching_constraint_(COIN_DBL_MAX),
30  index_style_(TNLP::C_STYLE)
31  {}
32 
34  tnlp_(tnlp),
35  inds_(other->inds_),
36  vals_(other->vals_),
37  lambda_(other->lambda_),
38  sigma_(other->sigma_),
39  norm_(other->norm_),
40  objectiveScalingFactor_(other->objectiveScalingFactor_),
41  use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
42  use_cutoff_constraint_(other->use_cutoff_constraint_),
43  use_local_branching_constraint_(other->use_local_branching_constraint_),
44  cutoff_(other->cutoff_),
45  rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
46  index_style_(other->index_style_)
47  {}
48 
50  {
51  }
52 
53  void
54  TNLP2FPNLP::set_cutoff(Number cutoff)
55  {
56  Number epsilon = 1.0e-6;
57  if(cutoff > 1.0e-8)
58  cutoff_ = (1-epsilon) * cutoff;
59  else if(cutoff < -1.0e-8)
60  cutoff_ = (1+epsilon) * cutoff;
61  else
62  cutoff_ = - epsilon;
63  }
64 
65  void
66  TNLP2FPNLP::set_dist_to_point_obj(size_t n, const Number * vals, const Index * inds)
67  {
68  inds_.resize(n);
69  vals_.resize(n);
70  std::copy(vals, vals + n, vals_.begin());
71  std::copy(inds, inds + n, inds_.begin());
72  }
73 
75  double
77  {
78  double ret_val = 0;
79  assert(vals_.size() == inds_.size());
80  if(norm_ == 2){
81  for(unsigned int i = 0; i < vals_.size() ; i++) {
82  ret_val += ( x[inds_[i]] - vals_[i] ) * ( x[inds_[i]] - vals_[i] );
83  }
84  }
85  else if(norm_ == 1){
86  for(unsigned int i = 0 ; i < vals_.size() ; i++) {
87  if(vals_[i] <= 0.1)
88  ret_val += x[inds_[i]];
89  else{
90  ret_val += (1.0 - x[inds_[i]]);
91  // ret_val -= x[inds_[i]];
92  }
93  }
94  }
95  return ret_val;
96  }
97 
98  bool
99  TNLP2FPNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
100  Index& nnz_h_lag,
101  TNLP::IndexStyleEnum& index_style)
102  {
103  bool ret_code = tnlp_->get_nlp_info(n, m , nnz_jac_g, nnz_h_lag,
104  index_style);
105 
106  index_style_ = index_style; // this function is called before any
107  // other that uses index_style_
108 
110  nnz_h_lag += (int)vals_.size();
111 
113  m += 2;
114  nnz_jac_g += (n + (int)vals_.size());
115  }
116  else if(use_cutoff_constraint_) {
117  m++;
118  nnz_jac_g += n;
119  }
121  m++;
122  nnz_jac_g += (int)vals_.size();
123  }
124 
125  return ret_code;
126  }
127 
128  bool
129  TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
130  Index m, Number* g_l, Number* g_u)
131  {
132  bool ret_code;
133 
135  ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-2, g_l, g_u);
136  g_l[m-2] = - COIN_DBL_MAX;
137  g_u[m-2] = cutoff_;
138  g_l[m-1] = - COIN_DBL_MAX;
140  }
141  else if(use_cutoff_constraint_) {
142  ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
143  g_l[m-1] = - COIN_DBL_MAX;
144  g_u[m-1] = cutoff_;
145  }
147  ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
148  g_l[m-1] = - COIN_DBL_MAX;
150  }
151  else
152  ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m, g_l, g_u);
153 
154  return ret_code;
155  }
156 
157  bool
158  TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x,
159  Number& obj_value)
160  {
161  bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);//for new_x
162 
164  obj_value *= (1 - lambda_) * sigma_;
166  }
167 
168  return ret_code;
169  }
170 
171  bool
172  TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x,
173  Number* grad_f)
174  {
175  bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);
176 
178  for(int i = 0 ; i < n ; i++) {
179  grad_f[i] *= (1-lambda_) * sigma_;
180  }
181  if(norm_ ==2){
182  for(unsigned int i = 0 ; i < inds_.size() ; i++) {
183  grad_f[inds_[i]] += objectiveScalingFactor_*2*lambda_*( x[inds_[i]] - vals_[i] );
184  }
185  }
186  else{
187  for(unsigned int i = 0 ; i < inds_.size() ; i++) {
188  if(vals_[i] <= 0.1)
189  grad_f[inds_[i]] += objectiveScalingFactor_*lambda_;
190  else
191  grad_f[inds_[i]] -= objectiveScalingFactor_*lambda_;
192  }
193  }
194  }
195 
196  return ret_code;
197  }
198 
199  bool
200  TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x,
201  Index m, Number* g)
202  {
203  bool ret_code;
204 
206  ret_code = tnlp_->eval_g(n, x, new_x, m-2, g);
207  // compute the value of the cutoff constraint
208  Number obj_value;
209  if(eval_f(n, x, new_x, obj_value))
210  g[m-2] = obj_value;
211  else
212  ret_code = false;
213  // compute the value of the local branching constraint
214  Number g_local_branching = 0.0;
215  for(unsigned int i = 0 ; i < vals_.size() ; i++) {
216  if(vals_[i] <= 0.1)
217  g_local_branching += x[inds_[i]];
218  else
219  g_local_branching += (1.0 - x[inds_[i]]);
220  }
221  g[m-1] = g_local_branching;
222  }
223  else if(use_cutoff_constraint_) {
224  ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
225  Number obj_value;
226  if(eval_f(n, x, new_x, obj_value))
227  g[m-1] = obj_value;
228  else
229  ret_code = false;
230  }
232  ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
233  Number g_local_branching = 0.0;
234  for(unsigned int i = 0 ; i < vals_.size() ; i++) {
235  if(vals_[i] <= 0.1)
236  g_local_branching += x[inds_[i]];
237  else
238  g_local_branching += (1.0 - x[inds_[i]]);
239  }
240  g[m-1] = g_local_branching;
241  }
242  else
243  ret_code = tnlp_->eval_g(n, x, new_x, m, g);
244 
245  return ret_code;
246  }
247 
248  bool
249  TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
250  Index m, Index nele_jac, Index* iRow,
251  Index *jCol, Number* values)
252  {
253  bool ret_code;
254 
256  int n_integers = (int)vals_.size();
257  ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n - n_integers,
258  iRow, jCol, values);
259 
260  if (iRow && jCol && !values) { //Initialization phase
261  int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
262  // compute the jacobian contribution of the cutoff constraint
263  int k = nele_jac - n - n_integers;
264  iRow += k;
265  jCol += k;
266  for(int i = 0; i< n; i++) {
267  iRow[i] = m - 2 + index_correction;
268  jCol[i] = i + index_correction;
269  }
270  // compute the jacobian contribution of the local branching constraint
271  k = nele_jac - n_integers;
272  iRow += k;
273  jCol += k;
274  for(int i = 0; i< n_integers; i++) {
275  iRow[i] = m - 1 + index_correction;
276  jCol[i] = inds_[i] + index_correction;
277  }
278  }
279  else if (!iRow & !jCol && values) { //computation phase
280  // compute the jacobian contribution of the cutoff constraint
281  Number* grad_f = new Number[n];
282  bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
283  if(ret_code_grad_f) {
284  int k = nele_jac - n - n_integers;
285  values += k;
286  for(int i = 0; i< n; i++) {
287  values[i] = grad_f[i];
288  }
289  }
290  else
291  ret_code = false;
292  delete [] grad_f;
293  // compute the jacobian contribution of the local branching constraint
294  int k = nele_jac - n_integers;
295  values += k;
296  for(int i = 0; i< n_integers; i++) {
297  if(vals_[i] <= 0.1)
298  values[i] = 1.0;
299  else
300  values[i] = -1.0;
301  }
302  }
303  else { //error phase
304  DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
305  }
306  }
307  else if(use_cutoff_constraint_) {
308  ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n,
309  iRow, jCol, values);
310 
311  if (iRow && jCol && !values) { //Initialization phase
312  int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
313  int k = nele_jac - n;
314  iRow += k;
315  jCol += k;
316  for(int i = 0; i< n; i++) {
317  iRow[i] = m - 1 + index_correction;
318  jCol[i] = i + index_correction;
319  }
320  }
321  else if (!iRow & !jCol && values) { //computation phase
322  Number* grad_f = new Number[n];
323  bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
324  if(ret_code_grad_f) {
325  int k = nele_jac - n;
326  values += k;
327  for(int i = 0; i< n; i++) {
328  values[i] = grad_f[i];
329  }
330  }
331  else
332  ret_code = false;
333  delete [] grad_f;
334  }
335  else { //error phase
336  DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
337  }
338  }
340  int n_integers = (int)vals_.size();
341  ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n_integers,
342  iRow, jCol, values);
343 
344  if (iRow && jCol && !values) { //Initialization phase
345  int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
346  int k = nele_jac - n_integers;
347  iRow += k;
348  jCol += k;
349  for(int i = 0; i< n_integers; i++) {
350  iRow[i] = m - 1 + index_correction;
351  jCol[i] = inds_[i] + index_correction;
352  }
353  }
354  else if (!iRow & !jCol && values) { //computation phase
355  int k = nele_jac - n_integers;
356  values += k;
357  for(int i = 0; i< n_integers; i++) {
358  if(vals_[i] <= 0.1)
359  values[i] = 1.0;
360  else
361  values[i] = -1.0;
362  }
363  }
364  else { //error phase
365  DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
366  }
367  }
368  else
369  ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac,
370  iRow, jCol, values);
371 
372  return ret_code;
373  }
374 
375  bool
376  TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
377  Number obj_factor, Index m, const Number* lambda,
378  bool new_lambda, Index nele_hess,
379  Index* iRow, Index* jCol, Number* values)
380  {
381  bool ret_code;
382 
383  int nnz_obj_h = (norm_ == 2) ? (int)inds_.size() : 0;
384 
386  double coef_obj = (iRow != NULL)?0 : lambda[m - 2];
387  ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
388  m - 2, lambda, new_lambda, nele_hess - nnz_obj_h,
389  iRow, jCol, values);
390  }
391  else if(use_cutoff_constraint_) {
392  double coef_obj = (iRow != NULL)?0 : lambda[m - 1];
393  ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
394  m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
395  iRow, jCol, values);
396  }
398  ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
399  m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
400  iRow, jCol, values);
401  }
402  else { // this is the original feasibility pump implementation
403  ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
404  m, lambda, new_lambda, nele_hess - nnz_obj_h,
405  iRow, jCol, values);
406  }
407 
408  //Now add extra elements corresponding to the hessian of the distance
410  if (iRow && jCol && !values) //Initialization phase
411  {
412  int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
413  int k = nele_hess - nnz_obj_h;
414  iRow += k;
415  jCol += k;
416  for(unsigned int i = 0; i < inds_.size() ; i++)
417  {
418  iRow[i] = inds_[i] + index_correction;
419  jCol[i] = inds_[i] + index_correction;
420  }
421  DBG_ASSERT(k==nele_hess);
422  }
423  else if (!iRow & !jCol && values) //computation phase
424  {
425  int k = nele_hess - nnz_obj_h;
426  values += k;
427  for(unsigned int i = 0; i < inds_.size() ; i++)
428  {
429  values[i] = 2* objectiveScalingFactor_* lambda_* obj_factor;
430  }
431  DBG_ASSERT(k==nele_hess);
432  }
433  else //error phase
434  {
435  DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
436  }
437  }
438 
439  return ret_code;
440  }
441 
442  void
443  TNLP2FPNLP::finalize_solution(SolverReturn status,
444  Index n, const Number* x, const Number* z_L, const Number* z_U,
445  Index m, const Number* g, const Number* lambda,
446  Number obj_value,
447  const IpoptData* ip_data,
448  IpoptCalculatedQuantities* ip_cq)
449  {
450  int m2 = m;
452  m2--;
453  }
455  m2--;
456  }
457  tnlp_->finalize_solution(status,n, x, z_L, z_U,m2, g, lambda, obj_value,
458  ip_data, ip_cq);
459  }
460 }
461 
double lambda_
value for the convex combination to take between original objective and distance function.
double * values
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, Ipopt::TNLP::IndexStyleEnum &index_style)
get info from nlp_ and add hessian information
void set_cutoff(Ipopt::Number cutoff)
Set the cutoff value to use in the cutoff constraint.
void set_dist_to_point_obj(size_t n, const Ipopt::Number *vals, const Ipopt::Index *inds)
Set the point to which distance is minimized.
Ipopt::TNLP::IndexStyleEnum index_style_
Ipopt::Index style (C++ or Fortran)
fint m2
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
overloaded to return the value of the objective function
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
This call is just passed onto tnlp_.
virtual ~TNLP2FPNLP()
Default destructor.
vector< Ipopt::Index > inds_
Indices of the variables for which distance is minimized (i.e. indices of integer variables in a feas...
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
overload to return the jacobian of g
void fint fint fint real fint real real real real real real real real real * e
TNLP2FPNLP()
Default Constructor.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
overload this method to return the vector of the gradient of the objective w.r.t. ...
Ipopt::SmartPtr< TNLP > tnlp_
pointer to the tminlp that is being adapted
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
overload to return the values of the left-hand side of the constraints
double dist_to_point(const Ipopt::Number *x)
Compute the distance from x to the point to which distance is minimized, using the 1- or 2-norm according to norm_.
double objectiveScalingFactor_
Scaling factor for the objective.
void fint fint * k
bool use_feasibility_pump_objective_
Flag to indicate that we want to use the feasibility pump objective.
static int
Definition: OSdtoa.cpp:2173
bool use_cutoff_constraint_
Flag to indicate that we want to use a cutoff constraint. This constraint has the form f(x) <= (1 - epsilon) * cutoff.
virtual bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Evaluate the modified Hessian of the Lagrangian.
int norm_
Norm to use (L_1 or L_2).
vector< Ipopt::Number > vals_
Values of the point to which we separate (if x is the point vals_[i] should be x[inds_[i]] ) ...
double rhs_local_branching_constraint_
RHS of local branching constraint.
void fint fint fint real fint real real real real real real * g
double sigma_
Scaling for the original objective.
void fint * m
double cutoff_
Value of best solution known.
bool use_local_branching_constraint_
Flag to indicate that we want to use a local branching constraint.
virtual void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, const Ipopt::Number *g, const Ipopt::Number *lambda, Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
This method is called when the algorithm is complete so the TNLP can store/write the solution...
void fint * n
void fint fint fint real fint real * x