BonOuterApprox.cpp
// (C) Copyright International Business Machines Corporation 2007
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// Authors :
// Pierre Bonami, International Business Machines Corporation
//
// Date : 10/16/2007
#include "BonOuterApprox.hpp"
#include "BonBabSetupBase.hpp"
#include "BonOsiTMINLPInterface.hpp"
#include "BonTypes.hpp"
#include <vector>
#include <sstream>
namespace Bonmin {

int OuterApprox::nTimesCalled = 0;

void
OuterApprox::initialize(Bonmin::BabSetupBase &b) {
  b.options()->GetNumericValue("tiny_element", tiny_, "bonmin.");
  b.options()->GetNumericValue("very_tiny_element", veryTiny_, "bonmin.");
}
void
OuterApprox::extractLinearRelaxation(Bonmin::OsiTMINLPInterface &minlp,
                                     OsiSolverInterface *si,
                                     const double * x, bool getObj)
{

  int n;
  int m;
  int nnz_jac_g;
  int nnz_h_lag;
  Ipopt::TNLP::IndexStyleEnum index_style;

  Bonmin::TMINLP2TNLP * model = minlp.problem();

  //Get problem information
  model->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

  vector<int> iRow(nnz_jac_g);
  vector<int> jCol(nnz_jac_g);
  vector<double> vals(nnz_jac_g);

  //get Jacobian
  model->eval_jac_g(n, x, 1, m, nnz_jac_g, &iRow[0], &jCol[0], NULL);
  model->eval_jac_g(n, x, 1, m, nnz_jac_g, NULL, NULL, &vals[0]);
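  // Ipopt TNLP convention: the first eval_jac_g call (values == NULL) only fills
  // the sparsity pattern iRow/jCol; the second call (iRow/jCol == NULL) fills the
  // numerical values of the Jacobian at the point x.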

  //Put the Jacobian arrays in C style (0-based indices)
  if(index_style == Ipopt::TNLP::FORTRAN_STYLE){
    for(int k = 0 ; k < nnz_jac_g ; k++){
      iRow[k]--;
      jCol[k]--;
    }
  }
  vector<double> g(m);
  model->eval_g(n, x, 1, m, &g[0]);

  vector<double> rowLow(m);
  vector<double> rowUp(m);
  vector<double> colUp(n);
  vector<double> colLow(n);

  model->get_bounds_info(n, &colLow[0], &colUp[0], m, &rowLow[0], &rowUp[0]);

  double infty = si->getInfinity();
  double nlp_infty = infty;

  vector<Ipopt::TNLP::LinearityType> const_types(m);
  model->get_constraints_linearity(m, &const_types[0]);
  for(int i = 0 ; i < m ; i++) {
    if(rowLow[i] > - nlp_infty){
      rowLow[i] = (rowLow[i] - g[i]);
      if(1 || const_types[i] != Ipopt::TNLP::LINEAR) rowLow[i] -= 1e-07;
    }
    if(rowUp[i] < nlp_infty){
      rowUp[i] = (rowUp[i] - g[i]);
      if(1 || const_types[i] != Ipopt::TNLP::LINEAR) rowUp[i] += 1e-07;
    }
  }
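  // Each constraint rowLow_i <= g_i(y) <= rowUp_i is outer-approximated at x by
  //   rowLow_i - g_i(x) <= grad g_i(x)^T (y - x) <= rowUp_i - g_i(x),
  // which is why g[i] is subtracted from both finite bounds above; the constant
  // terms grad g_i(x)^T x are folded back into the bounds, entry by entry, in the
  // cleaning loop below. The 1e-07 shift slightly relaxes every finite bound (the
  // linearity test is short-circuited by the "1 ||").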

  //Then convert everything to a CoinPackedMatrix
  //Go through values, clean coefficients and fix bounds
  for(int i = 0 ; i < nnz_jac_g ; i++) {
    if(const_types[iRow[i]] != Ipopt::TNLP::LINEAR || //For linear just copy is fine.
       cleanNnz(vals[i], colLow[jCol[i]], colUp[jCol[i]],
                rowLow[iRow[i]], rowUp[iRow[i]],
                x[jCol[i]],
                rowLow[iRow[i]],
                rowUp[iRow[i]], tiny_, veryTiny_)) {
      rowLow[iRow[i]] += vals[i] * x[jCol[i]];
      rowUp[iRow[i]] += vals[i] * x[jCol[i]];
    }
  }
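  // cleanNnz is only consulted for rows marked LINEAR; it may discard coefficients
  // below the tiny_ / veryTiny_ thresholds, compensating through the bound
  // arguments passed by reference. Whenever the branch is taken, the term
  // vals[i] * x[jCol[i]] is added to both row bounds, completing the linearization.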
  CoinPackedMatrix mat(true, &iRow[0], &jCol[0], &vals[0], nnz_jac_g);
  mat.setDimensions(m, n); // In case matrix was empty, this should be enough

  vector<double> obj(n, 0.);

  si->loadProblem(mat, &colLow[0], &colUp[0], &obj[0], &rowLow[0], &rowUp[0]);

  for(int i = 0 ; i < n ; i++) {
    if(minlp.isInteger(i))
      si->setInteger(i);
  }
  if(getObj) {
    if(model->hasLinearObjective()){
      std::cout<<"Linear stuff"<<std::endl;
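      // obj is still the zero vector at this point, so the eval_f call below
      // returns the constant term f(0) of the linear objective; its sign is
      // flipped when it is stored as the LP objective offset.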
      double zero;
      model->eval_f(n, &obj[0], 1, zero);
      si->setDblParam(OsiObjOffset, -zero);
      //if(fabs(zero - 0) > 1e-10)
      //addObjVar = true;
      //else {
      //Copy the linear objective and don't create a dummy variable.
      model->eval_grad_f(n, x, 1, &obj[0]);
      si->setObjective(&obj[0]);
      //}
    }
    else {
      //add variable alpha
      //(alpha should be empty in the matrix with a coefficient of -1 and unbounded)
      CoinPackedVector a;
      si->addCol(a, -si->getInfinity(), si->getInfinity(), 1.);
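      // alpha is the extra objective variable: it carries objective coefficient 1
      // in the LP and appears with coefficient -1 in the objective cut built below,
      //   grad f(x)^T y - alpha <= grad f(x)^T x - f(x),
      // i.e. alpha >= f(x) + grad f(x)^T (y - x).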

      // Now get the objective cuts
      // get the gradient, pack it and add the cut
      model->eval_grad_f(n, x, 1, &obj[0]);
      double ub;
      model->eval_f(n, x, 1, ub);
      ub *= -1;
      double lb = -1e300;
      CoinPackedVector objCut;
      CoinPackedVector * v = &objCut;
      v->reserve(n);
      for(int i = 0; i < n ; i++) {
        if(nnz_jac_g)
        {
          if(cleanNnz(obj[i], colLow[i], colUp[i],
                      -si->getInfinity(), 0,
                      x[i],
                      lb,
                      ub, tiny_, veryTiny_)) {
            v->insert(i, obj[i]);
            lb += obj[i] * x[i];
            ub += obj[i] * x[i];
          }
        }
        else //Unconstrained problem can not put clean coefficient
        {
          if(cleanNnz(obj[i], colLow[i], colUp[i],
                      -si->getInfinity(), 0,
                      x[i],
                      lb,
                      ub, 1e-03, 1e-08)) {
            v->insert(i, obj[i]);
            lb += obj[i] * x[i];
            ub += obj[i] * x[i];
          }
        }
      }
      v->insert(n, -1);
      si->addRow(objCut, lb, ub);
    }
  }

#if 0
  std::ostringstream os;
  os << "OA_" << nTimesCalled;
  nTimesCalled++;
  std::string f_name = os.str();
  si->writeMps(f_name.c_str());
#endif
}

}
bool cleanNnz(double &value, double colLower, double colUpper, double rowLower, double rowUpper, double colsol, double &lb, double &ub, double tiny, double veryTiny)
Facilitator to clean up coefficient.
const TMINLP2TNLP * problem() const
Get a pointer to the TMINLP2TNLP adapter.
This class provides an Osi interface for a Mixed Integer Linear Program expressed as a TMINLP (so ...
virtual bool isInteger(int columnNumber) const
Return true if column is integer.
void extractLinearRelaxation(Bonmin::OsiTMINLPInterface &minlp, OsiSolverInterface *si, const double *x, bool getObj)
Build the Outer approximation of minlp and put it in si (a usage sketch follows at the end of this page).
virtual bool get_constraints_linearity(Ipopt::Index m, LinearityType *const_types)
Returns the constraint linearity.
A class to have all elements necessary to set up a branch-and-bound.
void initialize(Bonmin::BabSetupBase &b)
Initialize using options.
double tiny_
If constraint coefficient is below this, we try to remove it.
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
The caller is allowed to modify the bounds, so this method returns the internal bounds information...
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
Returns the vector of constraint values in x.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
Returns the vector of the gradient of the objective w.r.t. x.
double veryTiny_
If constraint coefficient is below this, we neglect it.
Ipopt::SmartPtr< Ipopt::OptionsList > options()
Access list of Options.
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
Returns the value of the objective function in x.
This is an adapter class that converts a TMINLP to a TNLP to be solved by Ipopt.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, TNLP::IndexStyleEnum &index_style)
This call is just passed onto the TMINLP object.
virtual bool hasLinearObjective()
Returns true if the objective is linear.
static int nTimesCalled
Count the number of linear outer approximations taken.
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Returns the jacobian of the constraints.
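A minimal usage sketch (not part of this file): it assumes a fully set-up Bonmin::BabSetupBase bb, a Bonmin::OsiTMINLPInterface nlp that already holds an NLP solution, an OsiClpSolverInterface as the LP container, and that Bonmin::OuterApprox is default-constructible; the helper name buildOuterApproximation is made up for illustration.

#include "BonOuterApprox.hpp"
#include "BonOsiTMINLPInterface.hpp"
#include "BonBabSetupBase.hpp"
#include "OsiClpSolverInterface.hpp"

// Hypothetical helper: build an LP outer approximation of the MINLP held by nlp,
// linearized at its current solution, and store it in lp.
void buildOuterApproximation(Bonmin::BabSetupBase &bb,
                             Bonmin::OsiTMINLPInterface &nlp,
                             OsiClpSolverInterface &lp)
{
  Bonmin::OuterApprox oa;
  oa.initialize(bb);  // reads the tiny_element / very_tiny_element options
  oa.extractLinearRelaxation(nlp, &lp, nlp.getColSolution(), /*getObj=*/true);
}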