BonOuterDescription.cpp
// (C) Copyright CNRS and others 2010
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// Authors :
// Pierre Bonami, Université de la Méditérannée
// Hassan Hijazi, Orange Labs
//
// Date : 05/22/2010

#include "BonOuterDescription.hpp"

namespace Bonmin{

//Copied from OsiTMINLPInterface

//A procedure to try to remove small coefficients in OA cuts (or make them non-small)
static inline
bool cleanNnz(double &value, double colLower, double colUpper,
              double rowLower, double rowUpper, double colsol,
              double &lb, double &ub, double tiny, double veryTiny)
{
  if (fabs(value) >= tiny) return true;

  if (fabs(value) < veryTiny) return false; //Take the risk?

  //try and remove
  double infty = 1e20;
  bool colUpBounded = colUpper < 10000;
  bool colLoBounded = colLower > -10000;
  bool rowNotLoBounded = rowLower <= -infty;
  bool rowNotUpBounded = rowUpper >= infty;
  bool pos = value > 0;

  if (colLoBounded && pos && rowNotUpBounded) {
    lb += value * (colsol - colLower);
    return false;
  }
  else if (colLoBounded && !pos && rowNotLoBounded) {
    ub += value * (colsol - colLower);
    return false;
  }
  else if (colUpBounded && !pos && rowNotUpBounded) {
    lb += value * (colsol - colUpper);
    return false;
  }
  else if (colUpBounded && pos && rowNotLoBounded) {
    ub += value * (colsol - colUpper);
    return false;
  }
  //cannot remove the coefficient; increase it to the smallest accepted magnitude
  if (pos) value = tiny;
  else value = -tiny;
  return true;
}
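// A small worked illustration of the cleaning rules above (illustrative numbers; tiny = 1e-08
// and veryTiny = 1e-20 are the values passed by getMyOuterApproximation below):
//  - value = 2e-3  : |value| >= tiny, the coefficient is kept unchanged (return true).
//  - value = 5e-21 : |value| < veryTiny, the coefficient is simply dropped (return false, "take the risk").
//  - value = 5e-9, colLower = 0, colsol = 3, row with no upper bound:
//      the coefficient is dropped and its contribution at the current point is folded into
//      the bound instead: lb += 5e-9 * (3 - 0).
//  - value = 5e-9 but none of the four cases applies (e.g. free column, two-sided row):
//      the coefficient is bumped to the smallest accepted magnitude, value = +/- tiny (return true).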

/** Get the outer approximation constraints at the provided point, and only for the
    specified constraint ind. */
void getMyOuterApproximation(OsiTMINLPInterface &si, OsiCuts &cs, int ind,
    const double * x, int getObj, const double * x2, double theta,
    bool global) {
  int n, m, nnz_jac_g, nnz_h_lag;
  Ipopt::TNLP::IndexStyleEnum index_style;
  TMINLP2TNLP* problem = si.problem();
  problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

  double g_i = 0;
  problem->eval_gi(n, x, 1, ind, g_i);
  vector<int> jCol(n);
  int nnz;
  problem->eval_grad_gi(n, x, 0, ind, nnz, jCol(), NULL);
  vector<double> jValues(nnz);
  problem->eval_grad_gi(n, x, 0, ind, nnz, NULL, jValues());
  //As the Jacobian is stored by columns, fill OsiCuts with cuts
  CoinPackedVector cut;
  double lb;
  double ub;

  const double * rowLower = si.getRowLower();
  const double * rowUpper = si.getRowUpper();
  const double * colLower = si.getColLower();
  const double * colUpper = si.getColUpper();
  const double * duals = si.getRowPrice() + 2 * n;
  double infty = si.getInfinity();
  double nlp_infty = infty;
  int rowIdx = ind;

  if (rowLower[rowIdx] > -nlp_infty)
    lb = rowLower[rowIdx] - g_i;
  else
    lb = -infty;
  if (rowUpper[rowIdx] < nlp_infty)
    ub = rowUpper[rowIdx] - g_i;
  else
    ub = infty;
  if (rowLower[rowIdx] > -infty && rowUpper[rowIdx] < infty) {
    if (duals[rowIdx] >= 0)// <= inequality
      lb = -infty;
    if (duals[rowIdx] <= 0)// >= inequality
      ub = infty;
  }

  double tiny = 1e-08;
  double veryTiny = 1e-20;

  for (int i = 0; i < nnz; i++) {
    if (index_style == Ipopt::TNLP::FORTRAN_STYLE) jCol[i]--;
    const int &colIdx = jCol[i];
    //"clean" coefficient
    if (cleanNnz(jValues[i], colLower[colIdx], colUpper[colIdx],
                 rowLower[rowIdx], rowUpper[rowIdx], x[colIdx], lb, ub,
                 tiny, veryTiny)) {
      cut.insert(colIdx, jValues[i]);

      if (lb > -infty)
        lb += jValues[i] * x[colIdx];
      if (ub < infty)
        ub += jValues[i] * x[colIdx];
    }
  }
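  // Sketch of the algebra carried out above (standard outer-approximation setting assumed):
  // linearizing the constraint  rowLower <= g_i(x) <= rowUpper  at the point x* gives
  //   g_i(x*) + grad_g_i(x*)^T (x - x*) <= rowUpper    (and/or  >= rowLower),
  // i.e.
  //   grad_g_i(x*)^T x <= rowUpper - g_i(x*) + grad_g_i(x*)^T x*.
  // ub (resp. lb) is therefore initialised to rowUpper - g_i (resp. rowLower - g_i) before the
  // loop, and each coefficient kept in the cut adds jValues[i] * x[colIdx] to the finite bound;
  // coefficients rejected by cleanNnz have already folded their contribution into lb/ub there.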

  bool add = true;
  //Compute cut violation
  if (x2 != NULL) {
    double rhs = cut.dotProduct(x2);
    double violation = 0.;
    if (ub < infty)
      violation = std::max(violation, fabs(rhs - ub));
    if (lb > -infty)
      violation = std::max(violation, fabs(lb - rhs));
    if (violation < theta) {
      add = false;
    }
  }
  OsiRowCut newCut;
  // if(lb[i]>-1e20) assert (ub[i]>1e20);

  if (add) {
    if (global) {
      newCut.setGloballyValidAsInteger(1);
    }
    //newCut.setEffectiveness(99.99e99);

    //********* Perspective Extension ********//
    const int* ids = problem->get_const_xtra_id(); // vector of indices of the binary variables activating the corresponding constraints
    // Get the index of the corresponding indicator binary variable
    int binary_id = (ids == NULL) ? -1 : ids[ind];
    if (binary_id > 0) { // If this hyperplane is a linearization of a disjunctive constraint, link its right-hand (or left-hand) side to the corresponding indicator binary variable
      if (lb > -infty) { // ∂x ≥ lb => ∂x - lb*z ≥ 0
        cut.insert(binary_id, -lb);
        newCut.setLb(0);
        newCut.setUb(ub);
      }
      if (ub < infty) { // ∂x ≤ ub => ∂x - ub*z ≤ 0
        cut.insert(binary_id, -ub);
        newCut.setLb(lb);
        newCut.setUb(0);
      }
    }
    else {
      newCut.setLb(lb);
      newCut.setUb(ub);
    }
    //********* Perspective Extension ********//
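    // Reading of the perspective extension above (it assumes the indicator setting where the
    // binary z with index binary_id switches constraint ind on, and z = 0 forces the variables
    // of that constraint to 0): the one-sided cut  grad_g^T x >= lb  is replaced by
    //   grad_g^T x - lb * z >= 0,
    // which reduces to the original cut when z = 1 and is trivially satisfied when z = 0;
    // the  grad_g^T x <= ub  side is handled symmetrically as  grad_g^T x - ub * z <= 0.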

    newCut.setRow(cut);
    //newCut.print();
    cs.insert(newCut);
  }
}

/* Old Uniform sampling method
void addOuterDescription(OsiTMINLPInterface &nlp, OsiSolverInterface &si,
    const double * x, int nbAp, bool getObj) {
  int n;
  int m;
  int nnz_jac_g;
  int nnz_h_lag;
  Ipopt::TNLP::IndexStyleEnum index_style;
  //Get problem information
  TMINLP2TNLP* problem = nlp.problem();
  problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

  const double * colLower = nlp.getColLower();
  const double * colUpper = nlp.getColUpper();
  const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
  vector<Ipopt::TNLP::LinearityType> constTypes(m);
  problem->get_constraints_linearity(m, constTypes());
  // Hassan OA initial description
  int OuterDesc = 0;
  if (OuterDesc == 0) {
    OsiCuts cs;

    double * p = CoinCopyOfArray(nlp.getColLower(), n);
    double * pp = CoinCopyOfArray(nlp.getColLower(), n);
    double * up = CoinCopyOfArray(nlp.getColUpper(), n);
    //b->options()->GetIntegerValue("number_approximations_initial_outer",nbAp, b->prefix());
    std::vector<int> nbG(m, 0);// Number of generated points for each nonlinear constraint

    std::vector<double> step(n);

    for (int i = 0; i < n; i++) {

      if (colUpper[i] > 1e08) {
        up[i] = 0;
      }

      if (colUpper[i] > 1e08 || colLower[i] < -1e08 || (variableType[i]
          == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER)) {
        step[i] = 0;
      } else
        step[i] = (up[i] - colLower[i]) / (nbAp);

      if (colLower[i] < -1e08) {
        p[i] = 0;
        pp[i] = 0;
      }
    }
    vector<double> g_p(m);
    vector<double> g_pp(m);
    for (int i = 0; (i < m); i++) {
      if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
      getMyOuterApproximation(nlp, cs, i, p, 0, NULL, 10000, true);// Generate Tangents at current point
    }
    for (int j = 1; j <= nbAp; j++) {

      for (int i = 0; i < n; i++) {
        pp[i] += step[i];
      }

      problem->eval_g(n, p, 1, m, g_p());
      problem->eval_g(n, pp, 1, m, g_pp());
      double diff = 0;
      int varInd = 0;
      for (int i = 0; (i < m); i++) {
        if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
        if (varInd == n - 1)
          varInd = 0;
        diff = std::abs(g_p[i] - g_pp[i]);

        if (nbG[i] < nbAp && diff ) {
          getMyOuterApproximation(nlp, cs, i, pp, 0, NULL, 10000, true);// Generate Tangents at current point
          p[varInd] = pp[varInd];
          nbG[i]++;
        }
        varInd++;
      }
    }
    for (int i = 0; i < m ; i++) {
      if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
      getMyOuterApproximation(nlp, cs, i, up, 0, NULL, 10000, true);// Generate Tangents at current point
    }

    si.applyCuts(cs);
    delete [] p;
    delete [] pp;
    delete [] up;
  }

} */
// New curvature-based sampling method
void addOuterDescription(OsiTMINLPInterface &nlp, OsiSolverInterface &si,
    const double * x, int nbAp, bool getObj) {
  int n;
  int m;
  int nnz_jac_g;
  int nnz_h_lag;
  Ipopt::TNLP::IndexStyleEnum index_style;
  //Get problem information
  TMINLP2TNLP* problem = nlp.problem();
  problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

  const double * colLower = nlp.getColLower();
  const double * colUpper = nlp.getColUpper();
  const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
  vector<Ipopt::TNLP::LinearityType> constTypes(m);
  problem->get_constraints_linearity(m, constTypes());
  // Hassan OA initial description
  int OuterDesc = 0;
  if (OuterDesc == 0) {
    OsiCuts cs;

    double * p = CoinCopyOfArray(nlp.getColLower(), n);
    double * pp = CoinCopyOfArray(nlp.getColLower(), n);
    double * up = CoinCopyOfArray(nlp.getColUpper(), n);
    //b->options()->GetIntegerValue("number_approximations_initial_outer",nbAp, b->prefix());
    std::vector<int> nbG(m, 2);// Number of generated points for each nonlinear constraint; we always generate two cuts, at the lower and upper bounds.

    std::vector<double> step(n);

    for (int i = 0; i < n; i++) {

      if (colUpper[i] > 1e08) {
        up[i] = 0;
      }

      if (colUpper[i] > 1e08 || colLower[i] < -1e08 || (variableType[i]
          == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER)) {
        step[i] = 0;
      } else
        step[i] = (up[i] - colLower[i]) / 2e02; // if step[i] != 0, variable i appears in one univariate nonlinear function; a small step is used to perform curvature-based sampling

      if (colLower[i] < -1e08) {
        p[i] = 0;
        pp[i] = 0;
      }
    }
    vector<double> g_p(m);
    double g_p_i, g_pp_i;
    problem->eval_g(n, p, 1, m, g_p()); // Evaluate function g at the lower bounds
    vector<double> g_pp(m);
    vector<double> g_up(m);
    problem->eval_g(n, up, 1, m, g_up()); // Evaluate function g at the upper bounds

    for (int i = 0; (i < m); i++) {
      if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
      getMyOuterApproximation(nlp, cs, i, p, 0, NULL, 10000, true);// Generate tangents at the lower bounds
    }
    vector<double> thr(m); // minimum variation (curvature) of function g required to generate a cut at point pp
    for (int i = 0; i < m; i++) {
      thr[i] = std::abs(g_up[i] - g_p[i]) / nbAp;
    }
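    // Sketch of the sampling rule applied below: with g_i evaluated at the (safeguarded)
    // lower-bound point p and the upper-bound point up,
    //   thr[i] = |g_i(up) - g_i(p)| / nbAp
    // splits the total variation of g_i into nbAp roughly equal slices. Walking from the
    // lower bounds in increments of step[], a new tangent cut is generated each time g_i
    // has changed by at least thr[i], so cuts concentrate where the constraint curves most
    // and stay sparse where it is nearly flat.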
    double diff = 0;
    for (int i = 0; (i < m); i++) { // Generate outer-approximation initial cuts for all nonlinear constraints
      if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
      // For each nonlinear constraint, reset the sampling points to the lower bounds
      // (free the previous copies first to avoid leaking them)
      delete [] p;
      delete [] pp;
      p = CoinCopyOfArray(nlp.getColLower(), n);
      pp = CoinCopyOfArray(nlp.getColLower(), n);
      while (nbG[i] < nbAp) { // Iterate until the number of initial approximations is reached for each nonlinear constraint

        // Curvature sampling
        for (int j = 0; j < n; j++) {
          pp[j] += step[j];
        }
        problem->eval_gi(n, p, 1, i, g_p_i);
        problem->eval_gi(n, pp, 1, i, g_pp_i);
        diff = std::abs(g_p_i - g_pp_i);

        if (diff >= thr[i]) {
          getMyOuterApproximation(nlp, cs, i, pp, 0, NULL, 10000, true);// Generate tangents at the current point
          for (int j = 0; j < n; j++) {
            p[j] = pp[j]; // Move all previous points to the current one
          }

          nbG[i]++;
        }
      }

    }

    for (int i = 0; i < m ; i++) {
      if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
      getMyOuterApproximation(nlp, cs, i, up, 0, NULL, 10000, true);// Generate tangents at the upper bounds
    }

    si.applyCuts(cs);
    delete [] p;
    delete [] pp;
    delete [] up;

  }

}
} // namespace Bonmin
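// Hedged usage sketch (not part of the original file; the surrounding setup and the chosen
// LP solver class are assumptions for illustration only):
//   Bonmin::OsiTMINLPInterface nlp;   // NLP interface wrapping the user's TMINLP
//   OsiClpSolverInterface lp;         // LP relaxation to be enriched with OA cuts
//   ...                               // load/extract the linear part of the model into lp
//   int nbAp = 50;                    // number of initial approximation points per constraint
//   Bonmin::addOuterDescription(nlp, lp, nlp.getColSolution(), nbAp, false);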