BonHeuristicDiveMIP.cpp
// Copyright (C) 2007, International Business Machines Corporation and others.
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// Authors :
// Joao P. Goncalves, International Business Machines Corporation
//
// Date : November 12, 2007

#include "BonminConfig.h"
#include "BonHeuristicDiveMIP.hpp"
#include "CoinHelperFunctions.hpp"
#include "CbcModel.hpp"
#include "BonHeuristicDive.hpp"
#include "BonSubMipSolver.hpp"
#include "BonCbcLpStrategy.hpp"

#ifdef COIN_HAS_CPX
#include "OsiCpxSolverInterface.hpp"
#endif

#include "OsiClpSolverInterface.hpp"

#include "OsiAuxInfo.hpp"

#include "CoinTime.hpp"

#include <fstream>

#include <iomanip>

#include "CoinHelperFunctions.hpp"

//#define DEBUG_BON_HEURISTIC_DIVE_MIP

using namespace std;

namespace Bonmin
{
  HeuristicDiveMIP::HeuristicDiveMIP(BonminSetup * setup)
    :
    CbcHeuristic(),
    setup_(setup),
    howOften_(100),
    mip_(NULL)
  {
    Initialize(setup);
  }

  void
  HeuristicDiveMIP::Initialize(BonminSetup * b){
    delete mip_;
    mip_ = new SubMipSolver (*b, b->prefix());

  }

  HeuristicDiveMIP::HeuristicDiveMIP(const HeuristicDiveMIP &copy)
    :
    CbcHeuristic(copy),
    setup_(copy.setup_),
    howOften_(copy.howOften_),
    mip_(new SubMipSolver(*copy.mip_))
  {
  }

  HeuristicDiveMIP &
  HeuristicDiveMIP::operator=(const HeuristicDiveMIP & rhs)
  {
    if(this != &rhs) {
      CbcHeuristic::operator=(rhs);
      setup_ = rhs.setup_;
      howOften_ = rhs.howOften_;
      delete mip_;
      if(rhs.mip_)
        mip_ = new SubMipSolver(*rhs.mip_);
    }
    return *this;
  }

  HeuristicDiveMIP::~HeuristicDiveMIP(){
    delete mip_;
  }

  struct MatComp{
    const int * iRow;
    const int * jCol;
    /** Order Jacobian entries by column first, then by row within a column.*/
    bool operator()(int i,int j){
      return (jCol[i] < jCol[j]) || (jCol[i] == jCol[j] && iRow[i] < iRow[j]);
    }
  };


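  // HeuristicDiveMIP::solution() works in three stages: it dives on the
  // fractional integer variables that appear nonlinearly (rounding one
  // variable at a time and re-solving the NLP), then rounds the integer
  // variables that appear only linearly by solving a small MILP built from
  // the linear part of the constraints, and finally fixes all integer
  // variables and re-solves the NLP to recover a feasible point.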
  int
  HeuristicDiveMIP::solution(double &solutionValue, double *betterSolution)
  {
    if(model_->getNodeCount() || model_->getCurrentPassNumber() > 1) return 0;
    if ((model_->getNodeCount()%howOften_)!=0||model_->getCurrentPassNumber()>1)
      return 0;

    int returnCode = 0; // 0 means it didn't find a feasible solution

    OsiTMINLPInterface * nlp = NULL;
    if(setup_->getAlgorithm() == B_BB)
      nlp = dynamic_cast<OsiTMINLPInterface *>(model_->solver()->clone());
    else
      nlp = dynamic_cast<OsiTMINLPInterface *>(setup_->nonlinearSolver()->clone());

    TMINLP2TNLP* minlp = nlp->problem();

    // set tolerances
    double integerTolerance = model_->getDblParam(CbcModel::CbcIntegerTolerance);
    double primalTolerance = 1.0e-6;

    int numberColumns;
    int numberRows;
    int nnz_jac_g;
    int nnz_h_lag;
    Ipopt::TNLP::IndexStyleEnum index_style;
    minlp->get_nlp_info(numberColumns, numberRows, nnz_jac_g,
                        nnz_h_lag, index_style);

    const Bonmin::TMINLP::VariableType* variableType = minlp->var_types();
    const double* x_sol = minlp->x_sol();
    const double* x_l = minlp->x_l();
    const double* x_u = minlp->x_u();
    //const double* g_sol = minlp->g_sol();
    const double* g_l = minlp->g_l();
    const double* g_u = minlp->g_u();

    adjustPrimalTolerance(minlp, primalTolerance);

    assert(isNlpFeasible(minlp, primalTolerance));

    // Get information about the linear and nonlinear part of the instance
    TMINLP* tminlp = nlp->model();
    Ipopt::TNLP::LinearityType* variableLinearNonLinear = new
      Ipopt::TNLP::LinearityType [numberColumns];
    tminlp->get_variables_linearity(numberColumns, variableLinearNonLinear);
    vector<int> linearVariable;
    vector<int> nonlinearVariable;
    for (int iColumn=0;iColumn<numberColumns;iColumn++) {
      if (variableLinearNonLinear[iColumn]==Ipopt::TNLP::LINEAR)
        linearVariable.push_back(iColumn);
      else
        nonlinearVariable.push_back(iColumn);
    }
    size_t numberLinearColumns = linearVariable.size();
    size_t numberNonlinearColumns = nonlinearVariable.size();


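    // The Jacobian sparsity pattern is used to rebuild, column by column,
    // which rows each variable appears in. Sorting the triplet entries by
    // (column, row) yields a compressed column-wise view (columnStart,
    // columnLength, row), analogous to a CoinPackedMatrix.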
    // Get the indices of the Jacobian.
    // This is also a way of knowing which variables are
    // used in each row.
    int* indexRow = new int[nnz_jac_g];
    int* indexCol = new int[nnz_jac_g];
    minlp->eval_jac_g(numberColumns, x_sol, false,
                      numberRows, nnz_jac_g,
                      indexRow, indexCol, 0);

    vector<int> sortedIndex(nnz_jac_g);
    CoinIotaN(sortedIndex(), nnz_jac_g, 0);
    MatComp c;
    c.iRow = indexRow;
    c.jCol = indexCol;
    std::sort(sortedIndex.begin(), sortedIndex.end(), c);

    int* row = new int[nnz_jac_g];
    int* columnStart = new int[numberColumns];
    int* columnLength = new int[numberColumns];
    CoinZeroN(columnStart, numberColumns);
    CoinZeroN(columnLength, numberColumns);
    vector<vector<int> > column(numberRows);      // stores the index of
                                                  // the variables in each row
    vector<vector<int> > columnInt(numberRows);   // stores the index of
                                                  // the integer variables in
                                                  // each row
    std::vector<int> numberColumnsLinear(numberRows, 0); // stores the number
                                                  // of the linear variables in
                                                  // each row
    int indexCorrection = (index_style == Ipopt::TNLP::C_STYLE) ? 0 : 1;
    int iniCol = -1;
    for(int i=0; i<nnz_jac_g; i++) {
      int thisIndexCol = indexCol[sortedIndex[i]]-indexCorrection;
      if(indexCol[sortedIndex[i]] != iniCol) {
        iniCol = indexCol[sortedIndex[i]];
        columnStart[thisIndexCol] = i;
        columnLength[thisIndexCol] = 1;
      }
      else {
        columnLength[thisIndexCol]++;
      }
      row[i] = indexRow[sortedIndex[i]]-indexCorrection;
      column[row[i]].push_back(thisIndexCol);
      if (variableType[thisIndexCol] != Bonmin::TMINLP::CONTINUOUS)
        columnInt[row[i]].push_back(thisIndexCol);
      if(variableLinearNonLinear[thisIndexCol] == Ipopt::TNLP::LINEAR)
        numberColumnsLinear[row[i]]++;
    }

    // Get solution array for heuristic solution
    double* newSolution = new double [numberColumns];
    memcpy(newSolution,x_sol,numberColumns*sizeof(double));
    double* new_g_sol = new double [numberRows];


    // create a set with the indices of the fractional variables
    vector<int> integerNonlinearColumns; // stores the integer variables
    int numberFractionalNonlinearVariables = 0;
    for (size_t iNLCol=0;iNLCol<numberNonlinearColumns;iNLCol++) {
      int iColumn = nonlinearVariable[iNLCol];
      if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
        integerNonlinearColumns.push_back(iColumn);
        double value=newSolution[iColumn];
        if (fabs(floor(value+0.5)-value)>integerTolerance) {
          numberFractionalNonlinearVariables++;
        }
      }
    }

    setInternalVariables(minlp);

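    // Diving loop on the integer variables that appear nonlinearly: pick one
    // fractional variable (selectVariableToBranch is provided by the derived
    // heuristic), tighten its bound in the chosen rounding direction,
    // re-solve the NLP, and repeat until no nonlinear integer variable is
    // fractional or the NLP becomes infeasible.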
    int iteration = -1;
    while(numberFractionalNonlinearVariables) {
      iteration++;

      // select a fractional variable to bound
      int bestColumn = -1;
      int bestRound = -1; // -1 rounds down, +1 rounds up
      selectVariableToBranch(minlp, integerNonlinearColumns, newSolution,
                             bestColumn, bestRound);

      if(bestColumn >= 0) {
        if(bestRound < 0)
          minlp->SetVariableUpperBound(bestColumn, floor(newSolution[bestColumn]));
        else
          minlp->SetVariableLowerBound(bestColumn, ceil(newSolution[bestColumn]));
      } else {
        break;
      }

      nlp->initialSolve();

      if(minlp->optimization_status() != Ipopt::SUCCESS) {
        break;
      }

      memcpy(newSolution,x_sol,numberColumns*sizeof(double));

      numberFractionalNonlinearVariables = 0;
      for(int iIntCol=0; iIntCol<(int)integerNonlinearColumns.size(); iIntCol++) {
        int iColumn = integerNonlinearColumns[iIntCol];
        double value=newSolution[iColumn];
        if (fabs(floor(value+0.5)-value)>integerTolerance)
          numberFractionalNonlinearVariables++;
      }

      double newSolutionValue;
      minlp->eval_f(numberColumns, newSolution, true, newSolutionValue);
    }


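    // If integer variables that appear only linearly are still fractional,
    // build a MILP restricted to the linear columns: the objective is the
    // gradient of f over those columns, and each row keeps only its linear
    // coefficients, with bounds shifted by the contribution of the (now
    // fixed) nonlinear variables.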
    // now we are going to solve a MIP with the linear part of the problem
    int numberFractionalLinearVariables = 0;
    for (size_t iLCol=0;iLCol<numberLinearColumns;iLCol++) {
      int iColumn = linearVariable[iLCol];
      if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
        double value=newSolution[iColumn];
        if (fabs(floor(value+0.5)-value)>integerTolerance) {
          numberFractionalLinearVariables++;
        }
      }
    }

    bool feasible = true;
    if(numberFractionalLinearVariables) {
      int numberMIPRows = 0;
      int* mapRows = new int[numberRows];
      for(int iRow=0; iRow<numberRows; iRow++) {
        mapRows[iRow] = -1; // this means that there are no linear columns in this row
        if(numberColumnsLinear[iRow] > 0) {
          mapRows[iRow] = numberMIPRows++;
        }
      }

      // set all linear variables to zero in order to compute the
      // impact of the nonlinear variables in each row
      int numberIntegerLinearColumns = 0;
      for (size_t iLCol=0;iLCol<numberLinearColumns;iLCol++) {
        int iColumn = linearVariable[iLCol];
        newSolution[iColumn] = 0.0;
        if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS)
          numberIntegerLinearColumns++;
      }

      double* gradient_f = new double[numberColumns];
      minlp->eval_grad_f(numberColumns,newSolution,true,gradient_f);
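      // With the linear columns set to zero, g(newSolution) is exactly the
      // contribution of the nonlinear (and constant) terms to each row, so
      // the MILP row bounds become g_l - g(newSolution) and
      // g_u - g(newSolution), i.e. the slack left for the linear part.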
      // create row lower and upper bounds for MILP
      minlp->eval_g(numberColumns, newSolution, true,
                    numberRows, new_g_sol);
      double* row_lb = new double[numberMIPRows];
      double* row_ub = new double[numberMIPRows];
      for(int iRow=0; iRow<numberRows; iRow++) {
        if(mapRows[iRow] > -1) {
          assert(mapRows[iRow] < numberMIPRows);
          if(g_l[iRow] == (-1.0) * nlp->getInfinity())
            row_lb[mapRows[iRow]] = g_l[iRow];
          else
            row_lb[mapRows[iRow]] = g_l[iRow] - new_g_sol[iRow];
          if(g_u[iRow] == nlp->getInfinity())
            row_ub[mapRows[iRow]] = g_u[iRow];
          else
            row_ub[mapRows[iRow]] = g_u[iRow] - new_g_sol[iRow];
        }
      }

      // get the jacobian so that we know the coefficients of the MILP matrix
      double* jac_g = new double [nnz_jac_g];
      minlp->eval_jac_g(numberColumns, x_sol, false,
                        numberRows, nnz_jac_g,
                        0, 0, jac_g);


      // Define the constraint matrix for MILP
      CoinPackedMatrix* matrix = new CoinPackedMatrix(true,0,0);
      matrix->setDimensions(numberMIPRows,0);

      // create objective function and columns lower and upper bounds for MILP
      // and create columns for matrix in MILP
      double* objective = new double[numberLinearColumns];
      double* col_lb = new double[numberLinearColumns];
      double* col_ub = new double[numberLinearColumns];
      int* indexIntegerColumn = new int[numberIntegerLinearColumns];
      int numberIndexIntegerColumn = 0;
      for (size_t iLCol=0;iLCol<numberLinearColumns;iLCol++) {
        int iColumn = linearVariable[iLCol];
        objective[iLCol] = gradient_f[iColumn];
        col_lb[iLCol] = x_l[iColumn];
        col_ub[iLCol] = x_u[iColumn];
        CoinPackedVector newRow;
        int end = columnStart[iColumn]+columnLength[iColumn];
        for (int j=columnStart[iColumn];
             j< end;j++) {
          int iRow = row[j];
          newRow.insert(mapRows[iRow], jac_g[sortedIndex[j]]);
        }
        matrix->appendCol(newRow);
        if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS)
          indexIntegerColumn[numberIndexIntegerColumn++] = static_cast<int>(iLCol);
      }

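      // Load the MILP into the SubMipSolver's OSI solver (an
      // OsiClpSolverInterface is created if none is attached yet) and solve
      // it with cutoff DBL_MAX (accept any feasible solution), log level 0,
      // and a time limit of 60.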
      // load the problem to OSI
      OsiSolverInterface *si = mip_->solver();
      bool delete_si = false;
      if(si == NULL){
        si = new OsiClpSolverInterface;
        mip_->setLpSolver(si);
        delete_si = true;
      }
      CoinMessageHandler * handler = model_->messageHandler()->clone();
      si->passInMessageHandler(handler);
      si->messageHandler()->setLogLevel(0);

      si->loadProblem(*matrix, col_lb, col_ub, objective, row_lb, row_ub);
      si->setInteger(indexIntegerColumn, numberIndexIntegerColumn);

      mip_->optimize(DBL_MAX, 0, 60);

      if(mip_->getLastSolution()) {
        const double* solution = mip_->getLastSolution();
        assert(si->getNumCols() == static_cast<int>(numberLinearColumns));
        for (size_t iLCol=0;iLCol<numberLinearColumns;iLCol++) {
          int iColumn = linearVariable[iLCol];
          newSolution[iColumn] = solution[iLCol];
        }
      }
      else
        feasible = false;

      delete [] mapRows;
      delete [] row_lb;
      delete [] row_ub;
      delete [] jac_g;
      delete [] gradient_f;
      delete matrix;
      delete [] objective;
      delete [] col_lb;
      delete [] col_ub;
      delete [] indexIntegerColumn;
      if(delete_si){
        delete si;
      }
      delete handler;
    }

#if 0
    bool feasible = true;
    for (int iColumn=0;iColumn<numberColumns;iColumn++) {
      double value=newSolution[iColumn];
      if(value < x_l[iColumn] || value > x_u[iColumn]) {
        feasible = false;
        break;
      }
      if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
        if (fabs(floor(value+0.5)-value)>integerTolerance) {
          feasible = false;
          break;
        }
      }
    }
    minlp->eval_g(numberColumns, newSolution, true,
                  numberRows, new_g_sol);
    for(int iRow=0; iRow<numberRows; iRow++) {
      if(new_g_sol[iRow]<g_l[iRow]-primalTolerance ||
         new_g_sol[iRow]>g_u[iRow]+primalTolerance) {
        if(minlp->optimization_status() != SUCCESS) {
          feasible = false;
          break;
        } else {
#ifdef DEBUG_BON_HEURISTIC_DIVE_MIP
          cout<<"It should be infeasible because: "<<endl;
          cout<<"g_l["<<iRow<<"]= "<<g_l[iRow]<<" "
              <<"g_sol["<<iRow<<"]= "<<new_g_sol[iRow]<<" "
              <<"g_u["<<iRow<<"]= "<<g_u[iRow]<<endl;
          cout<<"primalTolerance= "<<primalTolerance<<endl;
#endif
          feasible = false;
          break;
        }
      }
    }
#else
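    // Final step: if every integer variable is (near-)integral, fix all of
    // them at their rounded values and re-solve the NLP so the continuous
    // variables can restore feasibility of the nonlinear constraints.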
    if(feasible) {
      // fix the integer variables and solve the NLP
      for (int iColumn=0;iColumn<numberColumns;iColumn++) {
        if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
          double value=newSolution[iColumn];
          if (fabs(floor(value+0.5)-value)>integerTolerance) {
#ifdef DEBUG_BON_HEURISTIC_DIVE_MIP
            cout<<"It should be infeasible because: "<<endl;
            cout<<"variable "<<iColumn<<" is not integer"<<endl;
#endif
            feasible = false;
            break;
          }
          else {
            value=floor(newSolution[iColumn]+0.5);
            minlp->SetVariableUpperBound(iColumn, value);
            minlp->SetVariableLowerBound(iColumn, value);
          }
        }
      }
      if(feasible) {
        nlp->initialSolve();
        if(minlp->optimization_status() != Ipopt::SUCCESS) {
          feasible = false;
        }
        memcpy(newSolution,x_sol,numberColumns*sizeof(double));
      }
    }
#endif

    if(feasible) {
      double newSolutionValue;
      minlp->eval_f(numberColumns, newSolution, true, newSolutionValue);
      if(newSolutionValue < solutionValue) {
        memcpy(betterSolution,newSolution,numberColumns*sizeof(double));
        solutionValue = newSolutionValue;
        returnCode = 1;
      }
    }

    delete [] variableLinearNonLinear;
    delete [] indexRow;
    delete [] indexCol;
    delete [] row;
    delete [] columnStart;
    delete [] columnLength;
    delete [] newSolution;
    delete [] new_g_sol;
    delete nlp;

#ifdef DEBUG_BON_HEURISTIC_DIVE_MIP
    std::cout<<"DiveMIP returnCode = "<<returnCode<<std::endl;
#endif

    return returnCode;
  }
}