10 #include "CoinPragma.hpp"
12 #include "CbcModel.hpp"
65 Ipopt::TNLP::IndexStyleEnum index_style;
66 minlp->
get_nlp_info(numberColumns, numberRows, nnz_jac_g,
67 nnz_h_lag, index_style);
69 const double* x_sol = minlp->
x_sol();
74 int* indexRow =
new int[nnz_jac_g];
75 int* indexCol =
new int[nnz_jac_g];
77 numberRows, nnz_jac_g,
78 indexRow, indexCol, 0);
80 int indexCorrection = (index_style == Ipopt::TNLP::C_STYLE) ? 0 : 1;
82 for(
int i=0; i<nnz_jac_g; i++) {
83 int thisIndexCol = indexCol[i]-indexCorrection;
84 if(indexCol[i] != iniCol) {
98 const double* newSolution,
102 double integerTolerance = model_->getDblParam(CbcModel::CbcIntegerTolerance);
104 const double* x_l = minlp->
x_l();
105 const double* x_u = minlp->
x_u();
111 Ipopt::TNLP::IndexStyleEnum index_style;
112 minlp->
get_nlp_info(numberColumns, numberRows, nnz_jac_g,
113 nnz_h_lag, index_style);
115 double* gradient_f =
new double[numberColumns];
117 double bestScore = COIN_DBL_MAX;
120 minlp->
eval_grad_f(numberColumns,newSolution,
true,gradient_f);
121 for(
int iIntCol=0; iIntCol<(
int)integerColumns.size(); iIntCol++) {
122 int iColumn = integerColumns[iIntCol];
123 double value=newSolution[iColumn];
124 if (fabs(floor(value+0.5)-value)>integerTolerance) {
125 double below = floor(value);
126 double downFraction = COIN_DBL_MAX;
128 double gradient = gradient_f[iColumn];
129 if(below >= x_l[iColumn])
130 downFraction = value-below;
131 double above = ceil(value);
132 double upFraction = COIN_DBL_MAX;
133 if(above <= x_u[iColumn])
134 upFraction = ceil(value)-value;
137 if(gradient>=0.0 && upFraction < COIN_DBL_MAX) {
138 objdelta = gradient*upFraction;
140 }
else if(gradient<0.0 && downFraction < COIN_DBL_MAX) {
141 objdelta = gradient*downFraction;
143 }
else if(upFraction < COIN_DBL_MAX) {
144 objdelta = gradient*upFraction;
147 objdelta = gradient*downFraction;
150 double score = (objdelta + 1
e-6)/((
double)
columnLength_[iColumn]+1.0);
151 if(score<bestScore) {
153 bestColumn = iColumn;
159 delete [] gradient_f;
166 roptions->AddStringOption2(
167 "heuristic_dive_MIP_vectorLength",
168 "if yes runs the Dive MIP VectorLength heuristic",
173 roptions->setOptionExtraInfo(
"heuristic_dive_MIP_vectorLength", 63);
HeuristicDiveMIP & operator=(const HeuristicDiveMIP &rhs)
Assignment operator.
virtual CbcHeuristic * clone() const
Clone.
int * columnLength_
the number of nonzero elements in each column
void Initialize(Ipopt::SmartPtr< Ipopt::OptionsList > options)
Initialize using passed options.
HeuristicDiveMIPVectorLength()
Default Constructor.
HeuristicDiveMIPVectorLength & operator=(const HeuristicDiveMIPVectorLength &rhs)
Assignment operator.
virtual void setInternalVariables(TMINLP2TNLP *minlp)
sets internal variables
void fint fint fint real fint real real real real real real real real real * e
const Ipopt::Number * x_l()
Get the current values for the lower bounds.
const Ipopt::Number * x_u()
Get the current values for the upper bounds.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
Returns the vector of the gradient of the objective w.r.t.
const Ipopt::Number * x_sol() const
get the solution values
virtual void selectVariableToBranch(TMINLP2TNLP *minlp, const vector< int > &integerColumns, const double *newSolution, int &bestColumn, int &bestRound)
Selects the next variable to branch on.
Ipopt::SmartPtr< Ipopt::OptionsList > options()
Access list of Options.
This is an adapter class that converts a TMINLP to a TNLP to be solved by Ipopt.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, TNLP::IndexStyleEnum &index_style)
This call is just passed onto the TMINLP object.
static void registerOptions(Ipopt::SmartPtr< Bonmin::RegisteredOptions > roptions)
Register the options common to all local search based heuristics.
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Returns the jacobian of the constraints.