10 #include "CoinPragma.hpp"
12 #include "CbcModel.hpp"
63 Ipopt::TNLP::IndexStyleEnum index_style;
64 minlp->
get_nlp_info(numberColumns, numberRows, nnz_jac_g,
65 nnz_h_lag, index_style);
67 const double* x_sol = minlp->
x_sol();
72 int* indexRow =
new int[nnz_jac_g];
73 int* indexCol =
new int[nnz_jac_g];
75 numberRows, nnz_jac_g,
76 indexRow, indexCol, 0);
78 int indexCorrection = (index_style == Ipopt::TNLP::C_STYLE) ? 0 : 1;
80 for(
int i=0; i<nnz_jac_g; i++) {
81 int thisIndexCol = indexCol[i]-indexCorrection;
82 if(indexCol[i] != iniCol) {
96 const double* newSolution,
100 double integerTolerance = model_->getDblParam(CbcModel::CbcIntegerTolerance);
102 const double* x_l = minlp->
x_l();
103 const double* x_u = minlp->
x_u();
109 Ipopt::TNLP::IndexStyleEnum index_style;
110 minlp->
get_nlp_info(numberColumns, numberRows, nnz_jac_g,
111 nnz_h_lag, index_style);
113 double* gradient_f =
new double[numberColumns];
115 double bestScore = COIN_DBL_MAX;
118 minlp->
eval_grad_f(numberColumns,newSolution,
true,gradient_f);
119 for(
int iIntCol=0; iIntCol<(
int)integerColumns.size(); iIntCol++) {
120 int iColumn = integerColumns[iIntCol];
121 double value=newSolution[iColumn];
122 if (fabs(floor(value+0.5)-value)>integerTolerance) {
123 double below = floor(value);
124 double downFraction = COIN_DBL_MAX;
126 double gradient = gradient_f[iColumn];
127 if(below >= x_l[iColumn])
128 downFraction = value-below;
129 double above = ceil(value);
130 double upFraction = COIN_DBL_MAX;
131 if(above <= x_u[iColumn])
132 upFraction = ceil(value)-value;
135 if(gradient>=0.0 && upFraction < COIN_DBL_MAX) {
136 objdelta = gradient*upFraction;
138 }
else if(gradient<0.0 && downFraction < COIN_DBL_MAX) {
139 objdelta = gradient*downFraction;
141 }
else if(upFraction < COIN_DBL_MAX) {
142 objdelta = gradient*upFraction;
145 objdelta = gradient*downFraction;
148 double score = (objdelta + 1
e-6)/((
double)
columnLength_[iColumn]+1.0);
149 if(score<bestScore) {
151 bestColumn = iColumn;
157 delete [] gradient_f;
164 roptions->AddStringOption2(
165 "heuristic_dive_vectorLength",
166 "if yes runs the Dive VectorLength heuristic",
171 roptions->setOptionExtraInfo(
"heuristic_dive_vectorLength", 63);
int * columnLength_
The number of nonzero elements in each column.
virtual CbcHeuristic * clone() const
Clone.
HeuristicDive & operator=(const HeuristicDive &rhs)
Assignment operator.
virtual void selectVariableToBranch(TMINLP2TNLP *minlp, const vector< int > &integerColumns, const double *newSolution, int &bestColumn, int &bestRound)
Selects the next variable to branch on.
HeuristicDiveVectorLength()
Default Constructor.
void fint fint fint real fint real real real real real real real real real * e
const Ipopt::Number * x_l()
Get the current values for the lower bounds.
static void registerOptions(Ipopt::SmartPtr< Bonmin::RegisteredOptions > roptions)
Register the options common to all local search based heuristics.
HeuristicDiveVectorLength & operator=(const HeuristicDiveVectorLength &rhs)
Assignment operator.
void Initialize(Ipopt::SmartPtr< Ipopt::OptionsList > options)
Initialize using passed options.
const Ipopt::Number * x_u()
Get the current values for the upper bounds.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
Returns the vector of the gradient of the objective w.r.t.
virtual void setInternalVariables(TMINLP2TNLP *minlp)
Sets internal variables.
const Ipopt::Number * x_sol() const
get the solution values
Ipopt::SmartPtr< Ipopt::OptionsList > options()
Access list of Options.
This is an adapter class that converts a TMINLP to a TNLP to be solved by Ipopt.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, TNLP::IndexStyleEnum &index_style)
This call is just passed onto the TMINLP object.
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Returns the jacobian of the constraints.