21 bool cleanNnz(
double &value,
double colLower,
double colUpper,
22 double rowLower,
double rowUpper,
double colsol,
23 double & lb,
double &ub,
double tiny,
double veryTiny)
25 if(fabs(value)>= tiny)
return 1;
27 if(fabs(value)<veryTiny)
return 0;
31 bool colUpBounded = colUpper < 10000;
32 bool colLoBounded = colLower > -10000;
33 bool rowNotLoBounded = rowLower <= -
infty;
34 bool rowNotUpBounded = rowUpper >=
infty;
37 if(colLoBounded && pos && rowNotUpBounded) {
38 lb += value * (colsol - colLower);
42 if(colLoBounded && !pos && rowNotLoBounded) {
43 ub += value * (colsol - colLower);
47 if(colUpBounded && !pos && rowNotUpBounded) {
48 lb += value * (colsol - colUpper);
52 if(colUpBounded && pos && rowNotLoBounded) {
53 ub += value * (colsol - colUpper);
69 const double *
x,
int getObj,
const double *
x2,
double theta,
71 int n,
m, nnz_jac_g, nnz_h_lag;
72 Ipopt::TNLP::IndexStyleEnum index_style;
74 problem->
get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
77 problem->
eval_gi(n, x, 1, ind, g_i);
82 problem->
eval_grad_gi(n, x, 0, ind, nnz, NULL, jValues());
94 double nlp_infty =
infty;
97 if (rowLower[rowIdx] > -nlp_infty)
98 lb = rowLower[rowIdx] - g_i;
101 if (rowUpper[rowIdx] < nlp_infty)
102 ub = rowUpper[rowIdx] - g_i;
105 if (rowLower[rowIdx] > -infty && rowUpper[rowIdx] < infty) {
106 if (duals[rowIdx] >= 0)
108 if (duals[rowIdx] <= 0)
113 double veryTiny = 1
e-20;
115 for (
int i = 0; i <
nnz; i++) {
116 if(index_style == Ipopt::TNLP::FORTRAN_STYLE) jCol[i]--;
117 const int &colIdx = jCol[i];
119 if (
cleanNnz(jValues[i], colLower[colIdx], colUpper[colIdx],
120 rowLower[rowIdx], rowUpper[rowIdx], x[colIdx], lb, ub,
122 cut.insert(colIdx, jValues[i]);
125 lb += jValues[i] * x[colIdx];
127 ub += jValues[i] * x[colIdx];
135 double rhs = cut.dotProduct(x2);
136 double violation = 0.;
138 violation = std::max(violation, fabs(rhs - ub));
140 violation = std::max(violation, fabs(lb - rhs));
141 if (violation < theta) {
150 newCut.setGloballyValidAsInteger(1);
157 int binary_id = (ids == NULL) ? -1 : ids[ind];
160 cut.insert(binary_id, -lb);
166 cut.insert(binary_id, -ub);
276 const double *
x,
int nbAp,
bool getObj) {
281 Ipopt::TNLP::IndexStyleEnum index_style;
284 problem->
get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
293 if (OuterDesc == 0) {
300 std::vector<int> nbG(m, 2);
302 std::vector<double> step(n);
304 for (
int i = 0; i <
n; i++) {
306 if (colUpper[i] > 1e08) {
310 if (colUpper[i] > 1e08 || colLower[i] < -1e08 || (variableType[i]
314 step[i] = (up[i] - colLower[i]) / 2e02;
316 if (colLower[i] < -1e08) {
322 double g_p_i, g_pp_i;
323 problem->
eval_g(n, p, 1, m, g_p());
326 problem->
eval_g(n, up, 1, m, g_up());
328 for (
int i = 0; (i <
m); i++) {
329 if(constTypes[i] != Ipopt::TNLP::NON_LINEAR)
continue;
333 for (
int i = 0; i <
m; i++) {
334 thr[i] = std::abs(g_up[i]-g_p[i])/nbAp;
337 for (
int i = 0; (i <
m); i++) {
338 if(constTypes[i] != Ipopt::TNLP::NON_LINEAR)
continue;
341 while (nbG[i] < nbAp) {
344 for (
int j = 0;
j <
n;
j++) {
347 problem->
eval_gi(n, p, 1, i, g_p_i);
348 problem->
eval_gi(n, pp, 1, i, g_pp_i);
349 diff = std::abs(g_p_i - g_pp_i);
353 for (
int j = 0;
j <
n;
j++) {
363 for (
int i = 0; i <
m ; i++) {
364 if(constTypes[i] != Ipopt::TNLP::NON_LINEAR)
continue;
void addOuterDescription(OsiTMINLPInterface &nlp, OsiSolverInterface &si, const double *x, int nbAp, bool getObj)
Adds an outer description of the problem to the linear formulation.
virtual double getInfinity() const
Get solver's value for infinity.
static Bigint * diff(Bigint *a, Bigint *b)
void getMyOuterApproximation(OsiTMINLPInterface &si, OsiCuts &cs, int ind, const double *x, int getObj, const double *x2, double theta, bool global)
Get the outer approximation constraints at provided point and only for the specified constraint (ind ...
virtual const double * getColLower() const
Get pointer to array[getNumCols()] of column lower bounds.
pos
position where the operator should be printed when printing the expression
const TMINLP2TNLP * problem() const
get pointer to the TMINLP2TNLP adapter
This class provides an Osi interface for a Mixed Integer Linear Program expressed as a TMINLP (so ...
virtual bool eval_grad_gi(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index i, Ipopt::Index &nele_grad_gi, Ipopt::Index *jCol, Ipopt::Number *values)
compute the structure or values of the gradient for one constraint
virtual bool eval_gi(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index i, Ipopt::Number &gi)
compute the value of a single constraint
void fint fint fint real fint real real real real real real real real real * e
virtual bool get_constraints_linearity(Ipopt::Index m, LinearityType *const_types)
Returns the constraint linearity.
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
Returns the vector of constraint values in x.
virtual const double * getRowUpper() const
Get pointer to array[getNumRows()] of row upper bounds.
static bool cleanNnz(double &value, double colLower, double colUpper, double rowLower, double rowUpper, double colsol, double &lb, double &ub, double tiny, double veryTiny)
virtual const double * getRowPrice() const
Get pointer to array[getNumRows()] of dual prices.
virtual const double * getColUpper() const
Get pointer to array[getNumCols()] of column upper bounds.
virtual const double * getRowLower() const
Get pointer to array[getNumRows()] of row lower bounds.
int nnz
ATTENTION: Filter expects the Jacobian to be ordered by row.
This is an adapter class that converts a TMINLP to a TNLP to be solved by Ipopt.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, TNLP::IndexStyleEnum &index_style)
This call is just passed onto the TMINLP object.
virtual const int * get_const_xtra_id() const
Access array describing constraint to which perspectives should be applied.
VariableType
Type of the variables.
const TMINLP::VariableType * var_types()
Get the variable types.
void fint fint fint real fint real * x