  using namespace Ipopt;

    tnlp_solver_(tnlp_solver)
  {
    options->GetIntegerValue("oa_log_level", oa_log_level_, tnlp_solver->prefix());
    options->GetEnumValue("cut_strengthening_type", cut_strengthening_type_,
                          tnlp_solver->prefix());
    options->GetEnumValue("disjunctive_cut_type", disjunctive_cut_type_,
                          tnlp_solver->prefix());

    tnlp_solver_->options()->clear();
    if (!tnlp_solver_->Initialize("strength.opt")) {
      throw CoinError("CutStrengthener", "CutStrengthener",
                      "Error during initialization of tnlp_solver_");
    }
    tnlp_solver_->options()->SetStringValue("hessian_approximation", "limited-memory");
    tnlp_solver_->options()->SetStringValue("mu_strategy", "adaptive");
  }
                                     const double* minlp_lb,
                                     const double* minlp_ub,
                                     const int gindex, CoinPackedVector& cut,
                                     double& cut_lb, double& cut_ub,
                                     int n, const double* x,

    const int cut_nele = cut.getNumElements();
    const int* cut_indices = cut.getIndices();
    const double* cut_elements = cut.getElements();
    retval = StrengthenCut(tminlp, gindex, cut, n, x, minlp_lb, minlp_ub,
                           cut_lb, cut_ub);
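    // Locate the entry of the cut whose variable is most fractional at x: its
    // fractionality (distance to the nearest integer) serves as the violation
    // measure "viol", and its position is recorded in "imostfra" for the
    // disjunction below.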
    for (int i=0; i<cut_nele; i++) {
      const int& idx = cut_indices[i];
      const double& xi = x[idx];
      const double this_viol = CoinMin(xi-floor(xi), ceil(xi)-xi);
      if (this_viol > viol) {
        viol = this_viol;
        imostfra = i;
      }
    }
    retval = StrengthenCut(tminlp, gindex, cut, n, x, minlp_lb, minlp_ub,
                           cut_lb, cut_ub);
    const int& idx = cut_indices[imostfra];
    const double& xi = x[idx];
    printf("Doing disjunction for constr %d on x[%d] = %e\n", gindex, idx, xi);
    const double down_xi = floor(xi);
    double* changed_bnds = new double[n];
    CoinCopyN(minlp_ub, n, changed_bnds);
    changed_bnds[idx] = down_xi;
    double cut_lb_down = cut_lb;
    double cut_ub_down = cut_ub;
    retval = StrengthenCut(tminlp, gindex, cut, n, x, minlp_lb,
                           changed_bnds, cut_lb_down, cut_ub_down);
    double cut_lb_up = cut_lb;
    double cut_ub_up = cut_ub;
    CoinCopyN(minlp_lb, n, changed_bnds);
    changed_bnds[idx] = down_xi + 1.;
    retval = StrengthenCut(tminlp, gindex, cut, n, x, changed_bnds,
                           minlp_ub, cut_lb_up, cut_ub_up);
    delete [] changed_bnds;
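    // Merge the results of the two branches into a single cut: the coefficient
    // of the branching variable and the corresponding cut bound are adjusted
    // by the difference between the "down" and "up" strengthened bounds (see
    // the updates of coeff_xi and cut_ub / cut_lb below).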
    double coeff_xi = cut_elements[imostfra];
    const double old_coeff = coeff_xi;
    if (cut_lb <= -infty) {
      coeff_xi += cut_ub_down - cut_ub_up;
      cut_ub = cut_ub_up + (down_xi+1.)*(cut_ub_down - cut_ub_up);
    }
    else {
      coeff_xi += cut_lb_down - cut_lb_up;
      cut_lb = cut_lb_up + (down_xi+1.)*(cut_lb_down - cut_lb_up);
    }
    cut.setElement(imostfra, coeff_xi);
    printf("old coeff = %e new = %e\n", old_coeff, coeff_xi);
    std::cerr << "Invalid case for disjunctive_cut_type_ in CutStrengthener HandleOneCut\n";
                                  const int gindex, CoinPackedVector& cut,
                                  double& cut_lb, double& cut_ub,
                                  const double g_val, const double g_lb,
                                  int n, const double* x,
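    // The constraint counts as tight at x if its value g_val lies within
    // tight_tol of the bound on the side where the cut has its finite bound;
    // is_tight is later handed to HandleOneCut, which presumably uses it to
    // decide between direct strengthening and the disjunctive treatment.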
    bool is_tight = false;
    const Number tight_tol = 1e-8;
    if (cut_lb <= -infty && g_ub - g_val <= tight_tol) {
      is_tight = true;
    }
    else if (cut_ub >= infty && g_val - g_lb <= tight_tol) {
      is_tight = true;
    }
    const double orig_lb = cut_lb;
    const double orig_ub = cut_ub;
      printf(" Error during strengthening of global cut for constraint %d\n", gindex);
                 fabs(orig_ub-cut_ub)>1e-4)) {
      if (orig_ub < infty) {
        printf(" Strengthening ub of global cut for constraint %d from %e to %e\n",
               gindex, orig_ub, cut_ub);
      }
      else {
        printf(" Strengthening lb of global cut for constraint %d from %e to %e\n",
               gindex, orig_lb, cut_lb);
      }
      CoinPackedVector cut2(cut);
                                problem->x_u(), gindex, cut2,
        printf(" Error during strengthening of local cut for constraint %d\n", gindex);
      const Number localCutTol = 1e-4;
      if (fabs(lb2-cut_lb) >= localCutTol || fabs(cut_ub-ub2) >= localCutTol) {
          printf(" Strengthening ub of local cut for constraint %d from %e to %e\n",
                 gindex, cut_ub, ub2);
          printf(" Strengthening lb of local cut for constraint %d from %e to %e\n",
                 gindex, cut_lb, lb2);
        OsiRowCut newCut2;
        newCut2.setEffectiveness(99.99e99);
        newCut2.setRow(cut2);
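        // The strengthened local cut is wrapped in an OsiRowCut whose
        // effectiveness is set very high (99.99e99), presumably so that the
        // cut is always kept; it is then added to the OsiCuts collection cs.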
                                const CoinPackedVector& row,
    Index* jCol = new Index[n+1];
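    // For the objective row (constr_index == -1) the gradient structure is not
    // available directly, so it is detected below by evaluating grad_f at a
    // random point inside the variable box; index n is appended at the end,
    // presumably for the auxiliary objective-value variable of the row.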
    if (constr_index == -1) {
      double* x_rand = new double[n];
      for (int i=0; i<n; i++) {
        const double radius = CoinMin(1., x_u[i]-x_l[i]);
        const double p = CoinMax(CoinMin(x[i]-0.5*radius, x_u[i]-radius), x_l[i]);
        x_rand[i] = p + radius*CoinDrand48();
      }
      Number* grad_f = new Number[n];
      bool retval = tminlp->eval_grad_f(n, x_rand, new_x, grad_f);
      for (int i=0; i<n; i++) {
        if (grad_f[i] != 0.) {
          jCol[nele_grad_gi++] = i;
        }
      }
      jCol[nele_grad_gi++] = n;
      if (!tminlp->eval_grad_gi(n, x, new_x, constr_index, nele_grad_gi,
    if (lb <= -COIN_DBL_MAX) {
      assert(ub < COIN_DBL_MAX);
    }
    else {
      assert(ub >= COIN_DBL_MAX);
    }
                                constr_index, nele_grad_gi, jCol);
    const Number tiny_move = 0e-8;
    const Number final_bound = stnlp->StrengthenedBound();
      lb = final_bound - tiny_move;
      ub = final_bound + tiny_move;
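    // Only one of the two assignments is executed, depending on whether a
    // lower or an upper bound is being strengthened: the NLP's optimal value
    // final_bound replaces lb or ub, shifted by tiny_move.  Note that the
    // literal 0e-8 evaluates to 0., so no relaxation is actually applied.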
                                             const CoinPackedVector& cut,
                                             const Number* starting_point,
                                             const double* x_l_orig,
                                             const double* x_u_orig,
      constr_index_(constr_index),
      nvar_constr_(nvar_constr),
      lower_bound_(lower_bound),
      have_final_bound_(false),
    starting_point_ = new Number[n_orig_];
    x_full_ = new Number[n_orig_];
    IpBlasDcopy(n_orig_, starting_point, 1, starting_point_, 1);
    IpBlasDcopy(n_orig_, starting_point, 1, x_full_, 1);
    obj_grad_ = new Number[nvar_constr_];
    x_l_ = new Number[nvar_constr_];
    x_u_ = new Number[nvar_constr_];
    const Number zero = 0.;
    IpBlasDcopy(nvar_constr_, &zero, 0, obj_grad_, 1);
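    // BLAS dcopy with source increment 0 broadcasts the scalar "zero" into
    // obj_grad_, i.e. the objective gradient of the strengthening NLP is
    // zero-initialized before the cut coefficients are scattered into it.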
    const int cut_nele = cut.getNumElements();
    const int* cut_indices = cut.getIndices();
    const double* cut_elements = cut.getElements();
    for (int i=0; i<cut_nele; i++) {
      const int& idx = cut_indices[i];
      for (int j=0; j<nvar_constr_; j++) {
        if (idx == jCol[j]) {
        printf("There is an index (%d) in the cut that does not appear in the constraint.\n", idx);
        obj_grad_[jidx] = cut_elements[i];
        obj_grad_[jidx] = -cut_elements[i];
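      // Only one of the two assignments above is executed: the linear
      // objective of the strengthening NLP is +cut or -cut, depending on
      // whether the cut's lower or its upper bound is being strengthened (see
      // also StrengthenedBound(), which undoes the sign flip).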
    var_indices_ = new Index[nvar_constr_];
    for (int i=0; i<nvar_constr_; i++) {
      const Index& j = jCol[i];
      x_l_[i] = x_l_orig[j];
      x_u_[i] = x_u_orig[j];
    }
    if (constr_index_ == -1) {
      grad_f_ = new Number[n_orig_];
    }
                                          Index& nnz_h_lag, IndexStyleEnum& index_style)
    nnz_jac_g = nvar_constr_;
    index_style = C_STYLE;
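    // The strengthening NLP is tiny: presumably n = nvar_constr_ variables and
    // a single constraint, with a dense Jacobian row of nvar_constr_ entries
    // and C-style indexing; the original TMINLP is queried below only to check
    // consistency of the dimensions.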
    Index n_orig;
    Index nnz_jac_g_orig;
    Index nnz_h_lag_orig;
    TNLP::IndexStyleEnum index_style_orig;
    if (!tminlp_->get_nlp_info(n_orig, m_orig_, nnz_jac_g_orig, nnz_h_lag_orig,
                               index_style_orig)) {
      return false;
    }
    if (n_orig_ != n_orig) {
      std::cerr << "Number of variables inconsistent in StrengtheningTNLP::get_nlp_info\n";
                                                Index m, Number* g_l, Number* g_u)
    if (constr_index_ == -1) {
      Number* x_l_orig = new Number[n_orig_];
      Number* x_u_orig = new Number[n_orig_];
      Number* g_l_orig = new Number[m_orig_];
      Number* g_u_orig = new Number[m_orig_];
      if (!tminlp_->get_bounds_info(n_orig_, x_l_orig, x_u_orig,
                                    m_orig_, g_l_orig, g_u_orig)) {
      g_l[0] = g_l_orig[constr_index_];
      g_u[0] = g_u_orig[constr_index_];
    for (Index i=0; i<nvar_constr_; i++) {
                                                 bool init_z, Number* z_L, Number* z_U,
                                                 Index m, bool init_lambda,
    assert(!init_z && !init_lambda);
    assert(n == nvar_constr_);
    if (constr_index_ == -1) {
      for (Index i=0; i<n-1; i++) {
        x[i] = starting_point_[var_indices_[i]];
      }
    }
    else {
      for (Index i=0; i<n; i++) {
        x[i] = starting_point_[var_indices_[i]];
      }
    }
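      // For the objective row (constr_index_ == -1) only the first n-1 entries
      // are taken from starting_point_; the last variable is presumably the
      // auxiliary objective-value variable, whose start value is set
      // separately.  Otherwise all n entries are mapped via var_indices_.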
  eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
  {
    obj_value = 0.;
    for (Index i=0; i<n; i++) {
      obj_value += obj_grad_[i]*x[i];
    }
    IpBlasDcopy(n, obj_grad_, 1, grad_f, 1);
  eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
    if (constr_index_ == -1) {
      retval = tminlp_->eval_f(n_orig_, x_full_, new_x, g[0]);
    }
    else {
      retval = tminlp_->eval_gi(n_orig_, x_full_, new_x, constr_index_, g[0]);
    }
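    // The single constraint of the strengthening NLP is either the original
    // objective (constr_index_ == -1) or the original constraint constr_index_,
    // both evaluated at the full-space point x_full_ that update_x_full()
    // keeps in sync with the reduced variables.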
                                         Index m, Index nele_jac, Index* iRow, Index* jCol,
      DBG_ASSERT(jCol && !values);
      for (Index i=0; i<nele_jac; i++) {
        iRow[i] = 0;
        jCol[i] = i;
      }
      DBG_ASSERT(!iRow && values);
      if (constr_index_ == -1) {
        retval = tminlp_->eval_grad_f(n_orig_, x_full_, new_x, grad_f_);
        for (Index i=0; i<n-1; i++) {
          values[i] = grad_f_[var_indices_[i]];
        }
      }
      else {
        retval = tminlp_->eval_grad_gi(n_orig_, x_full_, new_x, constr_index_,
                                       nele_jac, NULL, values);
      }
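    // Structure call (values == NULL): the Jacobian is the single dense row
    // over the nvar_constr_ reduced variables.  Value call: for the objective
    // row the full gradient grad_f_ is evaluated and mapped through
    // var_indices_ (skipping the last, presumably auxiliary, variable);
    // otherwise eval_grad_gi of the original TMINLP fills the values directly.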
                                     Number obj_factor, Index m, const Number* lambda,
                                     bool new_lambda, Index nele_hess,
                                     Index* iRow, Index* jCol, Number* values)
    std::cerr << "At the moment, StrengtheningTNLP::eval_h is not yet implemented\n";
    if (constr_index_ == -1) {
      for (Index i=0; i<nvar_constr_-1; i++) {
        x_full_[var_indices_[i]] = x[i];
      }
    }
    else {
      for (Index i=0; i<nvar_constr_; i++) {
        x_full_[var_indices_[i]] = x[i];
      }
    }
                                           Index n, const Number* x, const Number* z_L, const Number* z_U,
                                           Index m, const Number* g, const Number* lambda,
                                           Number obj_value,
                                           const IpoptData* ip_data,
                                           IpoptCalculatedQuantities* ip_cq)
    if (status == SUCCESS || status == STOP_AT_ACCEPTABLE_POINT) {
      have_final_bound_ = true;
      strengthened_bound_ = obj_value;
    }
    else {
      have_final_bound_ = false;
    }
    DBG_ASSERT(have_final_bound_);
    return strengthened_bound_;
    return -strengthened_bound_;
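  // StrengthenedBound() returns the optimal objective value of the auxiliary
  // NLP, with the sign flipped back when the objective row was negated; only
  // one of the two return statements is reached, matching the sign choice for
  // obj_grad_ in the constructor.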