using namespace Ipopt;
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
    tnlp_(tnlp),
    objectiveScalingFactor_(objectiveScalingFactor),
    use_feasibility_pump_objective_(false),
    use_cutoff_constraint_(false),
    use_local_branching_constraint_(false),
    cutoff_(COIN_DBL_MAX),
    rhs_local_branching_constraint_(COIN_DBL_MAX),
    index_style_(TNLP::C_STYLE)
{}
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other):
    tnlp_(tnlp),
    inds_(other->inds_),
    vals_(other->vals_),
    lambda_(other->lambda_),
    sigma_(other->sigma_),
    norm_(other->norm_),
    objectiveScalingFactor_(other->objectiveScalingFactor_),
    use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
    use_cutoff_constraint_(other->use_cutoff_constraint_),
    use_local_branching_constraint_(other->use_local_branching_constraint_),
    cutoff_(other->cutoff_),
    rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
    index_style_(other->index_style_)
{}
void TNLP2FPNLP::set_cutoff(Number cutoff)
{
  Number epsilon = 1.0e-6;
  if(cutoff > 1.0e-8) cutoff_ = (1 - epsilon) * cutoff;
  else if(cutoff < -1.0e-8) cutoff_ = (1 + epsilon) * cutoff;
}
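// Illustrative values (not from the source): set_cutoff(100.0) stores
// cutoff_ = 99.9999 and set_cutoff(-100.0) stores cutoff_ = -100.0001, so
// the cutoff constraint f(x) <= cutoff_ always demands a small improvement
// over the incumbent, whatever its sign.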
void TNLP2FPNLP::set_dist_to_point_obj(size_t n, const Number* vals, const Index* inds)
{
  inds_.resize(n);
  vals_.resize(n);
  std::copy(vals, vals + n, vals_.begin());
  std::copy(inds, inds + n, inds_.begin());
}
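// Usage sketch (illustrative; fp_nlp, int_inds and x_round are hypothetical
// names): after rounding the relaxation, hand the rounded integer point to
// the adapter so the distance objective is measured against it.
//   std::vector<Ipopt::Index> int_inds = {0, 2, 5};
//   std::vector<Ipopt::Number> x_round = {1., 0., 1.};
//   fp_nlp->set_dist_to_point_obj(int_inds.size(), x_round.data(), int_inds.data());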
double TNLP2FPNLP::dist_to_point(const Number* x)
{
  double ret_val = 0;
  if(norm_ == 2) {
    for(unsigned int i = 0; i < vals_.size() ; i++) {
      ret_val += (x[inds_[i]] - vals_[i]) * (x[inds_[i]] - vals_[i]);
    }
  }
  else if(norm_ == 1) {
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        ret_val += x[inds_[i]];
      else
        ret_val += (1.0 - x[inds_[i]]);
    }
  }
  return ret_val;
}
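// For a 0/1 reference point, |x_i - vals_[i]| equals x_i when vals_[i] = 0
// and 1 - x_i when vals_[i] = 1; the test vals_[i] <= 0.1 simply
// distinguishes the two cases without evaluating an absolute value.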
    TNLP::IndexStyleEnum& index_style)
{
  bool ret_code = tnlp_->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
  index_style_ = index_style; // remember the index style for later callbacks
  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    m += 2;
    nnz_jac_g += (n + (int)vals_.size());
  }
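  // Counting check: the cutoff row is dense (n Jacobian entries) while the
  // local branching row touches only the vals_.size() integer variables,
  // hence n + vals_.size() new nonzeros when both constraints are active.
  // ... (the single-constraint cases adjust m and nnz_jac_g analogously)
  return ret_code;
}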
bool
TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
    Index m, Number* g_l, Number* g_u)
{
  bool ret_code;
  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m-2, g_l, g_u);
    g_l[m-2] = - COIN_DBL_MAX;
    g_u[m-2] = cutoff_;
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = rhs_local_branching_constraint_;
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m-1, g_l, g_u);
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = cutoff_;
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m-1, g_l, g_u);
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = rhs_local_branching_constraint_;
  }
  else
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m, g_l, g_u);
  return ret_code;
}
bool TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
{
  bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);
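  // The recombination below is reconstructed from the class documentation
  // (lambda_, sigma_, objectiveScalingFactor_): a convex combination of the
  // original objective and the distance to the rounded point.
  if(use_feasibility_pump_objective_) {
    obj_value *= (1 - lambda_) * sigma_;
    obj_value += objectiveScalingFactor_ * lambda_ * dist_to_point(x);
  }
  return ret_code;
}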
bool TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
{
  bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);
  if(use_feasibility_pump_objective_) {
    // scale the gradient of the original objective
    for(int i = 0 ; i < n ; i++) {
      grad_f[i] *= (1 - lambda_) * sigma_;
    }
    if(norm_ == 2) {
      for(unsigned int i = 0 ; i < inds_.size() ; i++) {
        grad_f[inds_[i]] += objectiveScalingFactor_ * 2 * lambda_ * (x[inds_[i]] - vals_[i]);
      }
    }
    else { // L1 norm: constant slope, sign depends on the rounded value
      for(unsigned int i = 0 ; i < inds_.size() ; i++) {
        grad_f[inds_[i]] += (vals_[i] <= 0.1 ? 1.0 : -1.0) * objectiveScalingFactor_ * lambda_;
      }
    }
  }
  return ret_code;
}
bool
TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
{
  bool ret_code;
  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m-2, g);
    // cutoff row: value of the objective
    Number obj_value;
    if(eval_f(n, x, new_x, obj_value))
      g[m-2] = obj_value;
    else
      ret_code = false;
    // local branching row: L1 distance to the stored 0/1 point
    Number g_local_branching = 0.0;
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        g_local_branching += x[inds_[i]];
      else
        g_local_branching += (1.0 - x[inds_[i]]);
    }
    g[m-1] = g_local_branching;
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
    Number obj_value;
    if(eval_f(n, x, new_x, obj_value))
      g[m-1] = obj_value;
    else
      ret_code = false;
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
    Number g_local_branching = 0.0;
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        g_local_branching += x[inds_[i]];
      else
        g_local_branching += (1.0 - x[inds_[i]]);
    }
    g[m-1] = g_local_branching;
  }
  else
    ret_code = tnlp_->eval_g(n, x, new_x, m, g);
  return ret_code;
}
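// In every branch the appended rows mirror get_bounds_info: the cutoff row
// is set to the objective value returned by eval_f, and the local branching
// row to the L1 distance between x and the stored 0/1 point.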
bool
TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
    Index m, Index nele_jac, Index* iRow,
    Index* jCol, Number* values)
{
  bool ret_code;
  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    int n_integers = (int)vals_.size();
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n - n_integers,
        iRow, jCol, values);

    if (iRow && jCol && !values) { // initialization: append the sparsity pattern
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      // cutoff constraint: a dense row in position m-2
      int k = nele_jac - n - n_integers;
      for(int i = 0; i < n; i++) {
        iRow[k + i] = m - 2 + index_correction;
        jCol[k + i] = i + index_correction;
      }
      // local branching constraint: one entry per integer variable, row m-1
      k = nele_jac - n_integers;
      for(int i = 0; i < n_integers; i++) {
        iRow[k + i] = m - 1 + index_correction;
        jCol[k + i] = inds_[i] + index_correction;
      }
    }
    else if (!iRow && !jCol && values) { // computation: append the values
      // the cutoff row is the gradient of the objective
      Number* grad_f = new Number[n];
      bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
      if(ret_code_grad_f) {
        int k = nele_jac - n - n_integers;
        for(int i = 0; i < n; i++) {
          values[k + i] = grad_f[i];
        }
      }
      else
        ret_code = false;
      delete [] grad_f;
      // the local branching row has coefficient +1 or -1 per integer variable
      int k = nele_jac - n_integers;
      for(int i = 0; i < n_integers; i++) {
        values[k + i] = (vals_[i] <= 0.1) ? 1.0 : -1.0;
      }
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n,
        iRow, jCol, values);

    if (iRow && jCol && !values) { // initialization phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      int k = nele_jac - n;
      for(int i = 0; i < n; i++) {
        iRow[k + i] = m - 1 + index_correction;
        jCol[k + i] = i + index_correction;
      }
    }
    else if (!iRow && !jCol && values) { // computation phase
      Number* grad_f = new Number[n];
      bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
      if(ret_code_grad_f) {
        int k = nele_jac - n;
        for(int i = 0; i < n; i++) {
          values[k + i] = grad_f[i];
        }
      }
      else
        ret_code = false;
      delete [] grad_f;
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  else if(use_local_branching_constraint_) {
    int n_integers = (int)vals_.size();
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n_integers,
        iRow, jCol, values);

    if (iRow && jCol && !values) { // initialization phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      int k = nele_jac - n_integers;
      for(int i = 0; i < n_integers; i++) {
        iRow[k + i] = m - 1 + index_correction;
        jCol[k + i] = inds_[i] + index_correction;
      }
    }
    else if (!iRow && !jCol && values) { // computation phase
      int k = nele_jac - n_integers;
      for(int i = 0; i < n_integers; i++) {
        values[k + i] = (vals_[i] <= 0.1) ? 1.0 : -1.0;
      }
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  else
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac, iRow, jCol, values);
  return ret_code;
}
bool
TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
    Number obj_factor, Index m, const Number* lambda,
    bool new_lambda, Index nele_hess,
    Index* iRow, Index* jCol, Number* values)
{
  bool ret_code;
  // extra Hessian entries are needed only for the squared (L2) distance term
  int nnz_obj_h = (norm_ == 2) ? (int)inds_.size() : 0;
  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    // fold the cutoff row's multiplier into the objective factor
    double coef_obj = (iRow != NULL) ? 0 : lambda[m - 2];
    ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1 - lambda_)*sigma_ + coef_obj,
        m - 2, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  else if(use_cutoff_constraint_) {
    double coef_obj = (iRow != NULL) ? 0 : lambda[m - 1];
    ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1 - lambda_)*sigma_ + coef_obj,
        m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1 - lambda_)*sigma_,
        m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  else {
    ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1 - lambda_)*sigma_,
        m, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  // append the diagonal Hessian of the distance term (L2 norm only)
  if(use_feasibility_pump_objective_ && norm_ == 2) {
    if (iRow && jCol && !values) { // initialization phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      int k = nele_hess - nnz_obj_h;
      for(unsigned int i = 0; i < inds_.size() ; i++) {
        iRow[k] = inds_[i] + index_correction;
        jCol[k] = inds_[i] + index_correction;
        k++;
      }
      DBG_ASSERT(k==nele_hess);
    }
    else if (!iRow && !jCol && values) { // computation phase
      int k = nele_hess - nnz_obj_h;
      for(unsigned int i = 0; i < inds_.size() ; i++) {
        // second derivative of objectiveScalingFactor_*lambda_*(x - vals_)^2
        values[k] = 2 * objectiveScalingFactor_ * lambda_ * obj_factor;
        k++;
      }
      DBG_ASSERT(k==nele_hess);
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  return ret_code;
}
void
TNLP2FPNLP::finalize_solution(SolverReturn status,
    Index n, const Number* x, const Number* z_L, const Number* z_U,
    Index m, const Number* g, const Number* lambda,
    Number obj_value,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  // report only the original constraints back to the wrapped TNLP
  int m2 = m;
  if(use_cutoff_constraint_) m2--;
  if(use_local_branching_constraint_) m2--;
  tnlp_->finalize_solution(status, n, x, z_L, z_U, m2, g, lambda, obj_value,
      ip_data, ip_cq);
}
double lambda_
value of the convex combination parameter taken between the original objective and the distance function.
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, Ipopt::TNLP::IndexStyleEnum &index_style)
get info from tnlp_ and add the Hessian and Jacobian entries for the extra terms and constraints
void set_cutoff(Ipopt::Number cutoff)
Set the cutoff value to use in the cutoff constraint.
void set_dist_to_point_obj(size_t n, const Ipopt::Number *vals, const Ipopt::Index *inds)
Set the point to which distance is minimized.
Ipopt::TNLP::IndexStyleEnum index_style_
Ipopt::Index style (C or Fortran)
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
overloaded to return the value of the objective function
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
Passed on to tnlp_, with bounds added for the cutoff and local branching rows when those constraints are active.
virtual ~TNLP2FPNLP()
Default destructor.
vector< Ipopt::Index > inds_
Indices of the variables for which distance is minimized (i.e. indices of the integer variables in a feasibility pump).
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
overload to return the jacobian of g
TNLP2FPNLP()
Default Constructor.
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
overload this method to return the vector of the gradient of the objective w.r.t. x.
Ipopt::SmartPtr< TNLP > tnlp_
pointer to the TNLP that is being adapted
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
overload to return the values of the left-hand side of the constraints
double dist_to_point(const Ipopt::Number *x)
Compute the distance (in the norm selected by norm_) from x to the current point to which distance is minimized.
double objectiveScalingFactor_
Scaling factor for the objective.
bool use_feasibility_pump_objective_
Flag to indicate that we want to use the feasibility pump objective.
bool use_cutoff_constraint_
Flag to indicate that we want to use a cutoff constraint. This constraint has the form f(x) <= (1 - epsilon) * cutoff.
virtual bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Evaluate the modified Hessian of the Lagrangian.
int norm_
Norm to use (L_1 or L_2).
vector< Ipopt::Number > vals_
Values of the point to which we separate (if x is the point, vals_[i] should be x[inds_[i]]).
double rhs_local_branching_constraint_
RHS of local branching constraint.
double sigma_
Scaling for the original objective.
double cutoff_
Value of best solution known.
bool use_local_branching_constraint_
Flag to indicate that we want to use a local branching constraint.
virtual void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, const Ipopt::Number *g, const Ipopt::Number *lambda, Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
This method is called when the algorithm is complete so the TNLP can store/write the solution.
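A minimal usage sketch of the adapter, assuming the class lives in the Bonmin namespace with the BonTNLP2FPNLP.hpp header and is solved through Ipopt's IpoptApplication. solve_fp_subproblem, orig_nlp, int_inds, x_round, and best_obj are illustrative names, and the scaling factor 100.0 is an arbitrary choice, not a documented default:

#include "BonTNLP2FPNLP.hpp"
#include "IpIpoptApplication.hpp"
#include <vector>

using namespace Ipopt;

// Solve one feasibility pump subproblem: minimize the distance to a rounded
// integer point subject to the original constraints (a sketch, not the
// actual Bonmin driver code).
void solve_fp_subproblem(SmartPtr<TNLP> orig_nlp,
                         const std::vector<Index>& int_inds,
                         const std::vector<Number>& x_round,
                         Number best_obj)
{
  SmartPtr<Bonmin::TNLP2FPNLP> fp_nlp =
      new Bonmin::TNLP2FPNLP(orig_nlp, 100.0); // objective scaling factor
  fp_nlp->set_dist_to_point_obj(int_inds.size(), x_round.data(), int_inds.data());
  fp_nlp->set_cutoff(best_obj); // stored slightly tightened, see set_cutoff

  SmartPtr<IpoptApplication> app = new IpoptApplication();
  if (app->Initialize() == Solve_Succeeded) {
    SmartPtr<TNLP> as_tnlp = GetRawPtr(fp_nlp);
    app->OptimizeTNLP(as_tnlp);
  }
}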