8 #ifndef _TNLP2FPNLP_HPP_
9 #define _TNLP2FPNLP_HPP_
13 #include "IpSmartPtr.hpp"
// Constructor: wrap an existing TNLP so it can be solved as a
// Feasibility Pump NLP. objectiveScalingFactor scales the original
// objective term in the combined FP objective (defaults to 100).
28 TNLP2FPNLP(
const Ipopt::SmartPtr<Ipopt::TNLP> tnlp,
double objectiveScalingFactor = 100);
// Constructor: wrap tnlp while taking the FP settings from another
// TNLP2FPNLP instance — presumably copies lambda/norm/scaling from
// `other`; TODO(review) confirm against the out-of-view definition.
31 TNLP2FPNLP(
const Ipopt::SmartPtr<TNLP> tnlp,
const Ipopt::SmartPtr<TNLP2FPNLP> other);
// Switch the adapter to a different underlying TNLP.
// GetRawPtr unwraps the SmartPtr before assigning to tnlp_; since tnlp_
// is itself a SmartPtr (see member list below), the assignment re-wraps
// the raw pointer and takes a reference — TODO(review) confirm this is
// intentional rather than a leftover from a raw-pointer member.
36 void use(Ipopt::SmartPtr<TNLP> tnlp){
37 tnlp_ = GetRawPtr(tnlp);}
// NOTE(review): the three lines below are detached body fragments of
// separate setters whose signatures were lost in extraction — judging by
// the asserted names: set_rhs_local_branching_constraint (rhs must be
// non-negative), setLambda (lambda clamped to [0,1]), and setNorm
// (norm must be 1 or 2). TODO confirm against the original header.
61 { assert(rhs_local_branching_constraint >= 0);
81 assert(lambda >= 0. && lambda <= 1.);
85 assert(norm >0 && norm < 3);
// Ipopt::TNLP overload: report problem dimensions and sparsity counts.
// Per the member index below, forwards to the wrapped NLP and augments
// the counts with the FP-specific additions (extra constraint/Hessian
// entries).
92 virtual bool get_nlp_info(Ipopt::Index& n, Ipopt::Index& m, Ipopt::Index& nnz_jac_g,
93 Ipopt::Index& nnz_h_lag, Ipopt::TNLP::IndexStyleEnum& index_style);
// Ipopt::TNLP overload: variable and constraint bounds.
// Per the member index below, this call is just passed onto tnlp_.
97 virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number* x_l, Ipopt::Number* x_u,
98 Ipopt::Index m, Ipopt::Number* g_l, Ipopt::Number* g_u);
// NOTE(review): the opening of this inline get_starting_point definition
// (the n / init_x / x parameters and the function name) is missing from
// this view — extraction loss; do not edit the signature blindly.
// Visible body: zeroes the multiplier of the extra FP constraint at
// index m2 (guarding against a NULL lambda array), then delegates to the
// wrapped tnlp_ with the reduced constraint count m2 — presumably
// m2 = m minus the adapter-added constraint(s); TODO confirm where m2
// is computed in the missing lines.
103 bool init_z, Ipopt::Number* z_L, Ipopt::Number* z_U,
104 Ipopt::Index m,
bool init_lambda,
105 Ipopt::Number* lambda)
110 if(lambda!=NULL)lambda[m2] = 0;
114 if(lambda!= NULL)lambda[m2] = 0;
116 int ret_code =
tnlp_->get_starting_point(n, init_x, x,
117 init_z, z_L, z_U, m2, init_lambda, lambda);
// Ipopt::TNLP overload: objective value. Per the member index below,
// returns the modified (feasibility-pump) objective.
122 virtual bool eval_f(Ipopt::Index n,
const Ipopt::Number* x,
bool new_x,
123 Ipopt::Number& obj_value);
// Ipopt::TNLP overload: gradient of the (modified) objective w.r.t. x.
127 virtual bool eval_grad_f(Ipopt::Index n,
const Ipopt::Number* x,
bool new_x,
128 Ipopt::Number* grad_f);
// Ipopt::TNLP overload: constraint body values g(x).
132 virtual bool eval_g(Ipopt::Index n,
const Ipopt::Number* x,
bool new_x,
133 Ipopt::Index m, Ipopt::Number* g);
// Ipopt::TNLP overload: sparse Jacobian of g. With values==NULL Ipopt
// requests only the (iRow, jCol) sparsity structure.
136 virtual bool eval_jac_g(Ipopt::Index n,
const Ipopt::Number* x,
bool new_x,
137 Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index* iRow,
138 Ipopt::Index *jCol, Ipopt::Number* values);
// Ipopt::TNLP overload: Hessian of the Lagrangian, modified for the FP
// objective (per the member index below). values==NULL requests only
// the sparsity structure.
141 virtual bool eval_h(Ipopt::Index n,
const Ipopt::Number* x,
bool new_x,
142 Ipopt::Number obj_factor, Ipopt::Index m,
const Ipopt::Number* lambda,
143 bool new_lambda, Ipopt::Index nele_hess,
144 Ipopt::Index* iRow, Ipopt::Index* jCol, Ipopt::Number* values);
// NOTE(review): the opening of this finalize_solution declaration
// (the `virtual void finalize_solution(Ipopt::SolverReturn status,` part)
// is missing from this view — extraction loss. Called by Ipopt when the
// algorithm is complete so the TNLP can store/write the solution.
151 Ipopt::Index n,
const Ipopt::Number* x,
const Ipopt::Number* z_L,
const Ipopt::Number* z_U,
152 Ipopt::Index m,
const Ipopt::Number* g,
const Ipopt::Number* lambda,
153 Ipopt::Number obj_value,
154 const Ipopt::IpoptData* ip_data,
155 Ipopt::IpoptCalculatedQuantities* ip_cq);
// NOTE(review): detached body fragments of get_variables_linearity and
// get_constraints_linearity; their signatures and surrounding control
// flow are missing from this view. The stray double semicolon on the
// first line is redundant but harmless — worth removing in the original.
// Visible logic: the adapter-added constraint at index m2 is flagged
// NON_LINEAR in one branch (presumably the cutoff constraint) and LINEAR
// in another (presumably local branching) — TODO confirm the branch
// conditions — before delegating the first m2 entries to tnlp_.
160 return tnlp_->get_variables_linearity(n, var_types);;
171 const_types[m2] = Ipopt::TNLP::NON_LINEAR;
175 const_types[m2] = Ipopt::TNLP::LINEAR;
177 return tnlp_->get_constraints_linearity(m2, const_types);
double lambda_
Value of the convex-combination parameter between the original objective and the distance function (in [0, 1]).
void set_cutoff(Ipopt::Number cutoff)
Set the cutoff value to use in the cutoff constraint.
void set_dist_to_point_obj(size_t n, const Ipopt::Number *vals, const Ipopt::Index *inds)
Set the point to which distance is minimized.
Ipopt::TNLP::IndexStyleEnum index_style_
Ipopt::Index style of the wrapped TNLP (C-style 0-based or Fortran-style 1-based).
virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, Ipopt::TNLP::IndexStyleEnum &index_style)
get info from nlp_ and add hessian information
void use(Ipopt::SmartPtr< TNLP > tnlp)
void set_use_feasibility_pump_objective(bool use_feasibility_pump_objective)
Flag to indicate that we want to use the feasibility pump objective.
virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value)
overloaded to return the value of the objective function
virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
overload to return the jacobian of g
virtual void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, const Ipopt::Number *g, const Ipopt::Number *lambda, Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
This method is called when the algorithm is complete so the TNLP can store/write the solution...
virtual bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values)
Evaluate the modified Hessian of the Lagrangian.
vector< Ipopt::Index > inds_
Indices of the variables for which distance is minimized (i.e. indices of integer variables in a feasibility pump setting).
virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f)
overload this method to return the vector of the gradient of the objective w.r.t. ...
TNLP2FPNLP()
Default Constructor.
Ipopt::SmartPtr< TNLP > tnlp_
pointer to the tminlp that is being adapted
double dist_to_point(const Ipopt::Number *x)
Compute the norm-2 distance from the current point to the point to which distance is minimized.
void setObjectiveScaling(double value)
double objectiveScalingFactor_
Scaling factor for the objective.
virtual bool get_variables_linearity(Ipopt::Index n, LinearityType *var_types)
void set_use_cutoff_constraint(bool use_cutoff_constraint)
Flag to indicate that we want to use a cutoff constraint. This constraint has the form f(x) <= (1 - epsilon) * cutoff.
void setNorm(int norm)
Set the norm to use for the distance function (L_1 or L_2).
bool use_feasibility_pump_objective_
Flag to indicate that we want to use the feasibility pump objective.
void setLambda(double lambda)
Set the value for lambda.
virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g)
overload to return the values of the left-hand side of the constraints
bool use_cutoff_constraint_
Flag to indicate that we want to use a cutoff constraint. This constraint has the form f(x) <= (1 - epsilon) * cutoff.
int norm_
Norm to use (L_1 or L_2).
void set_rhs_local_branching_constraint(double rhs_local_branching_constraint)
Set the rhs of the local branching constraint.
vector< Ipopt::Number > vals_
Values of the point to which we separate (if x is the point vals_[i] should be x[inds_[i]] ) ...
virtual ~TNLP2FPNLP()
Default destructor.
virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda)
Passed onto tnlp_.
double rhs_local_branching_constraint_
RHS of local branching constraint.
double sigma_
Scaling for the original objective.
void operator=(const TNLP2FPNLP &)
Overloaded assignment operator.
virtual bool get_constraints_linearity(Ipopt::Index m, LinearityType *const_types)
overload this method to return the constraint linearity.
void set_use_local_branching_constraint(bool use_local_branching_constraint)
Flag to indicate that we want to use a local branching constraint.
double cutoff_
Value of best solution known.
bool use_local_branching_constraint_
Flag to indicate that we want to use a local branching constraint.
This is an adapter class to convert an NLP to a Feasibility Pump NLP by changing the objective functi...
double getObjectiveScaling() const
virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
This call is just passed onto tnlp_.
void setSigma(double sigma)
Set the value for sigma.