#ifndef _TNLP2FPNLP_HPP_
#define _TNLP2FPNLP_HPP_

#include "IpTNLP.hpp"
#include "BonTMINLP.hpp"
#include "IpSmartPtr.hpp"
#include "BonTypes.hpp"

#include <cassert> // for the asserts in the inline setters below

namespace Bonmin
{
  /** Adapter that turns a TNLP into the subproblem solved at each iteration of
   *  a feasibility pump: the objective becomes a scaled distance to a given
   *  (typically rounded) point, and a cutoff or local-branching constraint can
   *  optionally be appended to the original constraints.
   */
  class TNLP2FPNLP : public Ipopt::TNLP
  {
  public:
    /** Build from tnlp; objectiveScalingFactor scales the feasibility-pump objective. */
    TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor = 100);

    /** Build from tnlp, reusing the feasibility-pump data of other. */
    TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other);

    /** Destructor. */
    virtual ~TNLP2FPNLP();

    /** Enable or disable the feasibility-pump (distance) objective. */
    void set_use_feasibility_pump_objective(bool use_feasibility_pump_objective)
    {
      use_feasibility_pump_objective_ = use_feasibility_pump_objective;
    }

    /** Enable or disable the cutoff constraint on the original objective. */
    void set_use_cutoff_constraint(bool use_cutoff_constraint)
    {
      use_cutoff_constraint_ = use_cutoff_constraint;
    }

    /** Enable or disable the local branching constraint. */
    void set_use_local_branching_constraint(bool use_local_branching_constraint)
    {
      use_local_branching_constraint_ = use_local_branching_constraint;
    }

    /** Set the cutoff value used in the cutoff constraint. */
    void set_cutoff(Number cutoff);

    /** Set the right-hand side of the local branching constraint. */
    void set_rhs_local_branching_constraint(double rhs_local_branching_constraint)
    {
      assert(rhs_local_branching_constraint >= 0);
      rhs_local_branching_constraint_ = rhs_local_branching_constraint;
    }

    /** Set the point the distance is measured to: variable inds[i] has target
        value vals[i], for i = 0, ..., n-1. */
    void set_dist2point_obj(int n, const Number * vals, const Index * inds);

    /** Set sigma, the weight of the distance term in the objective. */
    void setSigma(double sigma)
    {
      assert(sigma >= 0.);
      sigma_ = sigma;
    }

    /** Set lambda, the parameter blending the original objective with the
        distance term (must be in [0,1]). */
    void setLambda(double lambda)
    {
      assert(lambda >= 0. && lambda <= 1.);
      lambda_ = lambda;
    }

    /** Set the norm used to measure the distance (1 or 2). */
    void setNorm(int norm)
    {
      assert(norm > 0 && norm < 3);
      norm_ = norm;
    }

    /** Return the dimensions and nonzero counts of the modified problem. */
    virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
                              Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style);

    /** Return the variable and constraint bounds of the modified problem. */
    virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
                                 Index m, Number* g_l, Number* g_u);

    /** Forward the starting-point request to the underlying TNLP. */
    virtual bool get_starting_point(Index n, bool init_x, Number* x,
                                    bool init_z, Number* z_L, Number* z_U,
                                    Index m, bool init_lambda,
                                    Number* lambda)
    {
      return tnlp_->get_starting_point(n, init_x, x,
                                       init_z, z_L, z_U, m, init_lambda, lambda);
    }

    /** Evaluate the (modified) objective. */
    virtual bool eval_f(Index n, const Number* x, bool new_x,
                        Number& obj_value);

    /** Evaluate the gradient of the (modified) objective. */
    virtual bool eval_grad_f(Index n, const Number* x, bool new_x,
                             Number* grad_f);

    /** Evaluate the constraints. */
    virtual bool eval_g(Index n, const Number* x, bool new_x,
                        Index m, Number* g);

    /** Evaluate the Jacobian of the constraints. */
    virtual bool eval_jac_g(Index n, const Number* x, bool new_x,
                            Index m, Index nele_jac, Index* iRow,
                            Index *jCol, Number* values);

    /** Evaluate the Hessian of the Lagrangian. */
    virtual bool eval_h(Index n, const Number* x, bool new_x,
                        Number obj_factor, Index m, const Number* lambda,
                        bool new_lambda, Index nele_hess,
                        Index* iRow, Index* jCol, Number* values);

    /** Called by Ipopt at the end of the optimization with the final solution. */
    virtual void finalize_solution(SolverReturn status,
                                   Index n, const Number* x, const Number* z_L, const Number* z_U,
                                   Index m, const Number* g, const Number* lambda,
                                   Number obj_value,
                                   const IpoptData* ip_data,
                                   IpoptCalculatedQuantities* ip_cq);

    /** Set the scaling factor applied to the feasibility-pump objective. */
    void setObjectiveScaling(double value)
    {
      objectiveScalingFactor_ = value;
    }

    /** Get the scaling factor applied to the feasibility-pump objective. */
    double getObjectiveScaling() const
    {
      return objectiveScalingFactor_;
    }

  private:
    /** Compute the distance from x to the point set with set_dist2point_obj. */
    double dist2point(const Number *x);

    /** Default constructor (kept private). */
    TNLP2FPNLP();

    /** Copy constructor (kept private). */
    TNLP2FPNLP(const TNLP2FPNLP&);

    /** Assignment operator (kept private). */
    void operator=(const TNLP2FPNLP&);

    /** Pointer to the original TNLP. */
    SmartPtr<TNLP> tnlp_;

    /** Indices of the variables entering the distance term. */
    vector<Index> inds_;
    /** Target values of those variables. */
    vector<Number> vals_;
    /** Parameter blending the original objective with the distance term. */
    double lambda_;
    /** Weight of the distance term. */
    double sigma_;
    /** Norm used for the distance (1 or 2). */
    int norm_;

    /** Scaling factor applied to the objective. */
    double objectiveScalingFactor_;

    /** Whether the feasibility-pump objective is used. */
    bool use_feasibility_pump_objective_;

    /** Whether a cutoff constraint is appended. */
    bool use_cutoff_constraint_;

    /** Whether a local branching constraint is appended. */
    bool use_local_branching_constraint_;

    /** Cutoff value for the cutoff constraint. */
    double cutoff_;

    /** Right-hand side of the local branching constraint. */
    double rhs_local_branching_constraint_;

    /** Index style (C or Fortran) of the underlying TNLP. */
    TNLP::IndexStyleEnum index_style_;
  };
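
  /* Usage sketch (illustrative only, not part of the original header): how a
   * feasibility-pump iteration might set this adapter up before handing it to
   * Ipopt.  The names original_tnlp, n_int, rounded_vals and integer_inds are
   * hypothetical placeholders.
   *
   *   Ipopt::SmartPtr<Bonmin::TNLP2FPNLP> fp_nlp =
   *       new Bonmin::TNLP2FPNLP(original_tnlp);         // wrap the original NLP
   *   fp_nlp->set_use_feasibility_pump_objective(true);  // minimize distance to a rounding
   *   fp_nlp->setNorm(1);                                // measure distance with the 1-norm
   *   fp_nlp->setSigma(1.);                              // weight of the distance term
   *   fp_nlp->setLambda(0.);                             // blending parameter in [0,1]
   *   fp_nlp->set_dist2point_obj(n_int, rounded_vals, integer_inds); // current rounding
   *   // fp_nlp can then be given to an Ipopt::IpoptApplication like any other TNLP.
   */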

} // namespace Bonmin

#endif