00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "BonTNLP2FPNLP.hpp"
00012
00013 using namespace Ipopt;
00014
00015 namespace Bonmin
00016 {
/** Standard constructor: wraps \p tnlp so that its objective can be replaced
 *  by a feasibility-pump distance objective and extra rows (cutoff /
 *  local-branching constraints) can be appended to its constraint system.
 *
 *  All feature flags start disabled, so the wrapper initially behaves as a
 *  pass-through for the underlying problem.
 *
 *  \param tnlp                    problem to wrap (held by SmartPtr).
 *  \param objectiveScalingFactor  multiplier applied to the distance term of
 *                                 the feasibility-pump objective.
 */
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
    tnlp_(tnlp),
    inds_(),                                   // indices of the integer variables (empty until set_dist_to_point_obj)
    vals_(),                                   // rounded values the distance is measured against
    lambda_(1.),                               // weight of the distance term (1 => pure distance objective)
    sigma_(1.),                                // extra scaling on the original objective part
    norm_(2),                                  // distance norm used: 2 (squared Euclidean) or 1
    objectiveScalingFactor_(objectiveScalingFactor),
    use_feasibility_pump_objective_(false),
    use_cutoff_constraint_(false),
    use_local_branching_constraint_(false),
    cutoff_(COIN_DBL_MAX),                     // no cutoff until set_cutoff is called
    rhs_local_branching_constraint_(COIN_DBL_MAX),
    index_style_(TNLP::C_STYLE)                // refreshed from the wrapped TNLP in get_nlp_info
{}
00032
/** Constructor from another wrapper: wraps \p tnlp (possibly a different
 *  problem) while copying every feasibility-pump setting — reference point,
 *  weights, norm, flags and bounds — from \p other.
 *
 *  \param tnlp   problem to wrap.
 *  \param other  existing wrapper whose configuration is duplicated.
 */
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other):
    tnlp_(tnlp),
    inds_(other->inds_),
    vals_(other->vals_),
    lambda_(other->lambda_),
    sigma_(other->sigma_),
    norm_(other->norm_),
    objectiveScalingFactor_(other->objectiveScalingFactor_),
    use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
    use_cutoff_constraint_(other->use_cutoff_constraint_),
    use_local_branching_constraint_(other->use_local_branching_constraint_),
    cutoff_(other->cutoff_),
    rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
    index_style_(other->index_style_)
{}
00048
/** Destructor: nothing to release explicitly — tnlp_ is a SmartPtr and the
 *  index/value vectors clean up themselves. */
TNLP2FPNLP::~TNLP2FPNLP()
{
}
00052
00053 void
00054 TNLP2FPNLP::set_cutoff(Number cutoff)
00055 {
00056 Number epsilon = 1.0e-6;
00057 if(cutoff > 1.0e-8)
00058 cutoff_ = (1-epsilon) * cutoff;
00059 else if(cutoff < -1.0e-8)
00060 cutoff_ = (1+epsilon) * cutoff;
00061 else
00062 cutoff_ = - epsilon;
00063 }
00064
00065 void
00066 TNLP2FPNLP::set_dist_to_point_obj(size_t n, const Number * vals, const Index * inds)
00067 {
00068 inds_.resize(n);
00069 vals_.resize(n);
00070 std::copy(vals, vals + n, vals_.begin());
00071 std::copy(inds, inds + n, inds_.begin());
00072 }
00073
00075 double
00076 TNLP2FPNLP::dist_to_point(const Number *x)
00077 {
00078 double ret_val = 0;
00079 assert(vals_.size() == inds_.size());
00080 if(norm_ == 2){
00081 for(unsigned int i = 0; i < vals_.size() ; i++) {
00082 ret_val += ( x[inds_[i]] - vals_[i] ) * ( x[inds_[i]] - vals_[i] );
00083 }
00084 }
00085 else if(norm_ == 1){
00086 for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00087 if(vals_[i] <= 0.1)
00088 ret_val += x[inds_[i]];
00089 else{
00090 ret_val += (1.0 - x[inds_[i]]);
00091
00092 }
00093 }
00094 }
00095 return ret_val;
00096 }
00097
00098 bool
00099 TNLP2FPNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
00100 Index& nnz_h_lag,
00101 TNLP::IndexStyleEnum& index_style)
00102 {
00103 bool ret_code = tnlp_->get_nlp_info(n, m , nnz_jac_g, nnz_h_lag,
00104 index_style);
00105
00106 index_style_ = index_style;
00107
00108
00109 if(use_feasibility_pump_objective_ && norm_ == 2)
00110 nnz_h_lag += (int)vals_.size();
00111
00112 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00113 m += 2;
00114 nnz_jac_g += (n + (int)vals_.size());
00115 }
00116 else if(use_cutoff_constraint_) {
00117 m++;
00118 nnz_jac_g += n;
00119 }
00120 else if(use_local_branching_constraint_) {
00121 m++;
00122 nnz_jac_g += (int)vals_.size();
00123 }
00124
00125 return ret_code;
00126 }
00127
00128 bool
00129 TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
00130 Index m, Number* g_l, Number* g_u)
00131 {
00132 bool ret_code;
00133
00134 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00135 ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-2, g_l, g_u);
00136 g_l[m-2] = - COIN_DBL_MAX;
00137 g_u[m-2] = cutoff_;
00138 g_l[m-1] = - COIN_DBL_MAX;
00139 g_u[m-1] = rhs_local_branching_constraint_;
00140 }
00141 else if(use_cutoff_constraint_) {
00142 ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
00143 g_l[m-1] = - COIN_DBL_MAX;
00144 g_u[m-1] = cutoff_;
00145 }
00146 else if(use_local_branching_constraint_) {
00147 ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
00148 g_l[m-1] = - COIN_DBL_MAX;
00149 g_u[m-1] = rhs_local_branching_constraint_;
00150 }
00151 else
00152 ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m, g_l, g_u);
00153
00154 return ret_code;
00155 }
00156
00157 bool
00158 TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x,
00159 Number& obj_value)
00160 {
00161 bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);
00162
00163 if(use_feasibility_pump_objective_) {
00164 obj_value *= (1 - lambda_) * sigma_;
00165 obj_value += objectiveScalingFactor_*lambda_*dist_to_point(x);
00166 }
00167
00168 return ret_code;
00169 }
00170
00171 bool
00172 TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x,
00173 Number* grad_f)
00174 {
00175 bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);
00176
00177 if(use_feasibility_pump_objective_) {
00178 for(int i = 0 ; i < n ; i++) {
00179 grad_f[i] *= (1-lambda_) * sigma_;
00180 }
00181 if(norm_ ==2){
00182 for(unsigned int i = 0 ; i < inds_.size() ; i++) {
00183 grad_f[inds_[i]] += objectiveScalingFactor_*2*lambda_*( x[inds_[i]] - vals_[i] );
00184 }
00185 }
00186 else{
00187 for(unsigned int i = 0 ; i < inds_.size() ; i++) {
00188 if(vals_[i] <= 0.1)
00189 grad_f[inds_[i]] += objectiveScalingFactor_*lambda_;
00190 else
00191 grad_f[inds_[i]] -= objectiveScalingFactor_*lambda_;
00192 }
00193 }
00194 }
00195
00196 return ret_code;
00197 }
00198
00199 bool
00200 TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x,
00201 Index m, Number* g)
00202 {
00203 bool ret_code;
00204
00205 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00206 ret_code = tnlp_->eval_g(n, x, new_x, m-2, g);
00207
00208 Number obj_value;
00209 if(eval_f(n, x, new_x, obj_value))
00210 g[m-2] = obj_value;
00211 else
00212 ret_code = false;
00213
00214 Number g_local_branching = 0.0;
00215 for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00216 if(vals_[i] <= 0.1)
00217 g_local_branching += x[inds_[i]];
00218 else
00219 g_local_branching += (1.0 - x[inds_[i]]);
00220 }
00221 g[m-1] = g_local_branching;
00222 }
00223 else if(use_cutoff_constraint_) {
00224 ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
00225 Number obj_value;
00226 if(eval_f(n, x, new_x, obj_value))
00227 g[m-1] = obj_value;
00228 else
00229 ret_code = false;
00230 }
00231 else if(use_local_branching_constraint_) {
00232 ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
00233 Number g_local_branching = 0.0;
00234 for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00235 if(vals_[i] <= 0.1)
00236 g_local_branching += x[inds_[i]];
00237 else
00238 g_local_branching += (1.0 - x[inds_[i]]);
00239 }
00240 g[m-1] = g_local_branching;
00241 }
00242 else
00243 ret_code = tnlp_->eval_g(n, x, new_x, m, g);
00244
00245 return ret_code;
00246 }
00247
00248 bool
00249 TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
00250 Index m, Index nele_jac, Index* iRow,
00251 Index *jCol, Number* values)
00252 {
00253 bool ret_code;
00254
00255 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00256 int n_integers = (int)vals_.size();
00257 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n - n_integers,
00258 iRow, jCol, values);
00259
00260 if (iRow && jCol && !values) {
00261 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00262
00263 int k = nele_jac - n - n_integers;
00264 iRow += k;
00265 jCol += k;
00266 for(int i = 0; i< n; i++) {
00267 iRow[i] = m - 2 + index_correction;
00268 jCol[i] = i + index_correction;
00269 }
00270
00271 k = nele_jac - n_integers;
00272 iRow += k;
00273 jCol += k;
00274 for(int i = 0; i< n_integers; i++) {
00275 iRow[i] = m - 1 + index_correction;
00276 jCol[i] = inds_[i] + index_correction;
00277 }
00278 }
00279 else if (!iRow & !jCol && values) {
00280
00281 Number* grad_f = new Number[n];
00282 bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
00283 if(ret_code_grad_f) {
00284 int k = nele_jac - n - n_integers;
00285 values += k;
00286 for(int i = 0; i< n; i++) {
00287 values[i] = grad_f[i];
00288 }
00289 }
00290 else
00291 ret_code = false;
00292 delete [] grad_f;
00293
00294 int k = nele_jac - n_integers;
00295 values += k;
00296 for(int i = 0; i< n_integers; i++) {
00297 if(vals_[i] <= 0.1)
00298 values[i] = 1.0;
00299 else
00300 values[i] = -1.0;
00301 }
00302 }
00303 else {
00304 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00305 }
00306 }
00307 else if(use_cutoff_constraint_) {
00308 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n,
00309 iRow, jCol, values);
00310
00311 if (iRow && jCol && !values) {
00312 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00313 int k = nele_jac - n;
00314 iRow += k;
00315 jCol += k;
00316 for(int i = 0; i< n; i++) {
00317 iRow[i] = m - 1 + index_correction;
00318 jCol[i] = i + index_correction;
00319 }
00320 }
00321 else if (!iRow & !jCol && values) {
00322 Number* grad_f = new Number[n];
00323 bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
00324 if(ret_code_grad_f) {
00325 int k = nele_jac - n;
00326 values += k;
00327 for(int i = 0; i< n; i++) {
00328 values[i] = grad_f[i];
00329 }
00330 }
00331 else
00332 ret_code = false;
00333 delete [] grad_f;
00334 }
00335 else {
00336 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00337 }
00338 }
00339 else if(use_local_branching_constraint_) {
00340 int n_integers = (int)vals_.size();
00341 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n_integers,
00342 iRow, jCol, values);
00343
00344 if (iRow && jCol && !values) {
00345 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00346 int k = nele_jac - n_integers;
00347 iRow += k;
00348 jCol += k;
00349 for(int i = 0; i< n_integers; i++) {
00350 iRow[i] = m - 1 + index_correction;
00351 jCol[i] = inds_[i] + index_correction;
00352 }
00353 }
00354 else if (!iRow & !jCol && values) {
00355 int k = nele_jac - n_integers;
00356 values += k;
00357 for(int i = 0; i< n_integers; i++) {
00358 if(vals_[i] <= 0.1)
00359 values[i] = 1.0;
00360 else
00361 values[i] = -1.0;
00362 }
00363 }
00364 else {
00365 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00366 }
00367 }
00368 else
00369 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac,
00370 iRow, jCol, values);
00371
00372 return ret_code;
00373 }
00374
00375 bool
00376 TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
00377 Number obj_factor, Index m, const Number* lambda,
00378 bool new_lambda, Index nele_hess,
00379 Index* iRow, Index* jCol, Number* values)
00380 {
00381 bool ret_code;
00382
00383 int nnz_obj_h = (norm_ == 2) ? (int)inds_.size() : 0;
00384
00385 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00386 double coef_obj = (iRow != NULL)?0 : lambda[m - 2];
00387 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
00388 m - 2, lambda, new_lambda, nele_hess - nnz_obj_h,
00389 iRow, jCol, values);
00390 }
00391 else if(use_cutoff_constraint_) {
00392 double coef_obj = (iRow != NULL)?0 : lambda[m - 1];
00393 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
00394 m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
00395 iRow, jCol, values);
00396 }
00397 else if(use_local_branching_constraint_) {
00398 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
00399 m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
00400 iRow, jCol, values);
00401 }
00402 else {
00403 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
00404 m, lambda, new_lambda, nele_hess - nnz_obj_h,
00405 iRow, jCol, values);
00406 }
00407
00408
00409 if(use_feasibility_pump_objective_ && norm_ == 2){
00410 if (iRow && jCol && !values)
00411 {
00412 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00413 int k = nele_hess - nnz_obj_h;
00414 iRow += k;
00415 jCol += k;
00416 for(unsigned int i = 0; i < inds_.size() ; i++)
00417 {
00418 iRow[i] = inds_[i] + index_correction;
00419 jCol[i] = inds_[i] + index_correction;
00420 }
00421 DBG_ASSERT(k==nele_hess);
00422 }
00423 else if (!iRow & !jCol && values)
00424 {
00425 int k = nele_hess - nnz_obj_h;
00426 values += k;
00427 for(unsigned int i = 0; i < inds_.size() ; i++)
00428 {
00429 values[i] = 2* objectiveScalingFactor_* lambda_* obj_factor;
00430 }
00431 DBG_ASSERT(k==nele_hess);
00432 }
00433 else
00434 {
00435 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00436 }
00437 }
00438
00439 return ret_code;
00440 }
00441
00442 void
00443 TNLP2FPNLP::finalize_solution(SolverReturn status,
00444 Index n, const Number* x, const Number* z_L, const Number* z_U,
00445 Index m, const Number* g, const Number* lambda,
00446 Number obj_value,
00447 const IpoptData* ip_data,
00448 IpoptCalculatedQuantities* ip_cq)
00449 {
00450 int m2 = m;
00451 if(use_cutoff_constraint_) {
00452 m2--;
00453 }
00454 if(use_local_branching_constraint_) {
00455 m2--;
00456 }
00457 tnlp_->finalize_solution(status,n, x, z_L, z_U,m2, g, lambda, obj_value,
00458 ip_data, ip_cq);
00459 }
00460 }
00461