00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "BonTNLP2FPNLP.hpp"
00012 #include "IpBlas.hpp"
00013
00014 using namespace Ipopt;
00015
00016 namespace Bonmin
00017 {
/** Construct a feasibility-pump wrapper around @p tnlp.
 *
 *  Defaults: pure distance objective (lambda_ = 1), no restriction
 *  (sigma_ = 1), 2-norm distance, no cutoff or local-branching
 *  constraint, and an unset (infinite) cutoff / local-branching RHS.
 *  @param tnlp                   problem to wrap (shared via SmartPtr)
 *  @param objectiveScalingFactor scaling applied to the distance term
 *                                added to the objective
 */
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
  tnlp_(tnlp),
  inds_(),
  vals_(),
  lambda_(1.),
  sigma_(1.),
  norm_(2),
  objectiveScalingFactor_(objectiveScalingFactor),
  use_feasibility_pump_objective_(false),
  use_cutoff_constraint_(false),
  use_local_branching_constraint_(false),
  cutoff_(COIN_DBL_MAX),
  rhs_local_branching_constraint_(COIN_DBL_MAX),
  index_style_(TNLP::C_STYLE)
{}
00033
/** Construct a wrapper around @p tnlp, copying all feasibility-pump
 *  settings (reference point, weights, norm, constraint flags, cutoff,
 *  index style) from @p other.
 *  Note: the wrapped problem is @p tnlp, not other->tnlp_.
 */
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other):
  tnlp_(tnlp),
  inds_(other->inds_),
  vals_(other->vals_),
  lambda_(other->lambda_),
  sigma_(other->sigma_),
  norm_(other->norm_),
  objectiveScalingFactor_(other->objectiveScalingFactor_),
  use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
  use_cutoff_constraint_(other->use_cutoff_constraint_),
  use_local_branching_constraint_(other->use_local_branching_constraint_),
  cutoff_(other->cutoff_),
  rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
  index_style_(other->index_style_)
{}
00049
/** Destructor. The wrapped TNLP is held by a SmartPtr and is released
 *  automatically; nothing to do here. */
TNLP2FPNLP::~TNLP2FPNLP()
{
}
00053
00054 void
00055 TNLP2FPNLP::set_cutoff(Number cutoff)
00056 {
00057 Number epsilon = 1.0e-6;
00058 if(cutoff > 1.0e-8)
00059 cutoff_ = (1-epsilon) * cutoff;
00060 else if(cutoff < -1.0e-8)
00061 cutoff_ = (1+epsilon) * cutoff;
00062 else
00063 cutoff_ = - epsilon;
00064 }
00065
/** Set the reference point for the distance objective.
 *  @param n    number of tracked variables
 *  @param vals reference values (copied into vals_ via BLAS dcopy)
 *  @param inds variable indices (copied into inds_)
 *  Typically the tracked variables are the integer variables and vals
 *  holds their rounded values.
 */
void
TNLP2FPNLP::set_dist2point_obj(int n, const Number * vals, const Index * inds)
{
  inds_.resize(n);
  vals_.resize(n);
  IpBlasDcopy(n, vals, 1, vals_(), 1);
  CoinCopyN(inds, n, inds_());
}
00074
00076 double
00077 TNLP2FPNLP::dist2point(const Number *x)
00078 {
00079 double ret_val = 0;
00080 assert(vals_.size() == inds_.size());
00081 if(norm_ == 2){
00082 for(unsigned int i = 0; i < vals_.size() ; i++) {
00083 ret_val += ( x[inds_[i]] - vals_[i] ) * ( x[inds_[i]] - vals_[i] );
00084 }
00085 }
00086 else if(norm_ == 1){
00087 for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00088 if(vals_[i] <= 0.1)
00089 ret_val += x[inds_[i]];
00090 else{
00091 ret_val += (1.0 - x[inds_[i]]);
00092
00093 }
00094 }
00095 }
00096 return ret_val;
00097 }
00098
/** Collect problem sizes from the wrapped TNLP and enlarge them for the
 *  structures this wrapper adds:
 *  - one diagonal Hessian entry per tracked variable when the 2-norm
 *    feasibility-pump objective is active;
 *  - one dense Jacobian row for the cutoff constraint and/or one sparse
 *    row (tracked variables only) for the local-branching constraint.
 */
bool
TNLP2FPNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
    Index& nnz_h_lag,
    TNLP::IndexStyleEnum& index_style)
{
  bool ret_code = tnlp_->get_nlp_info(n, m , nnz_jac_g, nnz_h_lag,
      index_style);

  // Remember the index base (C vs Fortran) for later structure callbacks.
  index_style_ = index_style;


  // 2-norm distance term adds a diagonal Hessian entry per tracked variable.
  if(use_feasibility_pump_objective_ && norm_ == 2)
    nnz_h_lag += vals_.size();

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    m += 2;
    // Cutoff row is dense (n entries); local-branching row only touches
    // the tracked variables.
    nnz_jac_g += (n + vals_.size());
  }
  else if(use_cutoff_constraint_) {
    m++;
    nnz_jac_g += n;
  }
  else if(use_local_branching_constraint_) {
    m++;
    nnz_jac_g += vals_.size();
  }

  return ret_code;
}
00128
/** Forward bound queries to the wrapped TNLP (with its own row count),
 *  then fill in the bounds of the appended rows:
 *  cutoff row:          -inf <= f(x) <= cutoff_
 *  local-branching row: -inf <= sum  <= rhs_local_branching_constraint_
 *  When both are present the cutoff row is m-2 and the local-branching
 *  row is m-1, matching eval_g / eval_jac_g.
 */
bool
TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
    Index m, Number* g_l, Number* g_u)
{
  bool ret_code;

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-2, g_l, g_u);
    g_l[m-2] = - COIN_DBL_MAX;
    g_u[m-2] = cutoff_;
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = rhs_local_branching_constraint_;
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = cutoff_;
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = rhs_local_branching_constraint_;
  }
  else
    ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m, g_l, g_u);

  return ret_code;
}
00157
00158 bool
00159 TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x,
00160 Number& obj_value)
00161 {
00162 bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);
00163
00164 if(use_feasibility_pump_objective_) {
00165 obj_value *= (1 - lambda_) * sigma_;
00166 obj_value += objectiveScalingFactor_*lambda_*dist2point(x);
00167 }
00168
00169 return ret_code;
00170 }
00171
00172 bool
00173 TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x,
00174 Number* grad_f)
00175 {
00176 bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);
00177
00178 if(use_feasibility_pump_objective_) {
00179 for(int i = 0 ; i < n ; i++) {
00180 grad_f[i] *= (1-lambda_) * sigma_;
00181 }
00182 if(norm_ ==2){
00183 for(unsigned int i = 0 ; i < inds_.size() ; i++) {
00184 grad_f[inds_[i]] += objectiveScalingFactor_*2*lambda_*( x[inds_[i]] - vals_[i] );
00185 }
00186 }
00187 else{
00188 for(unsigned int i = 0 ; i < inds_.size() ; i++) {
00189 if(vals_[i] <= 0.1)
00190 grad_f[inds_[i]] += objectiveScalingFactor_*lambda_;
00191 else
00192 grad_f[inds_[i]] -= objectiveScalingFactor_*lambda_;
00193 }
00194 }
00195 }
00196
00197 return ret_code;
00198 }
00199
/** Constraint values: delegate the original rows to the wrapped TNLP,
 *  then append
 *  - the cutoff row, set to the (blended) objective value via eval_f,
 *  - the local-branching row, sum of x (vars rounded to 0) and 1 - x
 *    (vars rounded to 1) over the tracked variables.
 *  Row layout matches get_bounds_info: with both constraints active the
 *  cutoff row is m-2 and the local-branching row is m-1.
 */
bool
TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x,
    Index m, Number* g)
{
  bool ret_code;

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m-2, g);

    // Cutoff row: uses this wrapper's eval_f, i.e. the blended objective.
    Number obj_value;
    if(eval_f(n, x, new_x, obj_value))
      g[m-2] = obj_value;
    else
      ret_code = false;

    Number g_local_branching = 0.0;
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        g_local_branching += x[inds_[i]];
      else
        g_local_branching += (1.0 - x[inds_[i]]);
    }
    g[m-1] = g_local_branching;
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
    Number obj_value;
    if(eval_f(n, x, new_x, obj_value))
      g[m-1] = obj_value;
    else
      ret_code = false;
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
    Number g_local_branching = 0.0;
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        g_local_branching += x[inds_[i]];
      else
        g_local_branching += (1.0 - x[inds_[i]]);
    }
    g[m-1] = g_local_branching;
  }
  else
    ret_code = tnlp_->eval_g(n, x, new_x, m, g);

  return ret_code;
}
00248
00249 bool
00250 TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
00251 Index m, Index nele_jac, Index* iRow,
00252 Index *jCol, Number* values)
00253 {
00254 bool ret_code;
00255
00256 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00257 int n_integers = vals_.size();
00258 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n - n_integers,
00259 iRow, jCol, values);
00260
00261 if (iRow && jCol && !values) {
00262 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00263
00264 int k = nele_jac - n - n_integers;
00265 iRow += k;
00266 jCol += k;
00267 for(int i = 0; i< n; i++) {
00268 iRow[i] = m - 2 + index_correction;
00269 jCol[i] = i + index_correction;
00270 }
00271
00272 k = nele_jac - n_integers;
00273 iRow += k;
00274 jCol += k;
00275 for(int i = 0; i< n_integers; i++) {
00276 iRow[i] = m - 1 + index_correction;
00277 jCol[i] = inds_[i] + index_correction;
00278 }
00279 }
00280 else if (!iRow & !jCol && values) {
00281
00282 Number* grad_f = new Number[n];
00283 bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
00284 if(ret_code_grad_f) {
00285 int k = nele_jac - n - n_integers;
00286 values += k;
00287 for(int i = 0; i< n; i++) {
00288 values[i] = grad_f[i];
00289 }
00290 }
00291 else
00292 ret_code = false;
00293 delete [] grad_f;
00294
00295 int k = nele_jac - n_integers;
00296 values += k;
00297 for(int i = 0; i< n_integers; i++) {
00298 if(vals_[i] <= 0.1)
00299 values[i] = 1.0;
00300 else
00301 values[i] = -1.0;
00302 }
00303 }
00304 else {
00305 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00306 }
00307 }
00308 else if(use_cutoff_constraint_) {
00309 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n,
00310 iRow, jCol, values);
00311
00312 if (iRow && jCol && !values) {
00313 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00314 int k = nele_jac - n;
00315 iRow += k;
00316 jCol += k;
00317 for(int i = 0; i< n; i++) {
00318 iRow[i] = m - 1 + index_correction;
00319 jCol[i] = i + index_correction;
00320 }
00321 }
00322 else if (!iRow & !jCol && values) {
00323 Number* grad_f = new Number[n];
00324 bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
00325 if(ret_code_grad_f) {
00326 int k = nele_jac - n;
00327 values += k;
00328 for(int i = 0; i< n; i++) {
00329 values[i] = grad_f[i];
00330 }
00331 }
00332 else
00333 ret_code = false;
00334 delete [] grad_f;
00335 }
00336 else {
00337 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00338 }
00339 }
00340 else if(use_local_branching_constraint_) {
00341 int n_integers = vals_.size();
00342 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n_integers,
00343 iRow, jCol, values);
00344
00345 if (iRow && jCol && !values) {
00346 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00347 int k = nele_jac - n_integers;
00348 iRow += k;
00349 jCol += k;
00350 for(int i = 0; i< n_integers; i++) {
00351 iRow[i] = m - 1 + index_correction;
00352 jCol[i] = inds_[i] + index_correction;
00353 }
00354 }
00355 else if (!iRow & !jCol && values) {
00356 int k = nele_jac - n_integers;
00357 values += k;
00358 for(int i = 0; i< n_integers; i++) {
00359 if(vals_[i] <= 0.1)
00360 values[i] = 1.0;
00361 else
00362 values[i] = -1.0;
00363 }
00364 }
00365 else {
00366 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00367 }
00368 }
00369 else
00370 ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac,
00371 iRow, jCol, values);
00372
00373 return ret_code;
00374 }
00375
00376 bool
00377 TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
00378 Number obj_factor, Index m, const Number* lambda,
00379 bool new_lambda, Index nele_hess,
00380 Index* iRow, Index* jCol, Number* values)
00381 {
00382 bool ret_code;
00383
00384 int nnz_obj_h = (norm_ == 2) ? inds_.size() : 0;
00385
00386 if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00387 double coef_obj = (iRow != NULL)?0 : lambda[m - 2];
00388 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
00389 m - 2, lambda, new_lambda, nele_hess - nnz_obj_h,
00390 iRow, jCol, values);
00391 }
00392 else if(use_cutoff_constraint_) {
00393 double coef_obj = (iRow != NULL)?0 : lambda[m - 1];
00394 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
00395 m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
00396 iRow, jCol, values);
00397 }
00398 else if(use_local_branching_constraint_) {
00399 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
00400 m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
00401 iRow, jCol, values);
00402 }
00403 else {
00404 ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
00405 m, lambda, new_lambda, nele_hess - nnz_obj_h,
00406 iRow, jCol, values);
00407 }
00408
00409
00410 if(use_feasibility_pump_objective_ && norm_ == 2){
00411 if (iRow && jCol && !values)
00412 {
00413 int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00414 int k = nele_hess - nnz_obj_h;
00415 iRow += k;
00416 jCol += k;
00417 for(unsigned int i = 0; i < inds_.size() ; i++)
00418 {
00419 iRow[i] = inds_[i] + index_correction;
00420 jCol[i] = inds_[i] + index_correction;
00421 }
00422 DBG_ASSERT(k==nele_hess);
00423 }
00424 else if (!iRow & !jCol && values)
00425 {
00426 int k = nele_hess - nnz_obj_h;
00427 values += k;
00428 for(unsigned int i = 0; i < inds_.size() ; i++)
00429 {
00430 values[i] = 2* objectiveScalingFactor_* lambda_* obj_factor;
00431 }
00432 DBG_ASSERT(k==nele_hess);
00433 }
00434 else
00435 {
00436 DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00437 }
00438 }
00439
00440 return ret_code;
00441 }
00442
00443 void
00444 TNLP2FPNLP::finalize_solution(SolverReturn status,
00445 Index n, const Number* x, const Number* z_L, const Number* z_U,
00446 Index m, const Number* g, const Number* lambda,
00447 Number obj_value,
00448 const IpoptData* ip_data,
00449 IpoptCalculatedQuantities* ip_cq)
00450 {
00451 int m2 = m;
00452 if(use_cutoff_constraint_) {
00453 m2--;
00454 }
00455 if(use_local_branching_constraint_) {
00456 m2--;
00457 }
00458 tnlp_->finalize_solution(status,n, x, z_L, z_U,m2, g, lambda, obj_value,
00459 ip_data, ip_cq);
00460 }
00461 }
00462