#include "BonTNLP2FPNLP.hpp"
#include "IpBlas.hpp"
#include "CoinHelperFunctions.hpp" // CoinCopyN
#include "CoinFinite.hpp"          // COIN_DBL_MAX
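// TNLP2FPNLP wraps another TNLP and, depending on which flags are set,
//  - replaces the objective by the feasibility-pump objective
//      (1 - lambda_) * sigma_ * f(x)
//        + objectiveScalingFactor_ * lambda_ * dist2point(x),
//  - appends a cutoff constraint  obj(x) <= cutoff_, and/or
//  - appends a local-branching constraint
//      dist2point(x) <= rhs_local_branching_constraint_.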
namespace Bonmin
{
TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
    tnlp_(tnlp),
    inds_(),
    vals_(),
    lambda_(1.),
    sigma_(1.),
    norm_(2),
    objectiveScalingFactor_(objectiveScalingFactor),
    use_feasibility_pump_objective_(false),
    use_cutoff_constraint_(false),
    use_local_branching_constraint_(false),
    cutoff_(COIN_DBL_MAX),
    rhs_local_branching_constraint_(COIN_DBL_MAX),
    index_style_(TNLP::C_STYLE)
{}

TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other):
    tnlp_(tnlp),
    inds_(other->inds_),
    vals_(other->vals_),
    lambda_(other->lambda_),
    sigma_(other->sigma_),
    norm_(other->norm_),
    objectiveScalingFactor_(other->objectiveScalingFactor_),
    use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
    use_cutoff_constraint_(other->use_cutoff_constraint_),
    use_local_branching_constraint_(other->use_local_branching_constraint_),
    cutoff_(other->cutoff_),
    rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
    index_style_(other->index_style_)
{}

TNLP2FPNLP::~TNLP2FPNLP()
{}

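/** Set the bound used by the cutoff constraint.  The value is tightened
 *  slightly (relatively away from zero, absolutely near zero) so that
 *  solutions whose objective merely matches the incumbent are cut off too. */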
void
TNLP2FPNLP::set_cutoff(Number cutoff)
{
  Number epsilon = 1.0e-6;
  if(cutoff > 1.0e-8)
    cutoff_ = (1 - epsilon) * cutoff;
  else if(cutoff < -1.0e-8)
    cutoff_ = (1 + epsilon) * cutoff;
  else
    cutoff_ = -epsilon;
}

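/** Store the reference point for the distance measure: vals[i] is the
 *  target value for the variable with index inds[i] (typically the
 *  rounded values of the integer variables). */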
void
TNLP2FPNLP::set_dist2point_obj(int n, const Number * vals, const Index * inds)
{
  inds_.resize(n);
  vals_.resize(n);
  IpBlasDcopy(n, vals, 1, vals_(), 1);
  CoinCopyN(inds, n, inds_());
}

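/** Distance from x to the reference point set by set_dist2point_obj():
 *  the squared Euclidean distance for norm_ == 2, or the l1 distance
 *  for norm_ == 1 (the latter assumes binary reference values). */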
double
TNLP2FPNLP::dist2point(const Number *x)
{
  double ret_val = 0;
  assert(vals_.size() == inds_.size());
  if(norm_ == 2){
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      ret_val += ( x[inds_[i]] - vals_[i] ) * ( x[inds_[i]] - vals_[i] );
    }
  }
  else if(norm_ == 1){
    // The l1 distance is written for binary reference values:
    // |x_i - 0| = x_i and |x_i - 1| = 1 - x_i.
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        ret_val += x[inds_[i]];
      else
        ret_val += (1.0 - x[inds_[i]]);
    }
  }
  return ret_val;
}

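/** Problem dimensions: those of the wrapped TNLP, enlarged by the diagonal
 *  Hessian entries of the 2-norm distance term and by the rows and Jacobian
 *  nonzeros of the cutoff and local-branching constraints. */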
bool
TNLP2FPNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
    Index& nnz_h_lag,
    TNLP::IndexStyleEnum& index_style)
{
  bool ret_code = tnlp_->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag,
      index_style);

  index_style_ = index_style;

  if(use_feasibility_pump_objective_ && norm_ == 2)
    nnz_h_lag += vals_.size();

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    m += 2;
    nnz_jac_g += (n + vals_.size());
  }
  else if(use_cutoff_constraint_) {
    m++;
    nnz_jac_g += n;
  }
  else if(use_local_branching_constraint_) {
    m++;
    nnz_jac_g += vals_.size();
  }

  return ret_code;
}

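/** Bounds of the wrapped TNLP; the appended rows are one-sided:
 *  the cutoff row lies in (-inf, cutoff_] and the local-branching row
 *  in (-inf, rhs_local_branching_constraint_]. */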
bool
TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
    Index m, Number* g_l, Number* g_u)
{
  bool ret_code;

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m - 2, g_l, g_u);
    g_l[m-2] = - COIN_DBL_MAX;
    g_u[m-2] = cutoff_;
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = rhs_local_branching_constraint_;
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m - 1, g_l, g_u);
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = cutoff_;
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m - 1, g_l, g_u);
    g_l[m-1] = - COIN_DBL_MAX;
    g_u[m-1] = rhs_local_branching_constraint_;
  }
  else
    ret_code = tnlp_->get_bounds_info(n, x_l, x_u, m, g_l, g_u);

  return ret_code;
}

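/** Objective value: the wrapped objective, rescaled and augmented with the
 *  distance term when the feasibility-pump objective is enabled. */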
bool
TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x,
    Number& obj_value)
{
  bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);

  if(use_feasibility_pump_objective_) {
    obj_value *= (1 - lambda_) * sigma_;
    obj_value += objectiveScalingFactor_ * lambda_ * dist2point(x);
  }

  return ret_code;
}

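/** Gradient of eval_f: the wrapped gradient scaled by (1 - lambda_) * sigma_,
 *  plus the gradient of the distance term on the tracked variables. */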
bool
TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x,
    Number* grad_f)
{
  bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);

  if(use_feasibility_pump_objective_) {
    for(int i = 0 ; i < n ; i++) {
      grad_f[i] *= (1 - lambda_) * sigma_;
    }
    if(norm_ == 2){
      for(unsigned int i = 0 ; i < inds_.size() ; i++) {
        grad_f[inds_[i]] += objectiveScalingFactor_ * 2 * lambda_ * ( x[inds_[i]] - vals_[i] );
      }
    }
    else{
      for(unsigned int i = 0 ; i < inds_.size() ; i++) {
        if(vals_[i] <= 0.1)
          grad_f[inds_[i]] += objectiveScalingFactor_ * lambda_;
        else
          grad_f[inds_[i]] -= objectiveScalingFactor_ * lambda_;
      }
    }
  }

  return ret_code;
}

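/** Constraint values: the wrapped constraints, followed by the objective
 *  value as computed by eval_f (cutoff row) and/or the l1 distance to the
 *  reference point (local-branching row). */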
bool
TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x,
    Index m, Number* g)
{
  bool ret_code;

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m - 2, g);

    // Cutoff row: the objective as computed by eval_f().
    Number obj_value;
    if(eval_f(n, x, new_x, obj_value))
      g[m-2] = obj_value;
    else
      ret_code = false;

    // Local-branching row: l1 distance to the reference point,
    // written for binary reference values.
    Number g_local_branching = 0.0;
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        g_local_branching += x[inds_[i]];
      else
        g_local_branching += (1.0 - x[inds_[i]]);
    }
    g[m-1] = g_local_branching;
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m - 1, g);
    Number obj_value;
    if(eval_f(n, x, new_x, obj_value))
      g[m-1] = obj_value;
    else
      ret_code = false;
  }
  else if(use_local_branching_constraint_) {
    ret_code = tnlp_->eval_g(n, x, new_x, m - 1, g);
    Number g_local_branching = 0.0;
    for(unsigned int i = 0 ; i < vals_.size() ; i++) {
      if(vals_[i] <= 0.1)
        g_local_branching += x[inds_[i]];
      else
        g_local_branching += (1.0 - x[inds_[i]]);
    }
    g[m-1] = g_local_branching;
  }
  else
    ret_code = tnlp_->eval_g(n, x, new_x, m, g);

  return ret_code;
}

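/** Jacobian: the wrapped Jacobian, followed by a dense objective-gradient
 *  row for the cutoff constraint and a +/-1 row for the local-branching
 *  constraint.  The extra entries are stored last, after the wrapped ones. */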
bool
TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
    Index m, Index nele_jac, Index* iRow,
    Index *jCol, Number* values)
{
  bool ret_code;

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    int n_integers = vals_.size();
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m - 2, nele_jac - n - n_integers,
        iRow, jCol, values);

    if (iRow && jCol && !values) { // Structure phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;

      // Dense row of the cutoff constraint (row m-2).
      int k = nele_jac - n - n_integers;
      for(int i = 0; i < n; i++) {
        iRow[k + i] = m - 2 + index_correction;
        jCol[k + i] = i + index_correction;
      }

      // Row of the local-branching constraint (row m-1).
      k = nele_jac - n_integers;
      for(int i = 0; i < n_integers; i++) {
        iRow[k + i] = m - 1 + index_correction;
        jCol[k + i] = inds_[i] + index_correction;
      }
    }
    else if (!iRow && !jCol && values) { // Values phase
      // The cutoff row is the gradient of the (possibly rescaled) objective.
      Number* grad_f = new Number[n];
      bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
      int k = nele_jac - n - n_integers;
      if(ret_code_grad_f) {
        for(int i = 0; i < n; i++) {
          values[k + i] = grad_f[i];
        }
      }
      else
        ret_code = false;
      delete [] grad_f;

      // The local-branching row has entry +1 for a 0 reference value
      // and -1 for a 1 reference value.
      k = nele_jac - n_integers;
      for(int i = 0; i < n_integers; i++) {
        if(vals_[i] <= 0.1)
          values[k + i] = 1.0;
        else
          values[k + i] = -1.0;
      }
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  else if(use_cutoff_constraint_) {
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m - 1, nele_jac - n,
        iRow, jCol, values);

    if (iRow && jCol && !values) { // Structure phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      int k = nele_jac - n;
      iRow += k;
      jCol += k;
      for(int i = 0; i < n; i++) {
        iRow[i] = m - 1 + index_correction;
        jCol[i] = i + index_correction;
      }
    }
    else if (!iRow && !jCol && values) { // Values phase
      Number* grad_f = new Number[n];
      bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
      if(ret_code_grad_f) {
        int k = nele_jac - n;
        values += k;
        for(int i = 0; i < n; i++) {
          values[i] = grad_f[i];
        }
      }
      else
        ret_code = false;
      delete [] grad_f;
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  else if(use_local_branching_constraint_) {
    int n_integers = vals_.size();
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m - 1, nele_jac - n_integers,
        iRow, jCol, values);

    if (iRow && jCol && !values) { // Structure phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      int k = nele_jac - n_integers;
      iRow += k;
      jCol += k;
      for(int i = 0; i < n_integers; i++) {
        iRow[i] = m - 1 + index_correction;
        jCol[i] = inds_[i] + index_correction;
      }
    }
    else if (!iRow && !jCol && values) { // Values phase
      int k = nele_jac - n_integers;
      values += k;
      for(int i = 0; i < n_integers; i++) {
        if(vals_[i] <= 0.1)
          values[i] = 1.0;
        else
          values[i] = -1.0;
      }
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }
  else
    ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac,
        iRow, jCol, values);

  return ret_code;
}

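/** Hessian of the Lagrangian: the wrapped Hessian with the objective factor
 *  rescaled by (1 - lambda_) * sigma_ (plus the cutoff row's multiplier,
 *  since that row is the objective itself), followed by the constant
 *  diagonal of the 2-norm distance term. */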
bool
TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
    Number obj_factor, Index m, const Number* lambda,
    bool new_lambda, Index nele_hess,
    Index* iRow, Index* jCol, Number* values)
{
  bool ret_code;

  int nnz_obj_h = (use_feasibility_pump_objective_ && norm_ == 2) ?
                  (int) inds_.size() : 0;

  if(use_cutoff_constraint_ && use_local_branching_constraint_) {
    // The cutoff row is the objective itself, so its multiplier is folded
    // into the objective factor handed to the wrapped TNLP (lambda may be
    // NULL during the structure phase).
    Number cutoff_mult = (lambda != NULL) ? lambda[m-2] : 0.;
    ret_code = tnlp_->eval_h(n, x, new_x,
        obj_factor*(1-lambda_)*sigma_ + cutoff_mult,
        m - 2, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  else if(use_cutoff_constraint_) {
    Number cutoff_mult = (lambda != NULL) ? lambda[m-1] : 0.;
    ret_code = tnlp_->eval_h(n, x, new_x,
        obj_factor*(1-lambda_)*sigma_ + cutoff_mult,
        m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  else if(use_local_branching_constraint_) {
    // The local-branching constraint is linear, so it adds nothing here.
    ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
        m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }
  else {
    ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
        m, lambda, new_lambda, nele_hess - nnz_obj_h,
        iRow, jCol, values);
  }

  // Append the diagonal Hessian entries of the 2-norm distance term.
  if(use_feasibility_pump_objective_ && norm_ == 2){
    if (iRow && jCol && !values) { // Structure phase
      int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
      int k = nele_hess - nnz_obj_h;
      iRow += k;
      jCol += k;
      for(unsigned int i = 0; i < inds_.size() ; i++) {
        iRow[i] = inds_[i] + index_correction;
        jCol[i] = inds_[i] + index_correction;
      }
      DBG_ASSERT(k + nnz_obj_h == nele_hess);
    }
    else if (!iRow && !jCol && values) { // Values phase
      int k = nele_hess - nnz_obj_h;
      values += k;
      for(unsigned int i = 0; i < inds_.size() ; i++) {
        values[i] = 2 * objectiveScalingFactor_ * lambda_ * obj_factor;
      }
      DBG_ASSERT(k + nnz_obj_h == nele_hess);
    }
    else {
      DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
    }
  }

  return ret_code;
}

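/** Simply pass the solution through to the wrapped TNLP. */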
void
TNLP2FPNLP::finalize_solution(SolverReturn status,
    Index n, const Number* x, const Number* z_L, const Number* z_U,
    Index m, const Number* g, const Number* lambda,
    Number obj_value,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  tnlp_->finalize_solution(status, n, x, z_L, z_U, m, g, lambda, obj_value,
      ip_data, ip_cq);
}

} // namespace Bonmin