/home/coin/SVN-release/OS-2.1.0/Bonmin/src/Interfaces/BonTNLP2FPNLP.cpp

Go to the documentation of this file.
00001 // (C) Copyright Carnegie Mellon University 2005
00002 // All Rights Reserved.
00003 // This code is published under the Common Public License.
00004 //
00005 // Authors :
00006 // Pierre Bonami, Carnegie Mellon University,
00007 //
00008 // Date : 05/25/2005
00009 
00010 
00011 #include "BonTNLP2FPNLP.hpp"
00012 #include "IpBlas.hpp"
00013 namespace Bonmin
00014 {
  /** Wrap \p tnlp for use inside the feasibility pump.
   *  All FP features start disabled; defaults set the pure distance
   *  objective (lambda_ = 1, sigma_ = 1) with the 2-norm.
   *  @param tnlp                    problem to wrap
   *  @param objectiveScalingFactor  multiplier applied to the distance term
   */
  TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
      tnlp_(tnlp),
      inds_(),                // indices of tracked (integer) variables — empty until set_dist2point_obj
      vals_(),                // target values for those variables
      lambda_(1.),            // weight of the distance term in the FP objective
      sigma_(1.),             // extra scaling on the original objective
      norm_(2),               // distance norm: 2 (squared Euclidean) or 1
      objectiveScalingFactor_(objectiveScalingFactor),
      use_feasibility_pump_objective_(false),
      use_cutoff_constraint_(false),
      use_local_branching_constraint_(false),
      cutoff_(COIN_DBL_MAX),  // cutoff row upper bound; tightened via set_cutoff
      rhs_local_branching_constraint_(COIN_DBL_MAX),
      index_style_(TNLP::C_STYLE) // overwritten by get_nlp_info before use
  {}
00030 
  /** Wrap \p tnlp while copying all feasibility-pump settings
   *  (distance target, weights, norm, extra-constraint flags and bounds)
   *  from an existing wrapper \p other. */
  TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other):
      tnlp_(tnlp),
      inds_(other->inds_),
      vals_(other->vals_),
      lambda_(other->lambda_),
      sigma_(other->sigma_),
      norm_(other->norm_),
      objectiveScalingFactor_(other->objectiveScalingFactor_),
      use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
      use_cutoff_constraint_(other->use_cutoff_constraint_),
      use_local_branching_constraint_(other->use_local_branching_constraint_),
      cutoff_(other->cutoff_),
      rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
      index_style_(other->index_style_)
  {}
00046 
  /** Destructor — nothing to release; tnlp_ is a SmartPtr. */
  TNLP2FPNLP::~TNLP2FPNLP()
  {
  }
00050 
00051   void
00052   TNLP2FPNLP::set_cutoff(Number cutoff)
00053   {
00054     Number epsilon = 1.0e-6;
00055     if(cutoff > 1.0e-8)
00056       cutoff_ = (1-epsilon) * cutoff;
00057     else if(cutoff < -1.0e-8)
00058       cutoff_ = (1+epsilon) * cutoff;
00059     else
00060       cutoff_ = - epsilon;
00061   }
00062 
  /** Define the point the distance objective measures to.
   *  @param n     number of tracked variables
   *  @param vals  target values (copied into vals_)
   *  @param inds  variable indices (copied into inds_)
   */
  void
  TNLP2FPNLP::set_dist2point_obj(int n, const Number * vals, const Index * inds)
  {
    inds_.resize(n);
    vals_.resize(n);
    IpBlasDcopy(n, vals, 1, vals_(), 1);  // BLAS copy of the Number targets
    CoinCopyN(inds, n, inds_());          // plain copy of the Index array
  }
00071 
00073   double
00074   TNLP2FPNLP::dist2point(const Number *x)
00075   {
00076     double ret_val = 0;
00077     assert(vals_.size() == inds_.size());
00078     if(norm_ == 2){
00079       for(unsigned int i = 0; i < vals_.size() ; i++) {
00080         ret_val += ( x[inds_[i]] - vals_[i] ) * ( x[inds_[i]] - vals_[i] );
00081       }
00082     }
00083     else if(norm_ == 1){
00084       for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00085         if(vals_[i] <= 0.1)
00086          ret_val += x[inds_[i]];
00087         else{
00088           ret_val += (1.0 - x[inds_[i]]);
00089          //         ret_val -= x[inds_[i]];
00090         }
00091       }
00092     }
00093     return ret_val;
00094   }
00095 
  /** Report problem sizes: the wrapped sizes plus the extra nonzeros and
   *  rows this wrapper adds (distance Hessian diagonal, cutoff row,
   *  local-branching row). Also records the index style for later use. */
  bool
  TNLP2FPNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
      Index& nnz_h_lag,
      TNLP::IndexStyleEnum& index_style)
  {
    bool ret_code = tnlp_->get_nlp_info(n, m , nnz_jac_g, nnz_h_lag,
        index_style);

    index_style_ = index_style; // this function is called before any
                                // other that uses index_style_

    // 2-norm distance term adds one diagonal Hessian entry per tracked variable.
    if(use_feasibility_pump_objective_ && norm_ == 2)
      nnz_h_lag += vals_.size();

    // Cutoff row is dense (n jacobian entries); the local-branching row
    // touches only the tracked variables (vals_.size() entries).
    if(use_cutoff_constraint_ && use_local_branching_constraint_) {
      m += 2;
      nnz_jac_g += (n + vals_.size());
    }
    else if(use_cutoff_constraint_) {
      m++;
      nnz_jac_g += n;
    }
    else if(use_local_branching_constraint_) {
      m++;
      nnz_jac_g += vals_.size();
    }

    return ret_code;
  }
00125 
00126   bool 
00127   TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
00128                               Index m, Number* g_l, Number* g_u)
00129   {
00130     bool ret_code;
00131 
00132     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00133       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-2, g_l, g_u);
00134       g_l[m-2] = - COIN_DBL_MAX;
00135       g_u[m-2] = cutoff_;
00136       g_l[m-1] = - COIN_DBL_MAX;
00137       g_u[m-1] = rhs_local_branching_constraint_;
00138     }
00139     else if(use_cutoff_constraint_) {
00140       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
00141       g_l[m-1] = - COIN_DBL_MAX;
00142       g_u[m-1] = cutoff_;
00143     }
00144     else if(use_local_branching_constraint_) {
00145       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
00146       g_l[m-1] = - COIN_DBL_MAX;
00147       g_u[m-1] = rhs_local_branching_constraint_;
00148     }
00149     else
00150       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m, g_l, g_u);
00151 
00152     return ret_code;
00153   }
00154 
  /** Objective. When the FP objective is active, returns
   *  (1 - lambda_) * sigma_ * f(x)
   *    + objectiveScalingFactor_ * lambda_ * dist2point(x);
   *  otherwise the wrapped objective unchanged. */
  bool
  TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x,
      Number& obj_value)
  {
    bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);//for new_x

    if(use_feasibility_pump_objective_) {
      obj_value *= (1 - lambda_) * sigma_;
      obj_value += objectiveScalingFactor_*lambda_*dist2point(x);
    }

    return ret_code;
  }
00168 
00169   bool
00170   TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x,
00171       Number* grad_f)
00172   {
00173     bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);
00174 
00175     if(use_feasibility_pump_objective_) {
00176       for(int i = 0 ; i < n ; i++) {
00177         grad_f[i] *= (1-lambda_) * sigma_;
00178       }
00179       if(norm_ ==2){
00180         for(unsigned int i = 0 ; i < inds_.size() ; i++) {
00181           grad_f[inds_[i]] += objectiveScalingFactor_*2*lambda_*( x[inds_[i]] - vals_[i] );
00182         }
00183       }
00184       else{
00185         for(unsigned int i = 0 ; i < inds_.size() ; i++) {
00186           if(vals_[i] <= 0.1)
00187             grad_f[inds_[i]] += objectiveScalingFactor_*lambda_;
00188           else
00189             grad_f[inds_[i]] -= objectiveScalingFactor_*lambda_;
00190         }
00191       }
00192     }
00193    
00194     return ret_code;
00195   }
00196 
00197   bool
00198   TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x,
00199                      Index m, Number* g)
00200   {
00201     bool ret_code;
00202 
00203     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00204       ret_code = tnlp_->eval_g(n, x, new_x, m-2, g);
00205       // compute the value of the cutoff constraint
00206       Number obj_value;
00207       if(eval_f(n, x, new_x, obj_value))
00208         g[m-2] = obj_value;
00209       else
00210         ret_code = false;
00211       // compute the value of the local branching constraint
00212       Number g_local_branching = 0.0;
00213       for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00214         if(vals_[i] <= 0.1)
00215           g_local_branching += x[inds_[i]];
00216         else
00217           g_local_branching += (1.0 - x[inds_[i]]);
00218       }
00219       g[m-1] = g_local_branching;
00220     }
00221     else if(use_cutoff_constraint_) {
00222       ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
00223       Number obj_value;
00224       if(eval_f(n, x, new_x, obj_value))
00225         g[m-1] = obj_value;
00226       else
00227         ret_code = false;
00228     }
00229     else if(use_local_branching_constraint_) {
00230       ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
00231       Number g_local_branching = 0.0;
00232       for(unsigned int i = 0 ; i < vals_.size() ; i++) {
00233         if(vals_[i] <= 0.1)
00234           g_local_branching += x[inds_[i]];
00235         else
00236           g_local_branching += (1.0 - x[inds_[i]]);
00237       }
00238       g[m-1] = g_local_branching;
00239     }
00240     else
00241       ret_code = tnlp_->eval_g(n, x, new_x, m, g);
00242     
00243     return ret_code;
00244   }
00245 
00246   bool
00247   TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
00248                          Index m, Index nele_jac, Index* iRow,
00249                          Index *jCol, Number* values)
00250   {
00251     bool ret_code;
00252 
00253     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00254       int n_integers = vals_.size();
00255       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n - n_integers, 
00256                                    iRow, jCol, values);
00257 
00258       if (iRow && jCol && !values) { //Initialization phase 
00259         int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00260         // compute the jacobian contribution of the cutoff constraint
00261         int k = nele_jac - n - n_integers;
00262         iRow += k;
00263         jCol += k;
00264         for(int i = 0; i< n; i++) {
00265           iRow[i] = m - 2 + index_correction;
00266           jCol[i] = i + index_correction;
00267         }
00268         // compute the jacobian contribution of the local branching constraint
00269         k = nele_jac - n_integers;
00270         iRow += k;
00271         jCol += k;
00272         for(int i = 0; i< n_integers; i++) {
00273           iRow[i] = m - 1 + index_correction;
00274           jCol[i] = inds_[i] + index_correction;
00275         }
00276       }
00277       else if (!iRow & !jCol && values) { //computation phase
00278         // compute the jacobian contribution of the cutoff constraint
00279         Number* grad_f = new Number[n];
00280         bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
00281         if(ret_code_grad_f) {
00282           int k = nele_jac - n - n_integers;
00283           values += k;
00284           for(int i = 0; i< n; i++) {
00285             values[i] = grad_f[i];
00286           }
00287         }
00288         else
00289           ret_code = false;
00290         delete [] grad_f;
00291         // compute the jacobian contribution of the local branching constraint
00292         int k = nele_jac - n_integers;
00293         values += k;
00294         for(int i = 0; i< n_integers; i++) {
00295           if(vals_[i] <= 0.1)
00296             values[i] = 1.0;
00297           else
00298             values[i] = -1.0;
00299         }
00300       }   
00301       else { //error phase
00302         DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00303       }
00304     }
00305     else if(use_cutoff_constraint_) {
00306       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n, 
00307                                    iRow, jCol, values);
00308       
00309       if (iRow && jCol && !values) { //Initialization phase 
00310         int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00311         int k = nele_jac - n;
00312         iRow += k;
00313         jCol += k;
00314         for(int i = 0; i< n; i++) {
00315           iRow[i] = m - 1 + index_correction;
00316           jCol[i] = i + index_correction;
00317         }
00318       }
00319       else if (!iRow & !jCol && values) { //computation phase
00320         Number* grad_f = new Number[n];
00321         bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
00322         if(ret_code_grad_f) {
00323           int k = nele_jac - n;
00324           values += k;
00325           for(int i = 0; i< n; i++) {
00326             values[i] = grad_f[i];
00327           }
00328         }
00329         else
00330           ret_code = false;
00331         delete [] grad_f;
00332       }   
00333       else { //error phase
00334         DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00335       }
00336     }
00337     else if(use_local_branching_constraint_) {
00338       int n_integers = vals_.size();
00339       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n_integers, 
00340                                    iRow, jCol, values);
00341       
00342       if (iRow && jCol && !values) { //Initialization phase 
00343         int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00344         int k = nele_jac - n_integers;
00345         iRow += k;
00346         jCol += k;
00347         for(int i = 0; i< n_integers; i++) {
00348           iRow[i] = m - 1 + index_correction;
00349           jCol[i] = inds_[i] + index_correction;
00350         }
00351       }
00352       else if (!iRow & !jCol && values) { //computation phase
00353         int k = nele_jac - n_integers;
00354         values += k;
00355         for(int i = 0; i< n_integers; i++) {
00356           if(vals_[i] <= 0.1)
00357             values[i] = 1.0;
00358           else
00359             values[i] = -1.0;
00360         }
00361       }   
00362       else { //error phase
00363         DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00364       }
00365     }
00366     else
00367       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac, 
00368                                    iRow, jCol, values);
00369     
00370     return ret_code;
00371   }
00372 
00373   bool
00374   TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
00375                      Number obj_factor, Index m, const Number* lambda,
00376                      bool new_lambda, Index nele_hess,
00377                      Index* iRow, Index* jCol, Number* values)
00378   {
00379     bool ret_code;
00380 
00381     int  nnz_obj_h = (norm_ == 2) ? inds_.size() : 0;
00382 
00383     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
00384       double coef_obj = (iRow != NULL)?0 : lambda[m - 2];
00385       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj, 
00386                                m - 2, lambda, new_lambda, nele_hess - nnz_obj_h, 
00387                                iRow, jCol, values);
00388     }
00389     else if(use_cutoff_constraint_) {
00390       double coef_obj = (iRow != NULL)?0 : lambda[m - 1];
00391       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj, 
00392                                m - 1, lambda, new_lambda, nele_hess - nnz_obj_h, 
00393                                iRow, jCol, values);
00394     }
00395     else if(use_local_branching_constraint_) {
00396       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_, 
00397                                m - 1, lambda, new_lambda, nele_hess - nnz_obj_h, 
00398                                iRow, jCol, values);
00399     }
00400     else { // this is the original feasibility pump implementation
00401       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_, 
00402                                m, lambda, new_lambda, nele_hess - nnz_obj_h, 
00403                                iRow, jCol, values);
00404     }
00405 
00406     //Now add extra elements corresponding to the hessian of the distance
00407     if(use_feasibility_pump_objective_ && norm_ == 2){
00408       if (iRow && jCol && !values) //Initialization phase
00409         {
00410     int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
00411           int k = nele_hess - nnz_obj_h;
00412           iRow += k;
00413           jCol += k;
00414           for(unsigned int i = 0; i < inds_.size() ; i++)
00415             {
00416               iRow[i] = inds_[i] + index_correction;
00417               jCol[i] = inds_[i] + index_correction;
00418             }
00419           DBG_ASSERT(k==nele_hess);
00420         }
00421       else if (!iRow & !jCol && values) //computation phase
00422         {
00423           int k = nele_hess - nnz_obj_h;
00424           values += k;
00425           for(unsigned int i = 0; i < inds_.size() ; i++)
00426             {
00427               values[i] = 2* objectiveScalingFactor_* lambda_* obj_factor;
00428             }
00429           DBG_ASSERT(k==nele_hess);
00430         }
00431       else //error phase
00432         {
00433           DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
00434         }
00435     }
00436 
00437     return ret_code;
00438   }
00439 
00440   void
00441   TNLP2FPNLP::finalize_solution(SolverReturn status,
00442       Index n, const Number* x, const Number* z_L, const Number* z_U,
00443       Index m, const Number* g, const Number* lambda,
00444       Number obj_value,
00445       const IpoptData* ip_data,
00446       IpoptCalculatedQuantities* ip_cq)
00447   {
00448       int m2 = m;
00449       if(use_cutoff_constraint_) {
00450         m2--;
00451       }
00452       if(use_local_branching_constraint_) {
00453         m2--;
00454       }
00455     tnlp_->finalize_solution(status,n, x, z_L, z_U,m2, g, lambda, obj_value,
00456                              ip_data, ip_cq);
00457   }
00458 }
00459 

Generated on Tue Mar 30 03:04:35 2010 by  doxygen 1.4.7