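// Implementation corresponding to BonTMINLP2Quad.hpp.
//
// TMINLP2TNLPQuadCuts is a TMINLP2TNLP adapter that lets linear and
// quadratic cuts be added to (and removed from) the continuous relaxation,
// keeping the Jacobian and Hessian sparsity structures up to date as cut
// rows come and go.
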
#include "BonTMINLP2Quad.hpp"
#include <climits>
#include <algorithm>   // std::sort
#include <iostream>    // std::cout in printH and debug output

using namespace Ipopt;

namespace Bonmin {

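/** Construct from a TMINLP: record the Jacobian nonzero count and build the
    adjustable Hessian map H_ from the structure of the original problem.
    Each H_ entry maps an index pair to (position in the Hessian arrays,
    reference count), where a count of -1 marks an original entry. */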
TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const SmartPtr<Bonmin::TMINLP> tminlp):
  TMINLP2TNLP(tminlp)
{
  const int nnz_h = TMINLP2TNLP::nnz_h_lag();
  curr_nnz_jac_ = TMINLP2TNLP::nnz_jac_g();
  if(nnz_h > 0){
    int * jCol = new int [nnz_h];
    int * iRow = new int [nnz_h];

    // Retrieve the Hessian structure of the original problem. Note the
    // deliberate argument order: H_ is keyed on the pair as retrieved here,
    // and eval_h writes the structure back with the same convention.
    TMINLP2TNLP::eval_h(num_variables(), NULL, false,
                        0., TMINLP2TNLP::num_constraints(), NULL, false,
                        nnz_h, jCol, iRow, NULL);

    // Record each original nonzero in the adjustable map; -1 marks an entry
    // belonging to the original problem, which is never removed.
    for(int i = 0 ; i < nnz_h ; i++){
      std::pair<AdjustableMat::iterator, bool> res =
        H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                 std::make_pair(i, -1)));
      assert(res.second == true);
    }
    delete [] jCol;
    delete [] iRow;
  }
  assert(nnz_h == (int) H_.size());
  obj_.reserve(TMINLP2TNLP::num_variables());
}

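/** Copy constructor: rebuild H_ from the original problem's structure, then
    deep-copy the cut rows and re-register their Hessian contributions. */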
TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const TMINLP2TNLPQuadCuts &other):
  TMINLP2TNLP(other),
  quadRows_(other.quadRows_),
  H_(),
  curr_nnz_jac_(other.curr_nnz_jac_),
  obj_(other.obj_)
{
  const int nnz_h = TMINLP2TNLP::nnz_h_lag();

  if(nnz_h > 0){
    int * jCol = new int [nnz_h];
    int * iRow = new int [nnz_h];
    // Query the Hessian structure of the original constraints only; the
    // copied cut rows are re-added to H_ below.
    int m = TMINLP2TNLP::num_constraints() - quadRows_.size();
    TMINLP2TNLP::eval_h(num_variables(), NULL, false,
                        0., m, NULL, false,
                        nnz_h, jCol, iRow, NULL);

    for(int i = 0 ; i < nnz_h ; i++){
      std::pair<AdjustableMat::iterator, bool> res =
        H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                 std::make_pair(i, -1)));
      assert(res.second == true);
    }
    delete [] jCol;
    delete [] iRow;
  }
  assert(nnz_h == (int) H_.size());

  // Deep-copy the cut rows: the vector copied in the initializer list still
  // points to the rows owned by other.
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i] = new QuadRow(*quadRows_[i]);
  }

  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i]->add_to_hessian(H_, offset);
  }
}

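/** Destructor: the cut rows are owned by this object. */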
TMINLP2TNLPQuadCuts::~TMINLP2TNLPQuadCuts(){
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    delete quadRows_[i];
  }
}

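/** Return the problem dimensions, with the Jacobian and Hessian nonzero
    counts adjusted for the cuts currently stored. */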
bool TMINLP2TNLPQuadCuts::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
                                       Index& nnz_h_lag,
                                       TNLP::IndexStyleEnum& index_style){
  bool ret_val = TMINLP2TNLP::get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
  // Override the nonzero counts with the values that account for the cuts.
  nnz_h_lag = H_.size();
  nnz_jac_g = curr_nnz_jac_;
  return ret_val;
}

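/** Bounds are maintained by the base class (g_l_ and g_u_ already include
    the cut rows). */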
bool TMINLP2TNLPQuadCuts::get_bounds_info(Index n, Number* x_l, Number* x_u,
                                          Index m, Number* g_l, Number* g_u){
  return TMINLP2TNLP::get_bounds_info(n, x_l, x_u, m, g_l, g_u);
}

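/** Report constraint linearity: the original constraints first, then one
    entry per cut row. */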
bool
TMINLP2TNLPQuadCuts::get_constraints_linearity(Index m, LinearityType* const_types)
{
  bool ret_val = TMINLP2TNLP::get_constraints_linearity(m - quadRows_.size(), const_types);
  const_types += m - quadRows_.size();
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    if(quadRows_[i]->isLinear())
      const_types[i] = TNLP::LINEAR;
    else
      const_types[i] = TNLP::NON_LINEAR;
  }
  return ret_val;
}

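/** The starting point is handled entirely by the base class. */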
bool TMINLP2TNLPQuadCuts::get_starting_point(Index n, bool init_x, Number* x,
                                             bool init_z, Number* z_L, Number* z_U,
                                             Index m, bool init_lambda,
                                             Number* lambda){
  return TMINLP2TNLP::get_starting_point(n, init_x, x, init_z, z_L, z_U,
                                         m, init_lambda, lambda);
}

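/** Scaling: delegate to the base class for the original constraints and use
    a neutral factor of 1 for the cut rows. */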
bool TMINLP2TNLPQuadCuts::get_scaling_parameters(Number& obj_scaling,
                                                 bool& use_x_scaling, Index n,
                                                 Number* x_scaling,
                                                 bool& use_g_scaling, Index m,
                                                 Number* g_scaling){
  assert(num_constraints() == m);
  // Call the base class explicitly: the unqualified call would recurse
  // infinitely into this method.
  bool retval = TMINLP2TNLP::get_scaling_parameters(obj_scaling, use_x_scaling, n,
                                                    x_scaling, use_g_scaling,
                                                    m - quadRows_.size(), g_scaling);
  if(use_g_scaling){
    g_scaling += m - quadRows_.size();
    CoinFillN(g_scaling, quadRows_.size(), 1.);
  }
  return retval;
}

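/** Objective value. If set_linear_objective() has been called, evaluate the
    substituted linear objective c_ + obj_ . x instead of the original one. */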
bool
TMINLP2TNLPQuadCuts::eval_f(Index n, const Number* x, bool new_x,
                            Number& obj_value){
  if(obj_.empty()){
    return TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
  }
  // A linear objective has been substituted. Still pass new points to the
  // base class so that its cached values stay consistent.
  if(new_x){
    TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
  }
  obj_value = c_;
  assert(n == (int) obj_.size());
  for(int i = 0 ; i < n ; i++){
    obj_value += obj_[i] * x[i];
  }
  return true;
}

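/** Objective gradient; constant and equal to obj_ when a linear objective
    has been substituted. */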
bool
TMINLP2TNLPQuadCuts::eval_grad_f(Index n, const Number* x, bool new_x,
                                 Number* grad_f){
  if(obj_.empty()){
    return TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
  }
  if(new_x){
    TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
  }
  assert(n == (int) obj_.size());
  for(int i = 0 ; i < n ; i++){
    grad_f[i] = obj_[i];
  }
  return true;
}

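/** Evaluate a single constraint: original constraints are delegated to the
    base class, cut rows to their QuadRow. */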
bool TMINLP2TNLPQuadCuts::eval_gi(Index n, const Number* x, bool new_x,
                                  Index i, Number& gi)
{
  int m_orig = num_constraints() - quadRows_.size();
  if(i < m_orig){
    return TMINLP2TNLP::eval_gi(n, x, new_x, i, gi);
  }
  i -= m_orig;
  gi = quadRows_[i]->eval_f(x, new_x);
  // The evaluation succeeded; returning false would signal failure to Ipopt.
  return true;
}

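/** Evaluate all constraints: the original ones first, then the cut rows. */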
bool TMINLP2TNLPQuadCuts::eval_g(Index n, const Number* x, bool new_x,
                                 Index m, Number* g){
  int m_tminlp = m - quadRows_.size();
  bool retval = TMINLP2TNLP::eval_g(n, x, new_x, m_tminlp, g);
  g += m_tminlp;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    g[i] = quadRows_[i]->eval_f(x, new_x);
  }
  return retval;
}

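/** Jacobian of the constraints. The entries of the original Jacobian come
    first; each cut row appends nnz_grad() entries. With values == NULL the
    sparsity structure is returned, otherwise the numerical values. */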
bool TMINLP2TNLPQuadCuts::eval_jac_g(Index n, const Number* x, bool new_x,
                                     Index m, Index nele_jac, Index* iRow,
                                     Index* jCol, Number* values){
  int n_ele_orig = TMINLP2TNLP::nnz_jac_g();
  int m_orig = m - quadRows_.size();
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  bool retval = TMINLP2TNLP::eval_jac_g(n, x, new_x, m_orig,
                                        n_ele_orig, iRow, jCol, values);
  if(values == NULL){
    // Structure query: append the sparsity pattern of each cut row after
    // the entries of the original Jacobian.
    assert(iRow != NULL);
    assert(jCol != NULL);
    iRow += n_ele_orig;
    jCol += n_ele_orig;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      const int & nnz = quadRows_[i]->nnz_grad();
      Ipopt::Index mi = m_orig + i + offset;
      CoinFillN(iRow, nnz, mi);
      quadRows_[i]->gradiant_struct(nnz, jCol, offset);
      iRow += nnz;
      jCol += nnz;
    }
  }
  else {
    // Value query: append the gradient values of each cut row.
    assert(iRow == NULL);
    assert(jCol == NULL);
    values += n_ele_orig;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      const int & nnz = quadRows_[i]->nnz_grad();
      quadRows_[i]->eval_grad(nnz, x, new_x, values);
      values += nnz;
    }
  }
  return retval;
}

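/** Gradient of a single constraint; cut rows are handled by their QuadRow. */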
bool TMINLP2TNLPQuadCuts::eval_grad_gi(Index n, const Number* x, bool new_x,
                                       Index i, Index& nele_grad_gi, Index* jCol,
                                       Number* values)
{
  int m_orig = num_constraints() - quadRows_.size();
  if(i < m_orig){
    return TMINLP2TNLP::eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
  }
  i -= m_orig;
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  if(values == NULL){
    assert(jCol != NULL);
    nele_grad_gi = quadRows_[i]->nnz_grad();
    quadRows_[i]->gradiant_struct(nele_grad_gi, jCol, offset);
  }
  else{
    assert(jCol == NULL);
    quadRows_[i]->eval_grad(nele_grad_gi, x, new_x, values);
  }
  // The evaluation succeeded; returning false would signal failure to Ipopt.
  return true;
}

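/** Hessian of the Lagrangian. With values == NULL, write the sparsity
    pattern stored in H_ (original entries keep their positions, cut entries
    are renumbered after them); otherwise evaluate the original Hessian and
    add each cut row's multiplier-weighted contribution. */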
bool TMINLP2TNLPQuadCuts::eval_h(Index n, const Number* x, bool new_x,
                                 Number obj_factor, Index m, const Number* lambda,
                                 bool new_lambda, Index nele_hess,
                                 Index* iRow, Index* jCol, Number* values){
  // When a linear objective has been substituted, the objective contributes
  // nothing to the Hessian.
  if(!obj_.empty()) obj_factor = 0;
  if(values == NULL){
    assert(iRow != NULL);
    assert(jCol != NULL);
#ifdef DEBUG
    std::cout<<"Hessian structure"<<std::endl;
#endif
    int nnz = 0;
    int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
    int nnz_sup = nnz_h_lag_orig;
    for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
      if(i->second.second == -1){
        // Entry of the original Hessian: it keeps its original position.
        assert(i->second.first < nnz_h_lag_orig);
      }
      else {
        // Entry contributed by cuts: assign it the next position after the
        // original entries.
        assert(i->second.second > 0);
        assert(i->second.first >= nnz_h_lag_orig);
        i->second.first = nnz_sup;
        nnz_sup++;
      }
      iRow[i->second.first] = i->first.first;
      jCol[i->second.first] = i->first.second;
#ifdef DEBUG
      printf("iRow %i, jCol %i : nnz %i\n",
             i->first.second, i->first.first,
             i->second.first);
#endif
      nnz++;
    }
    assert(nnz == (int) H_.size());
    return true;
  }
  else {
#ifdef DEBUG
    std::cout<<"Computing hessian"<<std::endl;
#endif
    assert(iRow == NULL);
    assert(jCol == NULL);
    int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
    int m_orig = m - quadRows_.size();
    bool ret_val = TMINLP2TNLP::eval_h(n, x, new_x, obj_factor, m_orig, lambda, new_lambda,
                                       nnz_h_lag_orig, iRow, jCol, values);
    // Zero the entries reserved for the cuts, then let each cut row add its
    // multiplier-weighted Hessian contribution.
    CoinZeroN(values + nnz_h_lag_orig, H_.size() - nnz_h_lag_orig);
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      quadRows_[i]->eval_hessian(lambda[i + m_orig], values);
    }
    return ret_val;
  }
}

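/** Add linear and quadratic cuts from a Bonmin::Cuts collection and resize
    the primal/dual starting-point storage accordingly. */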
void
TMINLP2TNLPQuadCuts::addCuts(const Cuts & cuts, bool safe){
  assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
  printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  g_l_.reserve(g_l_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
  g_u_.reserve(g_u_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
  quadRows_.reserve(quadRows_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());

  // Register the quadratic cuts: bounds, the QuadRow wrapper, and the
  // corresponding Hessian and Jacobian entries.
  int n = cuts.sizeQuadCuts();
  for(int i = 0 ; i < n ; i++){
    g_l_.push_back(cuts.quadCut(i).lb());
    g_u_.push_back(cuts.quadCut(i).ub());
    quadRows_.push_back(new QuadRow(cuts.quadCut(i)));
    quadRows_.back()->add_to_hessian(H_, offset);
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  // Row cuts stored in the base OsiCuts part are added by addRowCuts.
  addRowCuts((OsiCuts) cuts, safe);
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

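/** Add cuts passed as an array of OsiRowCut pointers; each cut is checked
    dynamically for being a QuadCut. */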
void TMINLP2TNLPQuadCuts::addCuts(unsigned int numcuts,
                                  const OsiRowCut ** cuts){
#ifdef DEBUG
  printf("Adding %u cuts\n", numcuts);
#endif
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  g_l_.reserve(g_l_.size() + numcuts);
  g_u_.reserve(g_u_.size() + numcuts);
  quadRows_.reserve(quadRows_.size() + numcuts);
  for(unsigned int i = 0 ; i < numcuts ; i++){
    g_l_.push_back(cuts[i]->lb());
    g_u_.push_back(cuts[i]->ub());

    // Only a quadratic cut touches the Hessian structure.
    const QuadCut * quadCut = dynamic_cast<const QuadCut *> (cuts[i]);
    if(quadCut){
      quadRows_.push_back(new QuadRow(*quadCut));
      quadRows_.back()->add_to_hessian(H_, offset);
    }
    else
      quadRows_.push_back(new QuadRow(*cuts[i]));
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

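/** Add cuts from an OsiCuts collection, dispatching to the Cuts overload
    when quadratic cuts may be present. */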
void TMINLP2TNLPQuadCuts::addCuts(const OsiCuts& cuts){
  assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
  printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif
  // If the object is really a Bonmin::Cuts, dispatch to the overload that
  // also handles the quadratic cuts it carries.
  const Cuts * quadCuts = dynamic_cast<const Cuts *>(&cuts);
  if(quadCuts) {
    addCuts(*quadCuts, true);
    return;
  }
  addRowCuts(cuts, true);
}

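/** Append row cuts to the problem. When safe is true, each cut is tested
    for being a QuadCut (which must also update the Hessian); when false,
    the caller guarantees that no quadratic cuts are present. */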
void TMINLP2TNLPQuadCuts::addRowCuts(const OsiCuts& cuts, bool safe){
  int n = cuts.sizeRowCuts();
  g_l_.reserve(g_l_.size() + n);
  g_u_.reserve(g_u_.size() + n);
  quadRows_.reserve(quadRows_.size() + n);

  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  for(int i = 0 ; i < n ; i++){
    g_l_.push_back(cuts.rowCut(i).lb());
    g_u_.push_back(cuts.rowCut(i).ub());
    if(safe == false){
      assert(dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i)) == NULL);
    }
    else {
      const QuadCut * cut = dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i));
      if(cut){
        quadRows_.push_back(new QuadRow(*cut));
        quadRows_.back()->add_to_hessian(H_, offset);
        curr_nnz_jac_ += quadRows_.back()->nnz_grad();
        continue;
      }
    }
    quadRows_.push_back(new QuadRow(cuts.rowCut(i)));
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

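/** Remove the cuts at the given constraint indices, compacting the
    remaining cut rows and their bounds. */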
void TMINLP2TNLPQuadCuts::removeCuts(unsigned int n, const int * idxs){
  if(n == 0) return;
  vector<int> order(quadRows_.size());
  int m_tminlp = num_constraints() - quadRows_.size();

  // Free the rows being removed and drop their Hessian/Jacobian entries.
  for(unsigned int k = 0; k < n ; k++){
    int idx = idxs[k] - m_tminlp;
    quadRows_[idx]->remove_from_hessian(H_);
    curr_nnz_jac_ -= quadRows_[idx]->nnz_grad();
    delete quadRows_[idx];
    quadRows_[idx] = NULL;
  }

  // Mark removed positions with INT_MAX so that sorting pushes them to the
  // back, giving the compacted order of the surviving rows.
  for(unsigned int i = 0 ; i < order.size() ; i++){
    order[i] = i;
  }
  for(unsigned int i = 0 ; i < n ; i++){
    assert(idxs[i] - m_tminlp >= 0);
    order[ idxs[i] - m_tminlp ] = INT_MAX;
  }

  std::sort(order.begin(), order.end());

  // Compact the surviving rows and their bounds, then shrink the containers.
  int i;
  double * g_l = g_l_() + m_tminlp;
  double * g_u = g_u_() + m_tminlp;
  for(i = 0 ; order[i] < INT_MAX ; i++){
    assert(order[i] >= i);
    quadRows_[i] = quadRows_[order[i]];
    g_l[i] = g_l[order[i]];
    g_u[i] = g_u[order[i]];
  }
  quadRows_.erase(quadRows_.begin() + i, quadRows_.end());
  g_l_.erase(g_l_.begin() + m_tminlp + i, g_l_.end());
  g_u_.erase(g_u_.begin() + m_tminlp + i, g_u_.end());
}

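/** Debugging helper: print the entries of the Hessian map H_. */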
void
TMINLP2TNLPQuadCuts::printH(){
  int nnz = 0;
  for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
    std::cout<<"nnz: "<<nnz
             <<", jCol: "<<i->first.first
             <<", iRow: "<<i->first.second<<std::endl;
    nnz++;
  }
}

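/** Replace the objective function by the linear function c_0 + obj . x
    (see eval_f and eval_grad_f). */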
void
TMINLP2TNLPQuadCuts::set_linear_objective(int n_var, const double * obj, double c_0){
  assert(n_var == TMINLP2TNLP::num_variables());
  obj_.resize(n_var);
  CoinCopyN(obj, n_var, obj_());
  c_ = c_0;
}

} // namespace Bonmin