#include "BonTMINLP2Quad.hpp"
#include "CoinHelperFunctions.hpp"
#include <climits>
#include <algorithm>
#include <vector>

using namespace Ipopt;

namespace Bonmin {

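/** Constructor from a TMINLP. Stores the sparsity pattern of the original
    Lagrangian Hessian in the adjustable map H_: the -1 tag marks entries
    owned by the original problem, while entries added later by quadratic
    cuts carry a positive use count. */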
TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const SmartPtr<Bonmin::TMINLP> tminlp):
  TMINLP2TNLP(tminlp)
{
  const int nnz_h = TMINLP2TNLP::nnz_h_lag();
  curr_nnz_jac_ = TMINLP2TNLP::nnz_jac_g();
  if(nnz_h > 0){
    int * jCol = new int [nnz_h];
    int * iRow = new int [nnz_h];

    // Request only the sparsity pattern of the original Hessian
    // (x, lambda and values are NULL).
    TMINLP2TNLP::eval_h(num_variables(), NULL, false,
        0., TMINLP2TNLP::num_constraints(), NULL, false,
        nnz_h, jCol, iRow, NULL);

    for(int i = 0 ; i < nnz_h ; i++){
#ifndef NDEBUG
      bool inserted =
#endif
      H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                               std::make_pair(i, -1))).second;
      assert(inserted == true);
    }
    delete [] jCol;
    delete [] iRow;
  }
  assert(nnz_h == (int) H_.size());
  obj_.reserve(TMINLP2TNLP::num_variables());
}

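/** Copy constructor. The Hessian map is rebuilt from scratch: first the
    entries of the original Hessian, then the entries contributed by the
    quadratic cut rows (which are deep-copied). */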
TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const TMINLP2TNLPQuadCuts &other):
  TMINLP2TNLP(other),
  quadRows_(other.quadRows_),
  H_(),
  curr_nnz_jac_(other.curr_nnz_jac_),
  obj_(other.obj_)
{
  const size_t nnz_h = TMINLP2TNLP::nnz_h_lag();

  if(nnz_h > 0){
    int * jCol = new int [nnz_h];
    int * iRow = new int [nnz_h];
    // Only the original constraints are known to the underlying TMINLP.
    int m = TMINLP2TNLP::num_constraints() - (int)quadRows_.size();
    TMINLP2TNLP::eval_h(num_variables(), NULL, false,
        0., m, NULL, false,
        (int)nnz_h, jCol, iRow, NULL);

    for(size_t i = 0 ; i < nnz_h ; i++){
#ifndef NDEBUG
      bool inserted =
#endif
      H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                               std::make_pair((int)i, -1))).second;
      assert(inserted == true);
    }
    delete [] jCol;
    delete [] iRow;
  }
  assert(nnz_h == H_.size());

  // quadRows_ was copied above and still points to the other object's rows:
  // replace each pointer with a deep copy.
  for(size_t i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i] = new QuadRow(*quadRows_[i]);
  }

  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i]->add_to_hessian(H_, offset);
  }
}

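/** Destructor. Frees the QuadRow objects owned by this adapter. */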
TMINLP2TNLPQuadCuts::~TMINLP2TNLPQuadCuts(){
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    delete quadRows_[i];
  }
}

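/** Get the dimensions of the problem. The numbers of Hessian and Jacobian
    nonzeros reported by the base class are replaced by counts that include
    the quadratic cuts. */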
bool TMINLP2TNLPQuadCuts::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
    Index& nnz_h_lag,
    TNLP::IndexStyleEnum& index_style){
  bool ret_val = TMINLP2TNLP::get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
  nnz_h_lag = (int)H_.size();
  nnz_jac_g = curr_nnz_jac_;
  return ret_val;
}

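/** Get bounds on variables and constraints. The bounds of the cut rows are
    stored in g_l_ and g_u_ alongside the original ones, so the base class
    does all the work. */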
bool TMINLP2TNLPQuadCuts::get_bounds_info(Index n, Number* x_l, Number* x_u,
    Index m, Number* g_l, Number* g_u){
  return TMINLP2TNLP::get_bounds_info(n, x_l, x_u,
      m, g_l, g_u);
}

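/** Report the linearity of the constraints, appending the linearity of the
    cut rows after that of the original constraints. */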
bool
TMINLP2TNLPQuadCuts::get_constraints_linearity(Index m, LinearityType* const_types)
{
  bool ret_val = TMINLP2TNLP::get_constraints_linearity(m - (int)quadRows_.size(), const_types);
  const_types += m - (int)quadRows_.size();
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    if(quadRows_[i]->isLinear())
      const_types[i] = TNLP::LINEAR;
    else
      const_types[i] = TNLP::NON_LINEAR;
  }
  return ret_val;
}

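/** Get the starting point (simple pass-through to the base class). */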
bool TMINLP2TNLPQuadCuts::get_starting_point(Index n, bool init_x, Number* x,
    bool init_z, Number* z_L, Number* z_U,
    Index m, bool init_lambda,
    Number* lambda){
  return TMINLP2TNLP::get_starting_point(n, init_x, x, init_z, z_L, z_U,
      m, init_lambda, lambda);
}

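/** Get scaling parameters. The original constraints keep the scaling
    computed by the base class; every cut row receives a scaling factor
    of 1. */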
bool TMINLP2TNLPQuadCuts::get_scaling_parameters(Number& obj_scaling,
    bool& use_x_scaling, Index n,
    Number* x_scaling,
    bool& use_g_scaling, Index m,
    Number* g_scaling){
  assert(num_constraints() == m);
  // The call must be qualified: an unqualified call would recurse into this
  // function forever instead of reaching the base class.
  bool retval = TMINLP2TNLP::get_scaling_parameters(obj_scaling, use_x_scaling, n,
      x_scaling, use_g_scaling,
      m - (int)quadRows_.size(), g_scaling);
  if(use_g_scaling){
    g_scaling += m - (int)quadRows_.size();
    CoinFillN(g_scaling, (int)quadRows_.size(), 1.);
  }
  return retval;
}

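/** Compute the value of the objective. If a linear objective has been
    installed with set_linear_objective, it replaces the original one. */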
bool
TMINLP2TNLPQuadCuts::eval_f(Index n, const Number* x, bool new_x,
    Number& obj_value){
  if(obj_.empty()){
    return TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
  }
  if(new_x){
    // Let the base class see the new point; the value it computes is
    // overwritten just below.
    TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
  }
  obj_value = c_;
  assert(n == (int) obj_.size());
  for(int i = 0 ; i < n ; i++){
    obj_value += obj_[i] * x[i];
  }
  return true;
}

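/** Compute the gradient of the objective (the constant vector obj_ when a
    linear objective has been installed). */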
bool
TMINLP2TNLPQuadCuts::eval_grad_f(Index n, const Number* x, bool new_x,
    Number* grad_f){
  if(obj_.empty()){
    return TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
  }
  if(new_x){
    TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
  }
  assert(n == (int) obj_.size());
  for(int i = 0 ; i < n ; i++){
    grad_f[i] = obj_[i];
  }
  return true;
}

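/** Compute the value of a single constraint: original constraints are
    delegated to the base class, cut rows are evaluated through their
    QuadRow. */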
bool TMINLP2TNLPQuadCuts::eval_gi(Index n, const Number* x, bool new_x,
    Index i, Number& gi)
{
  int m_orig = num_constraints() - (int)quadRows_.size();
  if(i < m_orig){
    return TMINLP2TNLP::eval_gi(n, x, new_x, i, gi);
  }
  i -= m_orig;
  gi = quadRows_[i]->eval_f(x, new_x);
  return true;
}

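/** Compute the values of the constraints: the original ones first, then one
    entry per cut row. */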
bool TMINLP2TNLPQuadCuts::eval_g(Index n, const Number* x, bool new_x,
    Index m, Number* g){
  int m_tminlp = m - (int)quadRows_.size();
  bool retval = TMINLP2TNLP::eval_g(n, x, new_x, m_tminlp, g);
  g += m_tminlp;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    g[i] = quadRows_[i]->eval_f(x, new_x);
  }
  return retval;
}

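/** Compute the structure or the values of the Jacobian: the block of the
    original problem comes first, followed by the gradient of each cut
    row. */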
bool TMINLP2TNLPQuadCuts::eval_jac_g(Index n, const Number* x, bool new_x,
    Index m, Index nele_jac, Index* iRow,
    Index *jCol, Number* values){
  int n_ele_orig = TMINLP2TNLP::nnz_jac_g();
  int m_orig = m - (int)quadRows_.size();
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  bool retval = TMINLP2TNLP::eval_jac_g(n, x, new_x, m_orig,
      n_ele_orig, iRow, jCol, values);
  if(values == NULL){ // structure requested
    assert(iRow != NULL);
    assert(jCol != NULL);
    iRow += n_ele_orig;
    jCol += n_ele_orig;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      const int & nnz = quadRows_[i]->nnz_grad();
      Ipopt::Index mi = m_orig + i + offset;
      // Every entry of a cut's gradient lives in the same constraint row.
      CoinFillN(iRow, nnz, mi);
      quadRows_[i]->gradiant_struct(nnz, jCol, offset);
      iRow += nnz;
      jCol += nnz;
    }
  }
  else { // values requested
    assert(iRow == NULL);
    assert(jCol == NULL);
    values += n_ele_orig;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      const int & nnz = quadRows_[i]->nnz_grad();
      quadRows_[i]->eval_grad(nnz, x, new_x, values);
      values += nnz;
    }
  }
  return retval;
}

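/** Compute the structure or the values of the gradient of a single
    constraint i. */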
bool TMINLP2TNLPQuadCuts::eval_grad_gi(Index n, const Number* x, bool new_x,
    Index i, Index& nele_grad_gi, Index* jCol,
    Number* values)
{
  int m_orig = num_constraints() - (int)quadRows_.size();
  if(i < m_orig){
    return TMINLP2TNLP::eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
  }
  i -= m_orig;
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  if(values == NULL){
    assert(jCol != NULL);
    nele_grad_gi = quadRows_[i]->nnz_grad();
    quadRows_[i]->gradiant_struct(nele_grad_gi, jCol, offset);
  }
  else{
    assert(jCol == NULL);
    quadRows_[i]->eval_grad(nele_grad_gi, x, new_x, values);
  }
  return true;
}

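/** Compute the structure or the values of the Lagrangian Hessian. In
    structure mode the map H_ is traversed and the entries coming from cuts
    are assigned positions after the original ones; in value mode the
    original Hessian is evaluated first and each cut then adds its own
    contribution, weighted by its multiplier. */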
bool TMINLP2TNLPQuadCuts::eval_h(Index n, const Number* x, bool new_x,
    Number obj_factor, Index m, const Number* lambda,
    bool new_lambda, Index nele_hess,
    Index* iRow, Index* jCol, Number* values){
  // When a linear objective has been substituted, the original objective
  // must not contribute to the Hessian.
  if(!obj_.empty()) obj_factor = 0;
  if(values == NULL){
    assert(iRow != NULL);
    assert(jCol != NULL);
#ifdef DEBUG
    std::cout<<"Hessian structure"<<std::endl;
#endif
    int nnz = 0;
    int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
    int nnz_sup = nnz_h_lag_orig;
    for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
      if(i->second.second == -1){ // entry from the original Hessian
        assert(i->second.first < nnz_h_lag_orig);
      }
      else { // entry contributed by quadratic cuts
        assert(i->second.second > 0);
        assert(i->second.first >= nnz_h_lag_orig);
        i->second.first = nnz_sup;
        nnz_sup++;
      }
      iRow[i->second.first] = i->first.first;
      jCol[i->second.first] = i->first.second;
#ifdef DEBUG
      printf("iRow %i, jCol %i : nnz %i\n",
             i->first.second, i->first.first,
             i->second.first);
#endif
      nnz++;
    }
    assert(nnz == (int) H_.size());
    return true;
  }
  else {
#ifdef DEBUG
    std::cout<<"Computing hessian"<<std::endl;
#endif
    assert(iRow == NULL);
    assert(jCol == NULL);
    int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
    int m_orig = m - (int)quadRows_.size();
    bool ret_val = TMINLP2TNLP::eval_h(n, x, new_x, obj_factor, m_orig, lambda, new_lambda,
        nnz_h_lag_orig, iRow, jCol, values);
    // Zero the entries reserved for the cuts, then accumulate each cut's
    // Hessian weighted by its multiplier.
    CoinZeroN(values + nnz_h_lag_orig, (int)H_.size() - nnz_h_lag_orig);
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      quadRows_[i]->eval_hessian(lambda[i + m_orig], values);
    }
    return ret_val;
  }
}

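/** Add cuts to the problem: the quadratic cuts are turned into QuadRow
    objects and registered in the Hessian, the plain row cuts are appended
    through addRowCuts, and the dual vectors are resized accordingly. */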
void
TMINLP2TNLPQuadCuts::addCuts(const Cuts & cuts, bool safe){
  assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
  printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  g_l_.reserve(g_l_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
  g_u_.reserve(g_u_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
  quadRows_.reserve(quadRows_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());

  int n = cuts.sizeQuadCuts();
  for(int i = 0 ; i < n ; i++){
    g_l_.push_back(cuts.quadCut(i).lb());
    g_u_.push_back(cuts.quadCut(i).ub());
    quadRows_.push_back(new QuadRow(cuts.quadCut(i)));
    quadRows_.back()->add_to_hessian(H_, offset);
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  // Append the plain row cuts; cuts derives from OsiCuts so it binds
  // directly, without a sliced by-value copy.
  addRowCuts(cuts, safe);
  // Make room for the multipliers of the new rows in the dual vectors.
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

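/** Add some linear or quadratic row cuts to the problem, by copy. */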
void TMINLP2TNLPQuadCuts::addCuts(unsigned int numcuts,
    const OsiRowCut ** cuts){
#ifdef DEBUG
  printf("Adding %u cuts\n", numcuts);
#endif
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  g_l_.reserve(g_l_.size() + numcuts);
  g_u_.reserve(g_u_.size() + numcuts);
  quadRows_.reserve(quadRows_.size() + numcuts);
  for(unsigned int i = 0 ; i < numcuts ; i++){
    g_l_.push_back(cuts[i]->lb());
    g_u_.push_back(cuts[i]->ub());

    // Quadratic cuts contribute to the Hessian, plain row cuts do not.
    const QuadCut * quadCut = dynamic_cast<const QuadCut *> (cuts[i]);
    if(quadCut){
      quadRows_.push_back(new QuadRow(*quadCut));
      quadRows_.back()->add_to_hessian(H_, offset);
    }
    else
      quadRows_.push_back(new QuadRow(*cuts[i]));
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

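/** Add cuts from an OsiCuts collection; if the collection is actually a
    Bonmin::Cuts object, the quadratic cuts it carries are added as well. */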
void TMINLP2TNLPQuadCuts::addCuts(const OsiCuts& cuts){
  assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
  printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif

  const Cuts * quadCuts = dynamic_cast<const Cuts *>(&cuts);
  if(quadCuts) {
    addCuts(*quadCuts, true);
    return;
  }

  addRowCuts(cuts, true);
}

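/** Append the row cuts of cuts. When safe is true each cut is probed with a
    dynamic_cast, because a QuadCut may be hiding behind an OsiRowCut
    pointer; when safe is false it is asserted that there are none. */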
void TMINLP2TNLPQuadCuts::addRowCuts(const OsiCuts& cuts, bool safe){

  int n = cuts.sizeRowCuts();
  g_l_.reserve(g_l_.size() + n);
  g_u_.reserve(g_u_.size() + n);
  quadRows_.reserve(quadRows_.size() + n);

  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  for(int i = 0 ; i < n ; i++){
    g_l_.push_back(cuts.rowCut(i).lb());
    g_u_.push_back(cuts.rowCut(i).ub());
    if(safe == false){
      assert(dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i)) == NULL);
    }
    else {
      const QuadCut * cut = dynamic_cast<const QuadCut *> (cuts.rowCutPtr(i));
      if(cut){
        quadRows_.push_back(new QuadRow(*cut));
        quadRows_.back()->add_to_hessian(H_, offset);
        curr_nnz_jac_ += quadRows_.back()->nnz_grad();
        continue;
      }
    }
    quadRows_.push_back(new QuadRow(cuts.rowCut(i)));
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

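/** Remove the cuts with the given constraint indices and compact quadRows_,
    g_l_ and g_u_. */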
void TMINLP2TNLPQuadCuts::removeCuts(unsigned int n, const int * idxs){
  if(n == 0) return;
  std::vector<int> order(quadRows_.size());
  int m_tminlp = num_constraints() - (int)quadRows_.size();

  for(unsigned int k = 0; k < n ; k++){
    int idx = idxs[k] - m_tminlp;
    quadRows_[idx]->remove_from_hessian(H_);
    curr_nnz_jac_ -= quadRows_[idx]->nnz_grad();
    delete quadRows_[idx];
    quadRows_[idx] = NULL;
  }

  // Mark the removed rows with INT_MAX; after sorting, the surviving row
  // indices come first in increasing order and can be compacted in place.
  for(unsigned int i = 0 ; i < order.size() ; i++){
    order[i] = i;
  }
  for(unsigned int i = 0 ; i < n ; i++){
    assert(idxs[i] - m_tminlp >= 0);
    order[ idxs[i] - m_tminlp ] = INT_MAX;
  }

  std::sort(order.begin(), order.end());

  int i;
  double * g_l = g_l_() + m_tminlp;
  double * g_u = g_u_() + m_tminlp;
  for(i = 0 ; order[i] < INT_MAX ; i++){
    assert(order[i] >= i);
    quadRows_[i] = quadRows_[order[i]];
    g_l[i] = g_l[order[i]];
    g_u[i] = g_u[order[i]];
  }
  quadRows_.erase(quadRows_.begin() + i, quadRows_.end());
  g_l_.erase(g_l_.begin() + m_tminlp + i, g_l_.end());
  g_u_.erase(g_u_.begin() + m_tminlp + i, g_u_.end());
}

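/** Print the contents of the Hessian map H_ (debugging helper). */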
void
TMINLP2TNLPQuadCuts::printH(){
  int nnz = 0;
  for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
    std::cout<<"nnz: "<<nnz
             <<", jCol: "<<i->first.first
             <<", iRow: "<<i->first.second<<std::endl;
    nnz++;
  }
}

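/** Replace the objective of the problem by the linear function with
    coefficients obj and constant term c_0. */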
void
TMINLP2TNLPQuadCuts::set_linear_objective(int n_var, const double * obj, double c_0){
  assert(n_var == TMINLP2TNLP::num_variables());
  obj_.resize(n_var);
  CoinCopyN(obj, n_var, obj_());
  c_ = c_0;
}

} // namespace Bonmin