#include "BonTMINLP2Quad.hpp"
#include <climits>
#include <algorithm> // std::sort (used in removeCuts)
#include <cstdio>    // printf (debug output)
#include <iostream>  // std::cout (debug output, printH)

namespace Bonmin {

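/** Construct from a TMINLP. The structure of the original Lagrangian Hessian
    is cached in H_, a map from an entry's (jCol, iRow) key to its position in
    the sparse representation plus a marker (-1 for original entries) or a
    use-count (for entries contributed by cuts), so that entries added later
    by quadratic cuts can be merged with the original ones. */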
TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const SmartPtr<Bonmin::TMINLP> tminlp):
    TMINLP2TNLP(tminlp)
{
  const int nnz_h = TMINLP2TNLP::nnz_h_lag();
  curr_nnz_jac_ = TMINLP2TNLP::nnz_jac_g();
  if(nnz_h > 0){
    int * jCol = new int[nnz_h];
    int * iRow = new int[nnz_h];

    // Query the Hessian structure of the original problem. Note the swapped
    // index arrays: H_ is keyed (jCol, iRow) throughout this class, which is
    // harmless since the Hessian is symmetric.
    TMINLP2TNLP::eval_h(num_variables(), NULL, false,
                        0., TMINLP2TNLP::num_constraints(), NULL, false,
                        nnz_h, jCol, iRow, NULL);

    for(int i = 0 ; i < nnz_h ; i++){
      // Original entries keep their position i and carry the marker -1,
      // distinguishing them from entries added by quadratic cuts.
      std::pair<AdjustableMat::iterator, bool> res =
        H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                 std::make_pair(i, -1)));
      assert(res.second == true);
    }
    delete [] jCol;
    delete [] iRow;
  }
  assert(nnz_h == (int) H_.size());
  obj_.reserve(TMINLP2TNLP::num_variables());
}

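/** Copy constructor. The Hessian map is rebuilt rather than copied: the
    quadratic rows are deep-copied and then re-register their entries in the
    new H_. */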
TMINLP2TNLPQuadCuts::TMINLP2TNLPQuadCuts(const TMINLP2TNLPQuadCuts &other):
    TMINLP2TNLP(other),
    quadRows_(other.quadRows_),
    H_(),
    curr_nnz_jac_(other.curr_nnz_jac_),
    obj_(other.obj_)
{
  const int nnz_h = TMINLP2TNLP::nnz_h_lag();

  if(nnz_h > 0){
    int * jCol = new int[nnz_h];
    int * iRow = new int[nnz_h];
    // Only the original constraints: num_constraints() already counts the
    // rows of the copied cuts.
    int m = TMINLP2TNLP::num_constraints() - (int)quadRows_.size();
    TMINLP2TNLP::eval_h(num_variables(), NULL, false,
                        0., m, NULL, false,
                        nnz_h, jCol, iRow, NULL);

    for(int i = 0 ; i < nnz_h ; i++){
      std::pair<AdjustableMat::iterator, bool> res =
        H_.insert(std::make_pair(std::make_pair(jCol[i], iRow[i]),
                                 std::make_pair(i, -1)));
      assert(res.second == true);
    }
    delete [] jCol;
    delete [] iRow;
  }
  assert(nnz_h == (int) H_.size());

  // Deep-copy the quadratic rows, then register their Hessian entries.
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i] = new QuadRow(*quadRows_[i]);
  }

  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    quadRows_[i]->add_to_hessian(H_, offset);
  }
}

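/** Destructor. The QuadRow representations of the cuts are owned here. */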
TMINLP2TNLPQuadCuts::~TMINLP2TNLPQuadCuts(){
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    delete quadRows_[i];
  }
}

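/** Get dimensions from the base class, then report the nonzero counts of the
    Jacobian and Hessian extended by the cuts. */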
bool TMINLP2TNLPQuadCuts::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
                                       Index& nnz_h_lag,
                                       TNLP::IndexStyleEnum& index_style){
  bool ret_val = TMINLP2TNLP::get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
  // Override the nonzero counts with the ones including the quadratic cuts.
  nnz_h_lag = (Index)H_.size();
  nnz_jac_g = curr_nnz_jac_;
  return ret_val;
}

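/** Bounds are simply forwarded: the base class's bound arrays already
    include one row per cut (they are extended in addCuts/addRowCuts). */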
bool TMINLP2TNLPQuadCuts::get_bounds_info(Index n, Number* x_l, Number* x_u,
                                          Index m, Number* g_l, Number* g_u){
  return TMINLP2TNLP::get_bounds_info(n, x_l, x_u, m, g_l, g_u);
}

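/** Linearity of the constraints: the base class reports on the original
    rows; each cut row is LINEAR or NON_LINEAR depending on whether its
    quadratic part is empty. */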
bool
TMINLP2TNLPQuadCuts::get_constraints_linearity(Index m, LinearityType* const_types)
{
  bool ret_val = TMINLP2TNLP::get_constraints_linearity(m - (Index)quadRows_.size(), const_types);
  const_types += m - quadRows_.size();
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    const_types[i] = quadRows_[i]->isLinear() ? TNLP::LINEAR : TNLP::NON_LINEAR;
  }
  return ret_val;
}

bool TMINLP2TNLPQuadCuts::get_starting_point(Index n, bool init_x, Number* x,
                                             bool init_z, Number* z_L, Number* z_U,
                                             Index m, bool init_lambda,
                                             Number* lambda){
  return TMINLP2TNLP::get_starting_point(n, init_x, x, init_z, z_L, z_U,
                                         m, init_lambda, lambda);
}

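/** Scaling: delegate to the base class for the original rows; the cut rows
    always get a scaling factor of 1. */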
bool TMINLP2TNLPQuadCuts::get_scaling_parameters(Number& obj_scaling,
                                                 bool& use_x_scaling, Index n,
                                                 Number* x_scaling,
                                                 bool& use_g_scaling, Index m,
                                                 Number* g_scaling){
  assert(num_constraints() == m);
  // Call the base class explicitly: an unqualified call here would recurse
  // forever.
  bool retval = TMINLP2TNLP::get_scaling_parameters(obj_scaling, use_x_scaling, n,
                                                    x_scaling, use_g_scaling,
                                                    m - (int)quadRows_.size(), g_scaling);
  if(use_g_scaling){
    // Cut rows are not scaled.
    g_scaling += m - quadRows_.size();
    CoinFillN(g_scaling, (int)quadRows_.size(), 1.);
  }
  return retval;
}

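/** Objective value. Once set_linear_objective has been called, the original
    objective is replaced by the linear function c_ + obj_^T x. */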
bool
TMINLP2TNLPQuadCuts::eval_f(Index n, const Number* x, bool new_x,
                            Number& obj_value){
  if(obj_.empty()){
    return TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
  }
  if(new_x){
    // The original objective is still evaluated at new points (its value is
    // overwritten below), presumably to keep the base class's caches current.
    TMINLP2TNLP::eval_f(n, x, new_x, obj_value);
  }
  obj_value = c_;
  assert(n == (int) obj_.size());
  for(int i = 0 ; i < n ; i++){
    obj_value += obj_[i] * x[i];
  }
  return true;
}

bool
TMINLP2TNLPQuadCuts::eval_grad_f(Index n, const Number* x, bool new_x,
                                 Number* grad_f){
  if(obj_.empty()){
    return TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
  }
  if(new_x){
    TMINLP2TNLP::eval_grad_f(n, x, new_x, grad_f);
  }
  // The gradient of the linear objective is the constant vector obj_.
  assert(n == (int) obj_.size());
  for(int i = 0 ; i < n ; i++){
    grad_f[i] = obj_[i];
  }
  return true;
}

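/** Value of a single constraint: original rows are forwarded to the base
    class, cut rows are evaluated from their QuadRow representation. */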
bool TMINLP2TNLPQuadCuts::eval_gi(Index n, const Number* x, bool new_x,
                                  Index i, Number& gi)
{
  int m_orig = num_constraints() - (int)quadRows_.size();
  if(i < m_orig){
    return TMINLP2TNLP::eval_gi(n, x, new_x, i, gi);
  }
  // Cut rows are evaluated directly; this always succeeds.
  i -= m_orig;
  gi = quadRows_[i]->eval_f(x, new_x);
  return true;
}

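/** Values of all constraints: the original rows first, then one entry per
    cut row. */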
bool TMINLP2TNLPQuadCuts::eval_g(Index n, const Number* x, bool new_x,
                                 Index m, Number* g){
  int m_tminlp = m - (int)quadRows_.size();
  bool retval = TMINLP2TNLP::eval_g(n, x, new_x, m_tminlp, g);
  g += m_tminlp;
  for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
    g[i] = quadRows_[i]->eval_f(x, new_x);
  }
  return retval;
}

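/** Jacobian of the constraints. Layout of the sparse representation: the
    nnz_jac_g() entries of the original problem come first, followed by one
    contiguous block of entries per cut row. */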
bool TMINLP2TNLPQuadCuts::eval_jac_g(Index n, const Number* x, bool new_x,
                                     Index m, Index nele_jac, Index* iRow,
                                     Index *jCol, Number* values){
  int n_ele_orig = TMINLP2TNLP::nnz_jac_g();
  int m_orig = m - (int)quadRows_.size();
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  bool retval = TMINLP2TNLP::eval_jac_g(n, x, new_x, m_orig,
                                        n_ele_orig, iRow, jCol, values);
  if(values == NULL){ // structure query
    assert(iRow != NULL);
    assert(jCol != NULL);
    iRow += n_ele_orig;
    jCol += n_ele_orig;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      const int & nnz = quadRows_[i]->nnz_grad();
      Ipopt::Index mi = m_orig + i + offset;
      // All entries of this block lie in constraint row mi.
      CoinFillN(iRow, nnz, mi);
      quadRows_[i]->gradiant_struct(nnz, jCol, offset);
      iRow += nnz;
      jCol += nnz;
    }
  }
  else { // values query
    assert(iRow == NULL);
    assert(jCol == NULL);
    values += n_ele_orig;
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      const int & nnz = quadRows_[i]->nnz_grad();
      quadRows_[i]->eval_grad(nnz, x, new_x, values);
      values += nnz;
    }
  }
  return retval;
}

bool TMINLP2TNLPQuadCuts::eval_grad_gi(Index n, const Number* x, bool new_x,
                                       Index i, Index& nele_grad_gi, Index* jCol,
                                       Number* values)
{
  int m_orig = num_constraints() - (int)quadRows_.size();
  if(i < m_orig){
    return TMINLP2TNLP::eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
  }
  i -= m_orig;
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  if(values == NULL){ // structure query
    assert(jCol != NULL);
    nele_grad_gi = quadRows_[i]->nnz_grad();
    quadRows_[i]->gradiant_struct(nele_grad_gi, jCol, offset);
  }
  else{ // values query
    assert(jCol == NULL);
    quadRows_[i]->eval_grad(nele_grad_gi, x, new_x, values);
  }
  return true;
}
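
/** Hessian of the Lagrangian. In a structure query the entries of the
    original Hessian keep their positions and the entries contributed by the
    cuts are assigned the following slots; in a values query the base class
    fills the original part and each cut adds its multiplier-weighted
    quadratic term. */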
bool TMINLP2TNLPQuadCuts::eval_h(Index n, const Number* x, bool new_x,
                                 Number obj_factor, Index m, const Number* lambda,
                                 bool new_lambda, Index nele_hess,
                                 Index* iRow, Index* jCol, Number* values){
  // With a linear objective in place the objective Hessian vanishes.
  if(!obj_.empty()) obj_factor = 0;
  if(values == NULL){ // structure query
    assert(iRow != NULL);
    assert(jCol != NULL);
#ifdef DEBUG
    std::cout<<"Hessian structure"<<std::endl;
#endif
    int nnz = 0;
    int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
    int nnz_sup = nnz_h_lag_orig;
    for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
      if(i->second.second == -1){ // entry of the original Hessian
        assert(i->second.first < nnz_h_lag_orig);
      }
      else { // entry contributed by quadratic cuts: assign the next slot
        assert(i->second.second > 0);
        assert(i->second.first >= nnz_h_lag_orig);
        i->second.first = nnz_sup;
        nnz_sup++;
      }
      iRow[i->second.first] = i->first.first;
      jCol[i->second.first] = i->first.second;
#ifdef DEBUG
      printf("iRow %i, jCol %i : nnz %i\n",
             i->first.second, i->first.first,
             i->second.first);
#endif
      nnz++;
    }
    assert(nnz == (int) H_.size());
    return true;
  }
  else { // values query
#ifdef DEBUG
    std::cout<<"Computing hessian"<<std::endl;
#endif
    assert(iRow == NULL);
    assert(jCol == NULL);
    int nnz_h_lag_orig = TMINLP2TNLP::nnz_h_lag();
    int m_orig = m - (int)quadRows_.size();
    // The base problem fills the first nnz_h_lag_orig entries ...
    bool ret_val = TMINLP2TNLP::eval_h(n, x, new_x, obj_factor, m_orig, lambda, new_lambda,
                                       nnz_h_lag_orig, iRow, jCol, values);
    // ... the rest is accumulated from the cuts, weighted by their multipliers.
    CoinZeroN(values + nnz_h_lag_orig, (int)H_.size() - nnz_h_lag_orig);
    for(unsigned int i = 0 ; i < quadRows_.size() ; i++){
      quadRows_[i]->eval_hessian(lambda[i + m_orig], values);
    }
    return ret_val;
  }
}

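/** Add a Bonmin::Cuts collection: quadratic cuts first (bounds, Jacobian and
    Hessian bookkeeping), then the plain row cuts via addRowCuts.

    A minimal usage sketch (hypothetical driver code, names not part of this
    file):

      SmartPtr<TMINLP2TNLPQuadCuts> prob = new TMINLP2TNLPQuadCuts(tminlp);
      Cuts cuts;                  // filled by some cut generator
      prob->addCuts(cuts, true);  // safe == true: QuadCuts may be present
*/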
void
TMINLP2TNLPQuadCuts::addCuts(const Cuts & cuts, bool safe){
  assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
  printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  g_l_.reserve(g_l_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
  g_u_.reserve(g_u_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());
  quadRows_.reserve(quadRows_.size() + cuts.sizeQuadCuts() + cuts.sizeRowCuts());

  // Quadratic cuts: register bounds, Jacobian and Hessian contributions.
  int n = cuts.sizeQuadCuts();
  for(int i = 0 ; i < n ; i++){
    g_l_.push_back(cuts.quadCut(i).lb());
    g_u_.push_back(cuts.quadCut(i).ub());
    quadRows_.push_back(new QuadRow(cuts.quadCut(i)));
    quadRows_.back()->add_to_hessian(H_, offset);
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  // Plain row cuts (pass a reference to the base part, avoiding a copy).
  addRowCuts(static_cast<const OsiCuts &>(cuts), safe);
  // Make room for the multipliers and starting point of the new rows.
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

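/** Add numcuts cuts given as an array of pointers; cuts that are actually
    QuadCuts are recognized with a dynamic_cast. */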
void TMINLP2TNLPQuadCuts::addCuts(unsigned int numcuts,
                                  const OsiRowCut ** cuts){
#ifdef DEBUG
  printf("Adding %u cuts\n", numcuts);
#endif
  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;
  g_l_.reserve(g_l_.size() + numcuts);
  g_u_.reserve(g_u_.size() + numcuts);
  quadRows_.reserve(quadRows_.size() + numcuts);
  for(unsigned int i = 0 ; i < numcuts ; i++){
    g_l_.push_back(cuts[i]->lb());
    g_u_.push_back(cuts[i]->ub());

    // Quadratic cuts contribute to the Hessian; plain row cuts do not.
    const QuadCut * quadCut = dynamic_cast<const QuadCut *>(cuts[i]);
    if(quadCut){
      quadRows_.push_back(new QuadRow(*quadCut));
      quadRows_.back()->add_to_hessian(H_, offset);
    }
    else
      quadRows_.push_back(new QuadRow(*cuts[i]));
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

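/** Add a generic OsiCuts collection, dispatching to the Cuts overload when
    the collection actually carries quadratic cuts. */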
void TMINLP2TNLPQuadCuts::addCuts(const OsiCuts& cuts){
  assert(cuts.sizeColCuts() == 0);
#ifdef DEBUG
  printf("Adding %i cuts\n", cuts.sizeRowCuts());
#endif
  const Cuts * quadCuts = dynamic_cast<const Cuts *>(&cuts);
  if(quadCuts) {
    addCuts(*quadCuts, true);
    return;
  }

  addRowCuts(cuts, true);
}

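/** Append row cuts. With safe == false the caller asserts that no cut in the
    collection is a QuadCut; with safe == true each cut is checked and
    quadratic ones get their Hessian contribution registered. */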
void TMINLP2TNLPQuadCuts::addRowCuts(const OsiCuts& cuts, bool safe){

  int n = cuts.sizeRowCuts();
  g_l_.reserve(g_l_.size() + n);
  g_u_.reserve(g_u_.size() + n);
  quadRows_.reserve(quadRows_.size() + n);

  int offset = TMINLP2TNLP::index_style() == Ipopt::TNLP::FORTRAN_STYLE;

  for(int i = 0 ; i < n ; i++){
    g_l_.push_back(cuts.rowCut(i).lb());
    g_u_.push_back(cuts.rowCut(i).ub());
    if(safe == false){
      // Caller guarantees there are no quadratic cuts in the collection.
      assert(dynamic_cast<const QuadCut *>(cuts.rowCutPtr(i)) == NULL);
    }
    else {
      const QuadCut * cut = dynamic_cast<const QuadCut *>(cuts.rowCutPtr(i));
      if(cut){
        quadRows_.push_back(new QuadRow(*cut));
        quadRows_.back()->add_to_hessian(H_, offset);
        curr_nnz_jac_ += quadRows_.back()->nnz_grad();
        continue;
      }
    }
    quadRows_.push_back(new QuadRow(cuts.rowCut(i)));
    curr_nnz_jac_ += quadRows_.back()->nnz_grad();
  }
  duals_sol_.resize(g_l_.size() + 2*x_l_.size(), 0.);
  x_init_.resize(g_l_.size() + 3*x_l_.size(), 0.);
  duals_init_ = x_init_() + x_l_.size();
}

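/** Remove the cuts whose constraint indices are listed in idxs: delete their
    QuadRows, subtract their Jacobian/Hessian contributions, and compact the
    surviving rows and bounds while preserving their relative order. */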
void TMINLP2TNLPQuadCuts::removeCuts(unsigned int n, const int * idxs){
  if(n == 0) return;
  vector<int> order(quadRows_.size());
  int m_tminlp = num_constraints() - (int)quadRows_.size();

  // Delete the removed rows and unregister their Hessian entries.
  for(unsigned int k = 0 ; k < n ; k++){
    int idx = idxs[k] - m_tminlp;
    quadRows_[idx]->remove_from_hessian(H_);
    curr_nnz_jac_ -= quadRows_[idx]->nnz_grad();
    delete quadRows_[idx];
    quadRows_[idx] = NULL;
  }

  // Mark deleted slots with INT_MAX, then sort so that the survivors come
  // first in their original relative order.
  for(unsigned int i = 0 ; i < order.size() ; i++){
    order[i] = i;
  }
  for(unsigned int i = 0 ; i < n ; i++){
    assert(idxs[i] - m_tminlp >= 0);
    order[ idxs[i] - m_tminlp ] = INT_MAX;
  }

  std::sort(order.begin(), order.end());

  // Compact the surviving rows and their bounds towards the front.
  int i;
  double * g_l = g_l_() + m_tminlp;
  double * g_u = g_u_() + m_tminlp;
  for(i = 0 ; order[i] < INT_MAX ; i++){
    assert(order[i] >= i);
    quadRows_[i] = quadRows_[order[i]];
    g_l[i] = g_l[order[i]];
    g_u[i] = g_u[order[i]];
  }
  quadRows_.erase(quadRows_.begin() + i, quadRows_.end());
  g_l_.erase(g_l_.begin() + m_tminlp + i, g_l_.end());
  g_u_.erase(g_u_.begin() + m_tminlp + i, g_u_.end());
}

void
TMINLP2TNLPQuadCuts::printH(){
  int nnz = 0;
  for(AdjustableMat::iterator i = H_.begin() ; i != H_.end() ; i++){
    std::cout<<"nnz: "<<nnz
             <<", jCol: "<<i->first.first
             <<", iRow: "<<i->first.second<<std::endl;
    nnz++;
  }
}
00488
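/** Replace the objective by the linear function c_0 + obj^T x (see eval_f
    and eval_grad_f). */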
void
TMINLP2TNLPQuadCuts::set_linear_objective(int n_var, const double * obj, double c_0){
  assert(n_var == TMINLP2TNLP::num_variables());
  obj_.resize(n_var);
  CoinCopyN(obj, n_var, obj_());
  c_ = c_0;
}

} // namespace Bonmin