#include "BonTMINLP2TNLP.hpp"
#include "IpBlas.hpp"
#include "IpAlgTypes.hpp"
#include "CoinHelperFunctions.hpp" // CoinZeroN (may already come in via BonTMINLP2TNLP.hpp)
#include "CoinFloatEqual.hpp"      // CoinRelFltEq
#include <cfloat>  // DBL_MAX
#include <cmath>   // ceil
#include <climits>
#include <string>
#include <fstream>
#include <sstream>
#include "Ipopt/BonIpoptInteriorWarmStarter.hpp"

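// Global abort flag defined elsewhere in Bonmin (e.g., set on user
// interrupt); when true, running solves bail out from
// intermediate_callback() below.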
extern bool BonminAbortAll;

namespace Bonmin
{

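// Constructor: queries the wrapped TMINLP for problem dimensions, variable
// types, bounds and the user starting point, and allocates the combined
// primal/dual starting-point storage.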
TMINLP2TNLP::TMINLP2TNLP(const SmartPtr<TMINLP> tminlp
#ifdef WARM_STARTER
    ,
    const OptionsList& options
#endif
    )
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(tminlp),
    nnz_jac_g_(0),
    nnz_h_lag_(0),
    index_style_(TNLP::FORTRAN_STYLE),
    obj_value_(1e100),
    curr_warm_starter_(),
    nlp_lower_bound_inf_(-DBL_MAX),
    nlp_upper_bound_inf_(DBL_MAX),
    warm_start_entire_iterate_(true),
    need_new_warm_starter_(true)
{
  assert(IsValid(tminlp_));
  Index n, m;
  bool retval =
    tminlp_->get_nlp_info(n, m, nnz_jac_g_, nnz_h_lag_, index_style_);

  ASSERT_EXCEPTION(retval, TMINLP_INVALID,
      "get_nlp_info of TMINLP returns false.");

  // Query the types (continuous/binary/integer) of the variables.
  var_types_.resize(n);
  tminlp_->get_variables_types(n, var_types_());

  // Current and original bounds; the originals are kept so that bound
  // changes made during branch-and-bound can be undone or reported.
  x_l_.resize(n);
  x_u_.resize(n);
  orig_x_l_.resize(n);
  orig_x_u_.resize(n);

  g_l_.resize(m);
  g_u_.resize(m);

  tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, g_l_(), g_u_());
  IpBlasDcopy(n, x_l_(), 1, orig_x_l_(), 1);
  IpBlasDcopy(n, x_u_(), 1, orig_x_u_(), 1);

  // x_init_ holds the n primal values followed by the 2*n + m duals
  // (z_L, z_U, lambda); the dual part is zero-initialized here.
  x_init_.reserve(3*n + m);
  x_init_.resize(3*n + m);
  tminlp_->get_starting_point(n, true, x_init_(), false, NULL, NULL,
      m, false, NULL);
  CoinZeroN(x_init_() + n, 2*n + m);
  x_init_user_.resize(n);
  IpBlasDcopy(n, x_init_(), 1, x_init_user_(), 1);
  duals_init_ = NULL;

#ifdef WARM_STARTER
  options.GetNumericValue("nlp_lower_bound_inf", nlp_lower_bound_inf_, "");
  options.GetNumericValue("nlp_upper_bound_inf", nlp_upper_bound_inf_, "");
  options.GetBoolValue("warm_start_entire_iterate",
      warm_start_entire_iterate_, "");
#endif
}

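// Copy constructor: scalar members are copied in the initializer list,
// array members by gutsOfCopy().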
TMINLP2TNLP::TMINLP2TNLP(const TMINLP2TNLP& other)
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(other.tminlp_),
    nnz_jac_g_(other.nnz_jac_g_),
    nnz_h_lag_(other.nnz_h_lag_),
    index_style_(other.index_style_),
    return_status_(other.return_status_),
    obj_value_(other.obj_value_),
    curr_warm_starter_(other.curr_warm_starter_),
    nlp_lower_bound_inf_(other.nlp_lower_bound_inf_),
    nlp_upper_bound_inf_(other.nlp_upper_bound_inf_),
    warm_start_entire_iterate_(other.warm_start_entire_iterate_),
    need_new_warm_starter_(other.need_new_warm_starter_)
{
  gutsOfCopy(other);
}

TMINLP2TNLP &
TMINLP2TNLP::operator=(const TMINLP2TNLP& rhs){
  if(this != &rhs){
    tminlp_ = rhs.tminlp_;
    nnz_jac_g_ = rhs.nnz_jac_g_;
    nnz_h_lag_ = rhs.nnz_h_lag_;
    index_style_ = rhs.index_style_;
    return_status_ = rhs.return_status_;
    obj_value_ = rhs.obj_value_;
    curr_warm_starter_ = rhs.curr_warm_starter_;
    nlp_lower_bound_inf_ = rhs.nlp_lower_bound_inf_;
    nlp_upper_bound_inf_ = rhs.nlp_upper_bound_inf_;
    warm_start_entire_iterate_ = rhs.warm_start_entire_iterate_;
    need_new_warm_starter_ = rhs.need_new_warm_starter_;

    gutsOfDelete();
    gutsOfCopy(rhs);
  }
  return (*this);
}

TMINLP2TNLP::~TMINLP2TNLP()
{
  gutsOfDelete();
}

void
TMINLP2TNLP::gutsOfDelete(){
  // Nothing to free: the array members manage their own storage.
}

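// Deep-copy the array members. Storage convention: x_init_ holds the n
// primal values followed by the 2*n + m duals (z_L, z_U, lambda), and
// duals_init_, when non-NULL, points at the dual part of that block.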
void
TMINLP2TNLP::gutsOfCopy(const TMINLP2TNLP& other)
{
  Index n = other.num_variables();
  Index m = other.num_constraints();

  if(n > 0){
    var_types_ = other.var_types_;

    x_l_.resize(n);
    x_u_.resize(n);
    IpBlasDcopy(n, other.x_l_(), 1, x_l_(), 1);
    IpBlasDcopy(n, other.x_u_(), 1, x_u_(), 1);

    orig_x_l_.resize(n);
    orig_x_u_.resize(n);
    IpBlasDcopy(n, other.orig_x_l_(), 1, orig_x_l_(), 1);
    IpBlasDcopy(n, other.orig_x_u_(), 1, orig_x_u_(), 1);
    x_init_user_.resize(n);
    IpBlasDcopy(n, other.x_init_user_(), 1, x_init_user_(), 1);
    if(!other.x_sol_.empty()) {
      Set_x_sol(n, other.x_sol_());
    }
  }

  if(!other.g_l_.empty()){
    const size_t size = other.g_l_.size();
    g_l_.resize(size);
    g_u_.resize(size);
  }

  if(m > 0){
    IpBlasDcopy(m, other.g_l_(), 1, g_l_(), 1);
    IpBlasDcopy(m, other.g_u_(), 1, g_u_(), 1);
    if(!other.g_sol_.empty()) {
      g_sol_.resize(m);
      IpBlasDcopy(m, other.g_sol_(), 1, g_sol_(), 1);
    }
  }

  x_init_ = other.x_init_;

  // duals_init_ must point into our own copy of x_init_, not into other's.
  if(other.duals_init_) {
    duals_init_ = x_init_() + n;
  }
  else
    duals_init_ = NULL;

  if(!other.duals_sol_.empty()) {
    duals_sol_.resize(m + 2*n);
    IpBlasDcopy(m + 2*n, other.duals_sol_(), 1, duals_sol_(), 1);
  }
}

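// Bound modification methods, used by branch-and-bound to tighten the
// problem seen by the NLP solver without touching the original TMINLP.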
void TMINLP2TNLP::SetVariablesBounds(Index n,
    const Number * x_l,
    const Number * x_u)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
}

void TMINLP2TNLP::SetVariablesLowerBounds(Index n,
    const Number * x_l)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
}

void TMINLP2TNLP::SetVariablesUpperBounds(Index n,
    const Number * x_u)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
}

void TMINLP2TNLP::SetVariableBounds(Index var_no, Number x_l, Number x_u)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_l_[var_no] = x_l;
  x_u_[var_no] = x_u;
}

void TMINLP2TNLP::SetVariableLowerBound(Index var_no, Number x_l)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_l_[var_no] = x_l;
}

void TMINLP2TNLP::SetVariableUpperBound(Index var_no, Number x_u)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_u_[var_no] = x_u;
}

void TMINLP2TNLP::SetStartingPoint(Index n, const Number* x_init)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_init, 1, x_init_(), 1);
}

void TMINLP2TNLP::resetStartingPoint()
{
  curr_warm_starter_ = NULL;
  IpBlasDcopy(x_init_user_.size(), x_init_user_(), 1, x_init_(), 1);
}

void TMINLP2TNLP::setxInit(Index ind, const Number val)
{
  x_init_[ind] = val;
}

void TMINLP2TNLP::setxInit(Index n, const Number* x_init)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_init, 1, x_init_(), 1);
}

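// Setters for the dual part of the starting point. The dual vector has
// length 2*n + m and is ordered z_L (n), z_U (n), lambda (m).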
void TMINLP2TNLP::setDualInit(Index ind, const Number val)
{
  x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
  // Re-anchor the dual pointer: the resize may have reallocated x_init_.
  duals_init_ = x_init_() + num_variables();
  duals_init_[ind] = val;
}

void TMINLP2TNLP::setDualsInit(Index m, const Number* duals_init)
{
  assert(m == num_variables() * 2 + num_constraints());
  x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
  // Re-anchor the dual pointer: the resize may have reallocated x_init_.
  duals_init_ = x_init_() + num_variables();
  if(m > 0)
    IpBlasDcopy(m, duals_init, 1, duals_init_, 1);
}

void TMINLP2TNLP::Set_x_sol(Index n, const Number* x_sol)
{
  assert(n == num_variables());
  if (x_sol_.empty()) {
    x_sol_.resize(n);
  }
  assert(n == (int) x_sol_.size());
  IpBlasDcopy(n, x_sol, 1, x_sol_(), 1);
}

void TMINLP2TNLP::SetVariableType(Index n, TMINLP::VariableType type)
{
  assert(n >= 0 && n < num_variables());
  var_types_[n] = type;
}

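// TNLP interface: report the problem, with its current (possibly
// tightened) bounds, to the NLP solver.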
bool TMINLP2TNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
    Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
{
  n = num_variables();
  m = num_constraints();
  nnz_jac_g = nnz_jac_g_;
  nnz_h_lag = nnz_h_lag_;
  index_style = index_style_;
  return true;
}

bool TMINLP2TNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
    Index m, Number* g_l, Number* g_u)
{
  assert(n == num_variables());
  assert(m == num_constraints());
  IpBlasDcopy(n, x_l_(), 1, x_l, 1);
  IpBlasDcopy(n, x_u_(), 1, x_u, 1);
  if(m > 0){
    IpBlasDcopy(m, g_l_(), 1, g_l, 1);
    IpBlasDcopy(m, g_u_(), 1, g_u, 1);
  }
  return true;
}

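// Hand the stored starting point to the NLP solver. The dual part of the
// storage is created (zero-initialized) if it does not exist yet.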
bool TMINLP2TNLP::get_starting_point(Index n, bool init_x, Number* x,
    bool init_z, Number* z_L, Number* z_U,
    Index m, bool init_lambda,
    Number* lambda)
{
  assert(m == num_constraints());
  assert(n == num_variables());
  // Make sure the dual part of the storage exists before handing out
  // pointers into it.
  x_init_.resize(3*n + m, 0.);
  duals_init_ = x_init_() + n;
  if (init_x == true) {
    if(x_init_.empty())
      return false;
    IpBlasDcopy(n, x_init_(), 1, x, 1);
  }
  if (init_z == true) {
    if(duals_init_ == NULL)
      return false;
    IpBlasDcopy(n, duals_init_, 1, z_L, 1);
    IpBlasDcopy(n, duals_init_ + n, 1, z_U, 1);
  }
  if(init_lambda == true) {
    if(duals_init_ == NULL)
      return false;
    if(m > 0){
      IpBlasDcopy(m, duals_init_ + 2*n, 1, lambda, 1);
    }
  }

  need_new_warm_starter_ = true;
  return true;
}

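// Fill warm_start_iterate from the stored warm starter, if one is available.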
bool TMINLP2TNLP::get_warm_start_iterate(IteratesVector& warm_start_iterate)
{
  if (IsNull(curr_warm_starter_)) {
    return false;
  }

  bool retval = curr_warm_starter_->WarmStartIterate(num_variables(),
      x_l_(), x_u_(), warm_start_iterate);

  need_new_warm_starter_ = true;
  return retval;
}

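// Function and derivative evaluations are delegated to the wrapped TMINLP.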
bool TMINLP2TNLP::eval_f(Index n, const Number* x, bool new_x,
    Number& obj_value)
{
  return tminlp_->eval_f(n, x, new_x, obj_value);
}

bool TMINLP2TNLP::eval_grad_f(Index n, const Number* x, bool new_x,
    Number* grad_f)
{
  // Pre-zero the last gradient entry before delegating.
  grad_f[n-1] = 0;
  return tminlp_->eval_grad_f(n, x, new_x, grad_f);
}

bool TMINLP2TNLP::eval_g(Index n, const Number* x, bool new_x,
    Index m, Number* g)
{
  return tminlp_->eval_g(n, x, new_x, m, g);
}

bool TMINLP2TNLP::eval_jac_g(Index n, const Number* x, bool new_x,
    Index m, Index nele_jac, Index* iRow,
    Index *jCol, Number* values)
{
  return tminlp_->eval_jac_g(n, x, new_x, m, nele_jac,
      iRow, jCol, values);
}

bool TMINLP2TNLP::eval_h(Index n, const Number* x, bool new_x,
    Number obj_factor, Index m, const Number* lambda,
    bool new_lambda, Index nele_hess,
    Index* iRow, Index* jCol, Number* values)
{
  return tminlp_->eval_h(n, x, new_x, obj_factor, m, lambda,
      new_lambda, nele_hess,
      iRow, jCol, values);
}

bool TMINLP2TNLP::eval_gi(Index n, const Number* x, bool new_x,
    Index i, Number& gi)
{
  return tminlp_->eval_gi(n, x, new_x, i, gi);
}

bool TMINLP2TNLP::eval_grad_gi(Index n, const Number* x, bool new_x,
    Index i, Index& nele_grad_gi, Index* jCol,
    Number* values)
{
  return tminlp_->eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
}

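// Cache the solver's final point: primal values in x_sol_, constraint
// activities in g_sol_, and duals in duals_sol_ ordered [z_L | z_U | lambda].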
void TMINLP2TNLP::finalize_solution(SolverReturn status,
    Index n, const Number* x, const Number* z_L, const Number* z_U,
    Index m, const Number* g, const Number* lambda,
    Number obj_value,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  assert(n == (Index) num_variables());
  assert(m == (Index) num_constraints());
  x_sol_.resize(n);
  IpBlasDcopy(n, x, 1, x_sol_(), 1);

  if(m > 0){
    g_sol_.resize(m);
    IpBlasDcopy(m, g, 1, g_sol_(), 1);
  }
  duals_sol_.resize(m + 2*n);
  if(lambda){
    if(m > 0)
      IpBlasDcopy(m, lambda, 1, duals_sol_() + 2*n, 1);

    IpBlasDcopy(n, z_L, 1, duals_sol_(), 1);
    IpBlasDcopy(n, z_U, 1, duals_sol_() + n, 1);
  }

  return_status_ = status;
  obj_value_ = obj_value;

  if (IsValid(curr_warm_starter_)) {
    curr_warm_starter_->Finalize();
  }
}

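// Called by Ipopt at every iteration; used to honor a global abort request
// and, when WARM_STARTER is defined, to record iterates for warm starting.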
bool TMINLP2TNLP::intermediate_callback(AlgorithmMode mode,
    Index iter, Number obj_value,
    Number inf_pr, Number inf_du,
    Number mu, Number d_norm,
    Number regularization_size,
    Number alpha_du, Number alpha_pr,
    Index ls_trials,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  if (BonminAbortAll) return false;
#ifdef WARM_STARTER
  if (!warm_start_entire_iterate_) {
    return true;
  }
  if (need_new_warm_starter_) {
    // Create a fresh warm starter for the current solve.
    curr_warm_starter_ = new IpoptInteriorWarmStarter(num_variables(),
        x_l_(), x_u_(),
        nlp_lower_bound_inf_,
        nlp_upper_bound_inf_,
        warm_start_entire_iterate_);
    need_new_warm_starter_ = false;
  }

  return curr_warm_starter_->UpdateStoredIterates(mode, *ip_data, *ip_cq);
#else
  return true;
#endif
}

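// Write the current bounds that differ from the original ones to
// probName.bounds (and, when variable names are given, as named bound
// constraints to probName.mod), and the starting point to probName.start.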
void
TMINLP2TNLP::outputDiffs(const std::string& probName, const std::string * varNames)
{
  const int numcols = num_variables();
  const int numrows = num_constraints();

  const double * currentLower = x_l();
  const double * currentUpper = x_u();

  const double * originalLower = orig_x_l();
  const double * originalUpper = orig_x_u();
  CoinRelFltEq eq;
  std::string fBoundsName = probName;
  fBoundsName += ".bounds";
  std::string fModName = probName;
  fModName += ".mod";
  std::ofstream fBounds;
  std::ofstream fMod;
  const bool hasVarNames = (varNames != NULL);

  if(hasVarNames)
    fMod.open(fModName.c_str());
  fBounds.open(fBoundsName.c_str());

  for(int i = 0 ; i < numcols ; i++) {
    if(!eq(currentLower[i], originalLower[i])) {
      if(hasVarNames)
        fMod<<"bounds"<<i<<": "
        <<varNames[i]<<" >= "
        <<currentLower[i]<<";\n";

      fBounds<<"LO"<<"\t"<<i<<"\t"<<currentLower[i]<<std::endl;
    }
    if(!eq(currentUpper[i], originalUpper[i])) {
      if(hasVarNames)
        fMod<<"bounds"<<i<<": "
        <<varNames[i]<<" <= "
        <<currentUpper[i]<<";\n";

      fBounds<<"UP"<<"\t"<<i<<"\t"<<currentUpper[i]<<std::endl;
    }
  }

  std::string fStartPointName = probName;
  fStartPointName += ".start";

  std::ofstream fStartPoint(fStartPointName.c_str());
  const double * primals = x_init();
  const double * duals = duals_init();
  fStartPoint.precision(17);
  fStartPoint<<numcols<<"\t"<<2*numcols + numrows<<std::endl;
  for(int i = 0 ; i < numcols ; i++)
    fStartPoint<<primals[i]<<std::endl;
  int end = 2*numcols + numrows;
  if(duals) {
    for(int i = 0 ; i < end ; i++)
      fStartPoint<<duals[i]<<std::endl;
  }
}

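// Give every non-fixed integer variable a fractional value, making the
// stored solution integer-infeasible.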
void
TMINLP2TNLP::force_fractionnal_sol()
{
  for(int i = 0 ; i < num_variables() ; i++) {
    if( ( var_types_[i] == TMINLP::INTEGER ||
          var_types_[i] == TMINLP::BINARY ) &&
        x_l_[i] < x_u_[i] - 0.5) // skip fixed variables
    {
      x_sol_[i] = ceil(x_l_[i]) + 0.5; // make it integer infeasible
    }
  }
}

bool
TMINLP2TNLP::get_scaling_parameters(Number& obj_scaling,
    bool& use_x_scaling, Index n,
    Number* x_scaling,
    bool& use_g_scaling, Index m,
    Number* g_scaling)
{
  return tminlp_->get_scaling_parameters(obj_scaling, use_x_scaling, n,
      x_scaling,
      use_g_scaling, m, g_scaling);
}

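// Access to the interior warm starter kept between successive solves.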
void
TMINLP2TNLP::SetWarmStarter(SmartPtr<IpoptInteriorWarmStarter> warm_starter)
{
  curr_warm_starter_ = warm_starter;
}

SmartPtr<IpoptInteriorWarmStarter>
TMINLP2TNLP::GetWarmStarter()
{
  return curr_warm_starter_;
}

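// Evaluate the TMINLP's upper bounding function at x and return its value.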
double
TMINLP2TNLP::evaluateUpperBoundingFunction(const double * x){
  Number help = 0.;
  // Note: the return status of eval_upper_bound_f is not checked here.
  tminlp_->eval_upper_bound_f(num_variables(), x, help);
  return help;
}

} // namespace Bonmin