00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013 #include "BonTMINLP2TNLP.hpp"
00014 #include "IpBlas.hpp"
00015 #include "IpAlgTypes.hpp"
00016 #include <climits>
00017 #include <string>
00018 #include <fstream>
00019 #include <sstream>
00020 #include "Ipopt/BonIpoptInteriorWarmStarter.hpp"
00021
00022 extern bool BonminAbortAll;
00023
00024 namespace Bonmin
00025 {
00026
/** Construct the TNLP adapter around a TMINLP.
 *
 *  Queries the wrapped TMINLP for its dimensions, variable types,
 *  bounds and primal starting point, and caches private copies of all
 *  of them so bounds/starting point can later be changed (e.g. by
 *  branching) without touching the original TMINLP.
 *
 *  @param tminlp   the mixed-integer NLP being adapted (must be valid).
 *  @param options  (only when built with WARM_STARTER) option list used
 *                  to read the infinity thresholds and warm-start flag.
 */
TMINLP2TNLP::TMINLP2TNLP(const SmartPtr<TMINLP> tminlp
#ifdef WARM_STARTER
    ,
    const OptionsList& options
#endif
    )
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(tminlp),
    nnz_jac_g_(0),
    nnz_h_lag_(0),
    index_style_(TNLP::FORTRAN_STYLE),
    obj_value_(1e100),            // sentinel: no solution computed yet
    curr_warm_starter_(),
    nlp_lower_bound_inf_(-DBL_MAX),
    nlp_upper_bound_inf_(DBL_MAX),
    warm_start_entire_iterate_(true),
    need_new_warm_starter_(true)
{


  // Problem dimensions come from the wrapped TMINLP.
  assert(IsValid(tminlp_));
  Index n,m;
  bool retval =
    tminlp_->get_nlp_info(n, m, nnz_jac_g_, nnz_h_lag_, index_style_);

  ASSERT_EXCEPTION(retval, TMINLP_INVALID,
      "get_nlp_info of TMINLP returns false.");

  // Record the integrality type of each variable.
  var_types_.resize(n);

  tminlp_->get_variables_types(n, var_types_());

  // Working copies of the bounds...
  x_l_.resize(n);
  x_u_.resize(n);
  orig_x_l_.resize(n);
  orig_x_u_.resize(n);

  g_l_.resize(m);
  g_u_.resize(m);

  // ...plus a pristine copy of the variable bounds (orig_x_l_/orig_x_u_)
  // so later modifications can be diffed against the original problem.
  tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, g_l_(), g_u_());
  IpBlasDcopy(n, x_l_(), 1, orig_x_l_(), 1);
  IpBlasDcopy(n, x_u_(), 1, orig_x_u_(), 1);

  // x_init_ holds primals followed by dual slots: [x | z_L | z_U | lambda],
  // i.e. 3*n + m entries in total (see get_starting_point).
  // NOTE(review): reserve() asks for 3*n + 2*m while resize() uses
  // 3*n + m — the reserve size looks like a typo; harmless, but confirm.
  x_init_.reserve(3*n+2*m);
  x_init_.resize(3*n + m);
  tminlp_->get_starting_point(n, true, x_init_(), false, NULL, NULL,
      m, false, NULL);
  CoinZeroN(x_init_() + n , 2*n + m);   // zero all dual slots
  x_init_user_.resize(n);
  IpBlasDcopy(n, x_init_(), 1, x_init_user_(), 1);  // remember user's x0
  duals_init_ = NULL;   // no dual start values provided yet

#ifdef WARM_STARTER

  options.GetNumericValue("nlp_lower_bound_inf", nlp_lower_bound_inf_, "");
  options.GetNumericValue("nlp_upper_bound_inf", nlp_upper_bound_inf_, "");
  options.GetBoolValue("warm_start_entire_iterate",
      warm_start_entire_iterate_, "");
#endif
}
00108
/** Copy constructor.
 *
 *  Copies the scalar members in the initializer list; all
 *  dynamically-sized data (bounds, starting point, solutions) is
 *  duplicated by gutsOfCopy().
 */
TMINLP2TNLP::TMINLP2TNLP(const TMINLP2TNLP& other)
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(other.tminlp_),            // shared: both adapters point at the same TMINLP
    nnz_jac_g_(other.nnz_jac_g_),
    nnz_h_lag_(other.nnz_h_lag_),
    index_style_(other.index_style_),
    return_status_(other.return_status_),
    obj_value_(other.obj_value_),
    curr_warm_starter_(other.curr_warm_starter_),
    nlp_lower_bound_inf_(other.nlp_lower_bound_inf_),
    nlp_upper_bound_inf_(other.nlp_upper_bound_inf_),
    warm_start_entire_iterate_(other.warm_start_entire_iterate_),
    need_new_warm_starter_(other.need_new_warm_starter_)
{
  gutsOfCopy(other);
}
00138
/** Assignment operator.
 *
 *  Self-assignment safe.  Scalar members are copied directly; the
 *  dynamically-sized data is released (gutsOfDelete) and then
 *  re-duplicated from @p rhs (gutsOfCopy).
 */
TMINLP2TNLP &
TMINLP2TNLP::operator=(const TMINLP2TNLP& rhs){
  if(this != &rhs){
    tminlp_ = rhs.tminlp_;
    nnz_jac_g_ = rhs.nnz_jac_g_;
    nnz_h_lag_ = rhs.nnz_h_lag_;
    index_style_ = rhs.index_style_;
    return_status_ = rhs.return_status_;
    obj_value_ = rhs.obj_value_;
    curr_warm_starter_ = rhs.curr_warm_starter_;
    nlp_lower_bound_inf_ = rhs.nlp_lower_bound_inf_;
    nlp_upper_bound_inf_ = rhs.nlp_upper_bound_inf_;
    warm_start_entire_iterate_ = rhs.warm_start_entire_iterate_;
    need_new_warm_starter_ = rhs.need_new_warm_starter_;

    // Release then duplicate the array data.
    gutsOfDelete();
    gutsOfCopy(rhs);

  }
  return (*this);
}
00161
/** Destructor: delegates cleanup to gutsOfDelete(). */
TMINLP2TNLP::~TMINLP2TNLP()
{
  gutsOfDelete();
}
00166
/** Release dynamically allocated data.
 *
 *  Currently empty: all storage is held in self-managing vector
 *  members, so there is nothing to free by hand.  Kept as the
 *  counterpart of gutsOfCopy() for operator= and the destructor.
 */
void
TMINLP2TNLP::gutsOfDelete(){
}
00170
00176 void
00177 TMINLP2TNLP::gutsOfCopy(const TMINLP2TNLP& other)
00178 {
00179 Index n = other.num_variables();
00180 Index m = other.num_constraints();
00181
00182 if(n > 0){
00183 var_types_ = other.var_types_;
00184
00185 x_l_.resize(n);
00186 x_u_.resize(n);
00187 IpBlasDcopy(n, other.x_l_(), 1, x_l_(), 1);
00188 IpBlasDcopy(n, other.x_u_(), 1, x_u_(), 1);
00189
00190 orig_x_l_.resize(n);
00191 orig_x_u_.resize(n);
00192 IpBlasDcopy(n, other.orig_x_l_(), 1, orig_x_l_(), 1);
00193 IpBlasDcopy(n, other.orig_x_u_(), 1, orig_x_u_(), 1);
00194 x_init_user_.resize(n);
00195 IpBlasDcopy(n, other.x_init_user_(), 1, x_init_user_(), 1);
00196 if(!other.x_sol_.empty()) {
00197 Set_x_sol(n,other.x_sol_());
00198 }
00199 }
00200
00201 if(!other.g_l_.empty()){
00202 const int& size = other.g_l_.size();
00203 g_l_.resize(size);
00204 g_u_.resize(size);
00205 }
00206
00207 if(m > 0){
00208 IpBlasDcopy(m, other.g_l_(), 1, g_l_(), 1);
00209 IpBlasDcopy(m, other.g_u_(), 1, g_u_(), 1);
00210 if(!other.g_sol_.empty()) {
00211 g_sol_.resize(m);
00212 IpBlasDcopy(m, other.g_sol_(), 1, g_sol_(), 1);
00213 }
00214 }
00215
00216
00217 x_init_ = other.x_init_;
00218
00219 if(other.duals_init_) {
00220 duals_init_ = x_init_() + n;
00221 }
00222 else
00223 duals_init_ = NULL;
00224
00225
00226 if(!other.duals_sol_.empty()) {
00227 duals_sol_.resize(m + 2*n);
00228 IpBlasDcopy(duals_sol_.size(), other.duals_sol_(), 1, duals_sol_(), 1);
00229 }
00230
00231 }
00232
00233 void TMINLP2TNLP::SetVariablesBounds(Index n,
00234 const Number * x_l,
00235 const Number * x_u)
00236 {
00237 assert(n==num_variables());
00238 IpBlasDcopy(n, x_l, 1, x_l_(), 1);
00239 IpBlasDcopy(n, x_u, 1, x_u_(), 1);
00240 }
00241
00242 void TMINLP2TNLP::SetVariablesLowerBounds(Index n,
00243 const Number * x_l)
00244 {
00245 assert(n==num_variables());
00246 IpBlasDcopy(n, x_l, 1, x_l_(), 1);
00247 }
00248
00249 void TMINLP2TNLP::SetVariablesUpperBounds(Index n,
00250 const Number * x_u)
00251 {
00252 assert(n==num_variables());
00253 IpBlasDcopy(n, x_u, 1, x_u_(), 1);
00254 }
00255
00256 void TMINLP2TNLP::SetVariableBounds(Index var_no, Number x_l, Number x_u)
00257 {
00258 assert(var_no >= 0 && var_no < num_variables());
00259 x_l_[var_no] = x_l;
00260 x_u_[var_no] = x_u;
00261 }
00262
00263 void TMINLP2TNLP::SetVariableLowerBound(Index var_no, Number x_l)
00264 {
00265 assert(var_no >= 0 && var_no < num_variables());
00266 x_l_[var_no] = x_l;
00267 }
00268
00269 void TMINLP2TNLP::SetVariableUpperBound(Index var_no, Number x_u)
00270 {
00271 assert(var_no >= 0 && var_no < num_variables());
00272 x_u_[var_no] = x_u;
00273 }
00274
00275 void TMINLP2TNLP::SetStartingPoint(Index n,const Number* x_init)
00276 {
00277 assert(n == num_variables());
00278 IpBlasDcopy(n, x_init, 1, x_init_(), 1);
00279 }
00280
00281 void TMINLP2TNLP::resetStartingPoint()
00282 {
00283 curr_warm_starter_ = NULL;
00284 IpBlasDcopy(x_init_user_.size(), x_init_user_(), 1, x_init_(), 1);
00285 }
00286
/** Set one entry of the primal starting point.
 *
 *  @param ind  0-based variable index (not bounds-checked here).
 *  @param val  starting value for that variable.
 */
void TMINLP2TNLP::setxInit(Index ind, const Number val)
{
  x_init_[ind] = val;
}
00291
00292 void TMINLP2TNLP::setxInit(Index n,const Number* x_init)
00293 {
00294 assert(n == num_variables());
00295 IpBlasDcopy(n, x_init, 1, x_init_(), 1);
00296 }
00297
00298 void TMINLP2TNLP::setDualInit(Index ind, const Number val)
00299 {
00300 x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
00301 if(!duals_init_)
00302 duals_init_ = &x_init_[num_variables()];
00303 duals_init_[ind] = val;
00304 }
00305
00306 void TMINLP2TNLP::setDualsInit(Index m, const Number* duals_init)
00307 {
00308 assert(m == num_variables() * 2 + num_constraints() );
00309 x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
00310 if(!duals_init_)
00311 duals_init_ = x_init_() + num_variables();
00312
00313 IpBlasDcopy(m, duals_init, 1, duals_init_, 1);
00314
00315 }
00316
00318 void TMINLP2TNLP::Set_x_sol(Index n, const Number* x_sol)
00319 {
00320 assert(n == num_variables());
00321 if (x_sol_.empty()) {
00322 x_sol_.resize(n);
00323 }
00324 assert(n == (int) x_sol_.size());
00325 IpBlasDcopy(n, x_sol, 1, x_sol_(), 1);
00326 }
00327
00329 void TMINLP2TNLP::SetVariableType(Index n, TMINLP::VariableType type)
00330 {
00331 assert(n >= 0 && n < num_variables());
00332 var_types_[n] = type;
00333 }
00334
00335 bool TMINLP2TNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
00336 Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
00337 {
00338 n = num_variables();
00339 m = num_constraints();
00340 nnz_jac_g = nnz_jac_g_;
00341 nnz_h_lag = nnz_h_lag_;
00342 index_style = index_style_;
00343 return true;
00344 }
00345
00346 bool TMINLP2TNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
00347 Index m, Number* g_l, Number* g_u)
00348 {
00349 assert(n==num_variables());
00350 assert(m==num_constraints());
00351 IpBlasDcopy(n, x_l_(), 1, x_l, 1);
00352 IpBlasDcopy(n, x_u_(), 1, x_u, 1);
00353 IpBlasDcopy(m, g_l_(), 1, g_l, 1);
00354 IpBlasDcopy(m, g_u_(), 1, g_u, 1);
00355 return true;
00356 }
00357
/** TNLP overload: provide the starting point to the solver.
 *
 *  x_init_ layout: [x (n) | z_L (n) | z_U (n) | lambda (m)]; the dual
 *  sections are served out of duals_init_, which is (re)pointed at the
 *  section right after the primals on every call.
 *
 *  @return false if a requested piece (x or duals) is unavailable.
 */
bool TMINLP2TNLP::get_starting_point(Index n, bool init_x, Number* x,
    bool init_z, Number* z_L, Number* z_U,
    Index m, bool init_lambda,
    Number* lambda)
{
  assert(m==num_constraints());
  assert(n==num_variables());
  // Ensure full [x | z_L | z_U | lambda] storage, then re-anchor the
  // dual pointer (the resize may have reallocated the vector).
  x_init_.resize(3*n + m, 0.);
  duals_init_ = x_init_() + n;
  if (init_x == true) {
    if(x_init_.empty())
      return false;
    IpBlasDcopy(n, x_init_(), 1, x, 1);
  }
  if (init_z == true) {
    // NOTE(review): duals_init_ was just assigned above, so this NULL
    // check can no longer fire -- confirm whether the unconditional
    // assignment is intended.
    if(duals_init_ == NULL)
      return false;
    IpBlasDcopy(n, duals_init_, 1, z_L, 1);
    IpBlasDcopy(n, duals_init_ + n, 1, z_U, 1);

  }
  if(init_lambda == true) {
    if(duals_init_ == NULL)
      return false;
    // Constraint multipliers live after the two bound-multiplier blocks.
    IpBlasDcopy(m, duals_init_ + 2*n , 1, lambda, 1);
  }

  // A fresh start invalidates any previously built warm starter.
  need_new_warm_starter_ = true;
  return true;
}
00388
00389 bool TMINLP2TNLP::get_warm_start_iterate(IteratesVector& warm_start_iterate)
00390 {
00391 if (IsNull(curr_warm_starter_)) {
00392 return false;
00393 }
00394
00395 bool retval = curr_warm_starter_->WarmStartIterate(num_variables(), x_l_(), x_u_(),
00396 warm_start_iterate);
00397
00398 need_new_warm_starter_ = true;
00399 return retval;
00400 }
00401
00402 bool TMINLP2TNLP::eval_f(Index n, const Number* x, bool new_x,
00403 Number& obj_value)
00404 {
00405 return tminlp_->eval_f(n, x, new_x, obj_value);
00406 }
00407
/** TNLP overload: objective gradient, forwarded to the wrapped TMINLP.
 *
 *  NOTE(review): the last gradient entry is zeroed before delegating --
 *  presumably a safeguard for TMINLPs that never write that slot;
 *  confirm against the TMINLP implementations before relying on it.
 */
bool TMINLP2TNLP::eval_grad_f(Index n, const Number* x, bool new_x,
    Number* grad_f)
{
  grad_f[n-1] = 0;
  return tminlp_->eval_grad_f(n, x, new_x, grad_f);
}
00414
00415 bool TMINLP2TNLP::eval_g(Index n, const Number* x, bool new_x,
00416 Index m, Number* g)
00417 {
00418 int return_code = tminlp_->eval_g(n, x, new_x, m, g);
00419 return return_code;
00420 }
00421
00422 bool TMINLP2TNLP::eval_jac_g(Index n, const Number* x, bool new_x,
00423 Index m, Index nele_jac, Index* iRow,
00424 Index *jCol, Number* values)
00425 {
00426 bool return_code =
00427 tminlp_->eval_jac_g(n, x, new_x, m, nele_jac,
00428 iRow, jCol, values);
00429 return return_code;
00430 }
00431
00432 bool TMINLP2TNLP::eval_h(Index n, const Number* x, bool new_x,
00433 Number obj_factor, Index m, const Number* lambda,
00434 bool new_lambda, Index nele_hess,
00435 Index* iRow, Index* jCol, Number* values)
00436 {
00437 return tminlp_->eval_h(n, x, new_x, obj_factor, m, lambda,
00438 new_lambda, nele_hess,
00439 iRow, jCol, values);
00440 }
00441
00442
00443 bool TMINLP2TNLP::eval_gi(Index n, const Number* x, bool new_x,
00444 Index i, Number& gi)
00445 {
00446 return tminlp_->eval_gi(n, x, new_x, i, gi);
00447 }
00448
00449 bool TMINLP2TNLP::eval_grad_gi(Index n, const Number* x, bool new_x,
00450 Index i, Index& nele_grad_gi, Index* jCol,
00451 Number* values)
00452 {
00453 return tminlp_->eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
00454 }
00455
/** TNLP overload: store the solver's final solution.
 *
 *  Caches primals (x_sol_), constraint values (g_sol_), duals
 *  (duals_sol_, layout [z_L (n) | z_U (n) | lambda (m)]), the return
 *  status and the objective value; finally notifies the warm starter,
 *  if any.
 */
void TMINLP2TNLP::finalize_solution(SolverReturn status,
    Index n, const Number* x, const Number* z_L, const Number* z_U,
    Index m, const Number* g, const Number* lambda,
    Number obj_value,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  assert(n == (Index) num_variables());
  assert(m == (Index) num_constraints());
  x_sol_.resize(n);

  IpBlasDcopy(n, x, 1, x_sol_(), 1);

  g_sol_.resize(m);
  IpBlasDcopy(m, g, 1, g_sol_(), 1);
  duals_sol_.resize(m + 2*n);
  // Duals are only copied when lambda is provided; note z_L/z_U are
  // skipped too in that case, even if they are non-NULL themselves.
  if(lambda){
    IpBlasDcopy(m, lambda, 1, duals_sol_() + 2*n, 1);

    IpBlasDcopy(n, z_L, 1 , duals_sol_() , 1);
    IpBlasDcopy(n, z_U, 1 , duals_sol_() + n, 1);
  }

  return_status_ = status;
  obj_value_ = obj_value;

  // Let an attached warm starter finish its bookkeeping for this solve.
  if (IsValid(curr_warm_starter_)) {
    curr_warm_starter_->Finalize();
  }
}
00486
00487
/** TNLP overload: per-iteration callback from Ipopt.
 *
 *  Returning false aborts the solve; this is used to honor the global
 *  BonminAbortAll flag.  When built with WARM_STARTER, also feeds the
 *  current iterate into an IpoptInteriorWarmStarter (creating one
 *  lazily when needed).
 *
 *  @return false to abort the NLP solve, true to continue.
 */
bool TMINLP2TNLP::intermediate_callback(AlgorithmMode mode,
    Index iter, Number obj_value,
    Number inf_pr, Number inf_du,
    Number mu, Number d_norm,
    Number regularization_size,
    Number alpha_du, Number alpha_pr,
    Index ls_trials,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  // Global abort switch (set elsewhere, e.g. on interrupt).
  if (BonminAbortAll) return false;
#if WARM_STARTER
  // Only collect iterates when a full-iterate warm start was requested.
  if (!warm_start_entire_iterate_) {
    return true;
  }
  if (need_new_warm_starter_) {
    // NOTE(review): this branch references `n_`, which is not visible
    // in this file -- confirm it exists when WARM_STARTER is defined.
    curr_warm_starter_ = new IpoptInteriorWarmStarter(n_, x_l_, x_u_,
        nlp_lower_bound_inf_,
        nlp_upper_bound_inf_,
        warm_start_entire_iterate_);
    need_new_warm_starter_ = false;
  }

  return curr_warm_starter_->UpdateStoredIterates(mode, *ip_data, *ip_cq);
#else
  return true;
#endif
}
00520
00521
00527 void
00528 TMINLP2TNLP::outputDiffs(const std::string& probName, const std::string * varNames)
00529 {
00530 const int &numcols = num_variables();
00531 const int &numrows = num_constraints();
00532
00533 const double * currentLower = x_l();
00534 const double * currentUpper = x_u();
00535
00536 const double * originalLower = orig_x_l();
00537 const double * originalUpper = orig_x_u();
00538 CoinRelFltEq eq;
00539 std::string fBoundsName = probName;
00540 std::ostringstream os;
00541 fBoundsName+=".bounds";
00542 std::string fModName = probName;
00543 fModName+= ".mod";
00544 std::ofstream fBounds;
00545 std::ofstream fMod;
00546 bool hasVarNames = 0;
00547
00548 if(varNames!=NULL )
00549 hasVarNames=1;
00550 if(hasVarNames)
00551 fMod.open(fModName.c_str());
00552 fBounds.open(fBoundsName.c_str());
00553
00554 for(int i = 0 ; i < numcols ; i++) {
00555 if(!eq(currentLower[i],originalLower[i])) {
00556 if(hasVarNames)
00557 fMod<<"bounds"<<i<<": "
00558 <<varNames[i]<<" >= "
00559 <<currentLower[i]<<";\n";
00560
00561
00562 fBounds<<"LO"<<"\t"<<i<<"\t"<<currentLower[i]<<std::endl;
00563 }
00564 if(!eq(currentUpper[i],originalUpper[i])) {
00565 if(hasVarNames)
00566 fMod<<"bounds"<<i<<": "
00567 <<varNames[i]<<" <= "
00568 <<currentUpper[i]<<";\n";
00569
00570 fBounds<<"UP"<<"\t"<<i<<"\t"<<currentUpper[i]<<std::endl;
00571 }
00572 }
00573
00574
00575 std::string fStartPointName=probName;
00576 fStartPointName+=".start";
00577
00578 std::ofstream fStartPoint(fStartPointName.c_str());
00579 const double * primals = x_init();
00580 const double * duals = duals_init();
00581 fStartPoint.precision(17);
00582 fStartPoint<<numcols<<"\t"<<2*numcols+numrows<<std::endl;
00583 for(int i = 0 ; i < numcols ; i++)
00584 fStartPoint<<primals[i]<<std::endl;
00585 int end = 2*numcols + numrows;
00586 if(duals) {
00587 for(int i = 0 ; i < end; i++)
00588 fStartPoint<<duals[i]<<std::endl;
00589 }
00590
00591 }
00592
/** Overwrite the stored solution of integer/binary variables with a
 *  deliberately fractional value (ceil(lower bound) + 0.5), so the
 *  point is integer-infeasible.
 *
 *  NOTE(review): the guard `x_l_[i] < x_u_[i] + 0.5` is also true for
 *  fixed variables (x_l == x_u), in which case the written value can
 *  violate the bounds -- confirm whether `x_u_[i] - 0.5` was intended.
 */
void
TMINLP2TNLP::force_fractionnal_sol()
{
  for(int i=0 ; i < num_variables() ; i++) {
    if( ( var_types_[i] == TMINLP::INTEGER ||
        var_types_[i] == TMINLP::BINARY )&&
        x_l_[i] < x_u_[i] + 0.5)
    {
      x_sol_[i] = ceil(x_l_[i]) + 0.5;
    }
  }
}
00606
00607 bool
00608 TMINLP2TNLP::get_scaling_parameters(Number& obj_scaling,
00609 bool& use_x_scaling, Index n,
00610 Number* x_scaling,
00611 bool& use_g_scaling, Index m,
00612 Number* g_scaling)
00613 {
00614 return tminlp_->get_scaling_parameters(obj_scaling, use_x_scaling, n,
00615 x_scaling,
00616 use_g_scaling, m, g_scaling);
00617 }
00618
00619
/** Attach (or replace) the warm starter used for subsequent solves. */
void
TMINLP2TNLP::SetWarmStarter(SmartPtr<IpoptInteriorWarmStarter> warm_starter)
{
  curr_warm_starter_ = warm_starter;
}
/** @return the currently attached warm starter (may be NULL). */
SmartPtr<IpoptInteriorWarmStarter>
TMINLP2TNLP::GetWarmStarter()
{
  return curr_warm_starter_;
}
00635
00636
00638 double
00639 TMINLP2TNLP::evaluateUpperBoundingFunction(const double * x){
00640 Number help;
00641 tminlp_->eval_upper_bound_f(num_variables(), x, help);
00642 return help;
00643 }
00644
00645
00646 }
00647