#include "BonTMINLP2TNLP.hpp"
#include "IpBlas.hpp"
#include "IpAlgTypes.hpp"
#include "IpIpoptCalculatedQuantities.hpp"
#include <climits>
#include <cfloat>     // DBL_MAX
#include <cmath>      // ceil, floor
#include <algorithm>  // std::max
#include <string>
#include <fstream>
#include <sstream>
#include "Ipopt/BonIpoptInteriorWarmStarter.hpp"
#include "OsiBranchingObject.hpp"

using namespace Ipopt;

extern bool BonminAbortAll;
class OsiObject;
namespace Bonmin
{

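/** Constructor: query the TMINLP for problem sizes, variable types,
    bounds, and the user starting point, keeping copies of the original
    bounds so that later bound changes can be diffed (see outputDiffs). */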
TMINLP2TNLP::TMINLP2TNLP(const SmartPtr<TMINLP> tminlp
#ifdef WARM_STARTER
    ,
    const OptionsList& options
#endif
    )
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(tminlp),
    nnz_jac_g_(0),
    nnz_h_lag_(0),
    index_style_(TNLP::FORTRAN_STYLE),
    obj_value_(1e100),
    curr_warm_starter_(),
    nlp_lower_bound_inf_(-DBL_MAX),
    nlp_upper_bound_inf_(DBL_MAX),
    warm_start_entire_iterate_(true),
    need_new_warm_starter_(true)
{
  assert(IsValid(tminlp_));
  Index n, m;
  bool retval =
    tminlp_->get_nlp_info(n, m, nnz_jac_g_, nnz_h_lag_, index_style_);

  ASSERT_EXCEPTION(retval, TMINLP_INVALID,
      "get_nlp_info of TMINLP returns false.");

  var_types_.resize(n);
  tminlp_->get_variables_types(n, var_types_());

  x_l_.resize(n);
  x_u_.resize(n);
  orig_x_l_.resize(n);
  orig_x_u_.resize(n);

  g_l_.resize(m);
  g_u_.resize(m);

  if(m > 0){
    tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, g_l_(), g_u_());
  }
  else {
    tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, NULL, NULL);
  }
  IpBlasDcopy(n, x_l_(), 1, orig_x_l_(), 1);
  IpBlasDcopy(n, x_u_(), 1, orig_x_u_(), 1);

  x_init_user_.resize(n);
  tminlp_->get_starting_point(n, true, x_init_user_(), false, NULL, NULL,
      m, false, NULL);

#ifdef WARM_STARTER
  options.GetNumericValue("nlp_lower_bound_inf", nlp_lower_bound_inf_, "");
  options.GetNumericValue("nlp_upper_bound_inf", nlp_upper_bound_inf_, "");
  options.GetBoolValue("warm_start_entire_iterate",
      warm_start_entire_iterate_, "");
#endif
}

TMINLP2TNLP::TMINLP2TNLP(const TMINLP2TNLP& other)
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(other.tminlp_),
    nnz_jac_g_(other.nnz_jac_g_),
    nnz_h_lag_(other.nnz_h_lag_),
    index_style_(other.index_style_),
    return_status_(other.return_status_),
    obj_value_(other.obj_value_),
    curr_warm_starter_(other.curr_warm_starter_),
    nlp_lower_bound_inf_(other.nlp_lower_bound_inf_),
    nlp_upper_bound_inf_(other.nlp_upper_bound_inf_),
    warm_start_entire_iterate_(other.warm_start_entire_iterate_),
    need_new_warm_starter_(other.need_new_warm_starter_)
{
  gutsOfCopy(other);
}

TMINLP2TNLP &
TMINLP2TNLP::operator=(const TMINLP2TNLP& rhs){
  if(this != &rhs){
    tminlp_ = rhs.tminlp_;
    nnz_jac_g_ = rhs.nnz_jac_g_;
    nnz_h_lag_ = rhs.nnz_h_lag_;
    index_style_ = rhs.index_style_;
    return_status_ = rhs.return_status_;
    obj_value_ = rhs.obj_value_;
    curr_warm_starter_ = rhs.curr_warm_starter_;
    nlp_lower_bound_inf_ = rhs.nlp_lower_bound_inf_;
    nlp_upper_bound_inf_ = rhs.nlp_upper_bound_inf_;
    warm_start_entire_iterate_ = rhs.warm_start_entire_iterate_;
    need_new_warm_starter_ = rhs.need_new_warm_starter_;

    gutsOfDelete();
    gutsOfCopy(rhs);
  }
  return (*this);
}

TMINLP2TNLP::~TMINLP2TNLP()
{
  gutsOfDelete();
}

void
TMINLP2TNLP::gutsOfDelete(){
  // Nothing to free: all dynamic storage lives in the vector members,
  // and duals_init_ only aliases x_init_.
}

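/** Copy all vector data from another TMINLP2TNLP, resizing local storage
    as needed; duals_init_ is re-pointed into the copied x_init_ rather
    than copied as a raw pointer. */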
void
TMINLP2TNLP::gutsOfCopy(const TMINLP2TNLP& other)
{
  Index n = other.num_variables();
  Index m = other.num_constraints();

  if(n > 0){
    var_types_ = other.var_types_;

    x_l_.resize(n);
    x_u_.resize(n);
    IpBlasDcopy(n, other.x_l_(), 1, x_l_(), 1);
    IpBlasDcopy(n, other.x_u_(), 1, x_u_(), 1);

    orig_x_l_.resize(n);
    orig_x_u_.resize(n);
    IpBlasDcopy(n, other.orig_x_l_(), 1, orig_x_l_(), 1);
    IpBlasDcopy(n, other.orig_x_u_(), 1, orig_x_u_(), 1);

    x_init_user_.resize(n);
    IpBlasDcopy(n, other.x_init_user_(), 1, x_init_user_(), 1);
    if(!other.x_sol_.empty()) {
      Set_x_sol(n, other.x_sol_());
    }
  }

  if(!other.g_l_.empty()){
    const size_t size = other.g_l_.size();
    g_l_.resize(size);
    g_u_.resize(size);
  }

  if(m > 0){
    IpBlasDcopy(m, other.g_l_(), 1, g_l_(), 1);
    IpBlasDcopy(m, other.g_u_(), 1, g_u_(), 1);
    if(!other.g_sol_.empty()) {
      g_sol_.resize(m);
      IpBlasDcopy(m, other.g_sol_(), 1, g_sol_(), 1);
    }
  }

  x_init_ = other.x_init_;
  if(other.duals_init_) {
    duals_init_ = x_init_() + n;
  }
  else
    duals_init_ = NULL;

  if(!other.duals_sol_.empty()) {
    duals_sol_.resize(m + 2*n);
    IpBlasDcopy((int) duals_sol_.size(), other.duals_sol_(), 1, duals_sol_(), 1);
  }
}

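/** The setters below overwrite the current (local) variable bounds; the
    original bounds from the TMINLP remain in orig_x_l_ / orig_x_u_. */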
void TMINLP2TNLP::SetVariablesBounds(Index n,
    const Number * x_l,
    const Number * x_u)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
}

void TMINLP2TNLP::SetVariablesLowerBounds(Index n,
    const Number * x_l)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
}

void TMINLP2TNLP::SetVariablesUpperBounds(Index n,
    const Number * x_u)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
}

void TMINLP2TNLP::SetVariableBounds(Index var_no, Number x_l, Number x_u)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_l_[var_no] = x_l;
  x_u_[var_no] = x_u;
}

void TMINLP2TNLP::SetVariableLowerBound(Index var_no, Number x_l)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_l_[var_no] = x_l;
}

void TMINLP2TNLP::SetVariableUpperBound(Index var_no, Number x_u)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_u_[var_no] = x_u;
}

void TMINLP2TNLP::resetStartingPoint()
{
  curr_warm_starter_ = NULL;
  x_init_.clear();
}

void TMINLP2TNLP::setxInit(Index n, const Number* x_init)
{
  assert(n == num_variables());
  if((Index) x_init_.size() < n)
    x_init_.resize(n);
  IpBlasDcopy(n, x_init, 1, x_init_(), 1);
}

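/** Set the initial duals. The layout, used throughout this class, is
    [z_L (n entries), z_U (n entries), lambda (m entries)], stored inside
    x_init_ immediately after the n primal values. */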
void TMINLP2TNLP::setDualsInit(Index m, const Number* duals_init)
{
  assert(m == num_variables() * 2 + num_constraints());
  x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
  duals_init_ = x_init_() + num_variables();

  if(m > 0)
    IpBlasDcopy(m, duals_init, 1, duals_init_, 1);
}

void TMINLP2TNLP::Set_x_sol(Index n, const Number* x_sol)
{
  assert(n == num_variables());
  if (x_sol_.empty()) {
    x_sol_.resize(n);
  }
  assert(n == (int) x_sol_.size());
  IpBlasDcopy(n, x_sol, 1, x_sol_(), 1);
}

void TMINLP2TNLP::Set_dual_sol(Index n, const Number* dual_sol)
{
  assert(n == num_variables() * 2 + num_constraints());
  if (duals_sol_.empty()) {
    duals_sol_.resize(n);
  }
  assert(n == (int) duals_sol_.size());
  IpBlasDcopy(n, dual_sol, 1, duals_sol_(), 1);
}

void TMINLP2TNLP::SetVariableType(Index n, TMINLP::VariableType type)
{
  assert(n >= 0 && n < num_variables());
  var_types_[n] = type;
}

bool TMINLP2TNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
    Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
{
  n = num_variables();
  m = num_constraints();
  nnz_jac_g = nnz_jac_g_;
  nnz_h_lag = nnz_h_lag_;
  index_style = index_style_;
  return true;
}

bool TMINLP2TNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
    Index m, Number* g_l, Number* g_u)
{
  assert(n == num_variables());
  assert(m == num_constraints());
  IpBlasDcopy(n, x_l_(), 1, x_l, 1);
  IpBlasDcopy(n, x_u_(), 1, x_u, 1);
  if (m > 0){
    IpBlasDcopy(m, g_l_(), 1, g_l, 1);
    IpBlasDcopy(m, g_u_(), 1, g_u, 1);
  }
  return true;
}

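/** Return the starting point. Primal values come from x_init_ when set
    (e.g. via setxInit or setDualsInit), otherwise from the user's
    x_init_user_; duals are only available when duals_init_ is set. */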
bool TMINLP2TNLP::get_starting_point(Index n, bool init_x, Number* x,
    bool init_z, Number* z_L, Number* z_U,
    Index m, bool init_lambda,
    Number* lambda)
{
  assert(m == num_constraints());
  assert(n == num_variables());
  if (init_x) {
    if(x_init_.empty()){
      assert(x_init_user_.size() >= (size_t) n);
      IpBlasDcopy(n, x_init_user_(), 1, x, 1);
    }
    else
      IpBlasDcopy(n, x_init_(), 1, x, 1);
  }
  if (init_z) {
    if(duals_init_ == NULL)
      return false;
    assert(x_init_.size() == (size_t) (3*n + m) && duals_init_ == x_init_() + n);
    IpBlasDcopy(n, duals_init_, 1, z_L, 1);
    IpBlasDcopy(n, duals_init_ + n, 1, z_U, 1);
  }
  if (init_lambda) {
    if(duals_init_ == NULL)
      return false;
    assert(x_init_.size() == (size_t) (3*n + m) && duals_init_ == x_init_() + n);
    if(m > 0)
      IpBlasDcopy(m, duals_init_ + 2*n, 1, lambda, 1);
  }

  need_new_warm_starter_ = true;
  return true;
}

bool TMINLP2TNLP::get_warm_start_iterate(IteratesVector& warm_start_iterate)
{
  if (IsNull(curr_warm_starter_)) {
    return false;
  }

  bool retval = curr_warm_starter_->WarmStartIterate(num_variables(), x_l_(), x_u_(),
      warm_start_iterate);

  need_new_warm_starter_ = true;
  return retval;
}

bool TMINLP2TNLP::eval_f(Index n, const Number* x, bool new_x,
    Number& obj_value)
{
  return tminlp_->eval_f(n, x, new_x, obj_value);
}

bool TMINLP2TNLP::eval_grad_f(Index n, const Number* x, bool new_x,
    Number* grad_f)
{
  // Pre-write the last entry; the TMINLP is expected to fill the whole
  // gradient and overwrite this value.
  grad_f[n-1] = 0;
  return tminlp_->eval_grad_f(n, x, new_x, grad_f);
}

bool TMINLP2TNLP::eval_g(Index n, const Number* x, bool new_x,
    Index m, Number* g)
{
  return tminlp_->eval_g(n, x, new_x, m, g);
}

bool TMINLP2TNLP::eval_jac_g(Index n, const Number* x, bool new_x,
    Index m, Index nele_jac, Index* iRow,
    Index *jCol, Number* values)
{
  bool return_code =
    tminlp_->eval_jac_g(n, x, new_x, m, nele_jac,
        iRow, jCol, values);
  if(iRow != NULL){
    // Read and rewrite each row index. Behaviorally a no-op, this forces
    // every entry of iRow to be touched (e.g. to surface invalid or
    // uninitialized entries under a memory checker).
    Index buf;
    for(Index k = 0; k < nele_jac ; k++){
      buf = iRow[k];
      iRow[k] = -1;
      iRow[k] = buf;
    }
  }
  return return_code;
}

bool TMINLP2TNLP::eval_h(Index n, const Number* x, bool new_x,
    Number obj_factor, Index m, const Number* lambda,
    bool new_lambda, Index nele_hess,
    Index* iRow, Index* jCol, Number* values)
{
  return tminlp_->eval_h(n, x, new_x, obj_factor, m, lambda,
      new_lambda, nele_hess,
      iRow, jCol, values);
}

bool TMINLP2TNLP::eval_gi(Index n, const Number* x, bool new_x,
    Index i, Number& gi)
{
  return tminlp_->eval_gi(n, x, new_x, i, gi);
}

bool TMINLP2TNLP::eval_grad_gi(Index n, const Number* x, bool new_x,
    Index i, Index& nele_grad_gi, Index* jCol,
    Number* values)
{
  return tminlp_->eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
}

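/** Record the solver's final primal/dual solution. duals_sol_ uses the
    layout [z_L (n), z_U (n), lambda (m)]. On LOCAL_INFEASIBILITY the
    stored "objective" is the maximum constraint violation instead. */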
void TMINLP2TNLP::finalize_solution(SolverReturn status,
    Index n, const Number* x, const Number* z_L, const Number* z_U,
    Index m, const Number* g, const Number* lambda,
    Number obj_value,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  assert(n == (Index) num_variables());
  assert(m == (Index) num_constraints());
  x_sol_.resize(n);
  IpBlasDcopy(n, x, 1, x_sol_(), 1);

  if(m > 0){
    g_sol_.resize(m);
    IpBlasDcopy(m, g, 1, g_sol_(), 1);
  }
  duals_sol_.resize(m + 2*n);
  if(lambda){
    if(m > 0)
      IpBlasDcopy(m, lambda, 1, duals_sol_() + 2*n, 1);

    IpBlasDcopy(n, z_L, 1, duals_sol_(), 1);
    IpBlasDcopy(n, z_U, 1, duals_sol_() + n, 1);
  }

  return_status_ = status;
  obj_value_ = obj_value;

  if(status == Ipopt::LOCAL_INFEASIBILITY && ip_cq != NULL){
    obj_value_ = ip_cq->curr_nlp_constraint_violation(NORM_MAX);
  }
  if (IsValid(curr_warm_starter_)) {
    curr_warm_starter_->Finalize();
  }
}

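/** Intermediate callback: abort Ipopt early when BonminAbortAll is set;
    when built with WARM_STARTER, also collect iterates for warm starts. */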
bool TMINLP2TNLP::intermediate_callback(AlgorithmMode mode,
    Index iter, Number obj_value,
    Number inf_pr, Number inf_du,
    Number mu, Number d_norm,
    Number regularization_size,
    Number alpha_du, Number alpha_pr,
    Index ls_trials,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  if (BonminAbortAll) return false;
#ifdef WARM_STARTER
  if (!warm_start_entire_iterate_) {
    return true;
  }
  if (need_new_warm_starter_) {
    curr_warm_starter_ = new IpoptInteriorWarmStarter(num_variables(),
        x_l_(), x_u_(),
        nlp_lower_bound_inf_,
        nlp_upper_bound_inf_,
        warm_start_entire_iterate_);
    need_new_warm_starter_ = false;
  }
  return curr_warm_starter_->UpdateStoredIterates(mode, *ip_data, *ip_cq);
#else
  return true;
#endif
}

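/** Write the differences between the current and original bounds to
    probName.bounds (and, when variable names are given, probName.mod),
    and dump the current starting point to probName.start. */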
void
TMINLP2TNLP::outputDiffs(const std::string& probName, const std::string * varNames)
{
  const int numcols = num_variables();
  const int numrows = num_constraints();

  const double * currentLower = x_l();
  const double * currentUpper = x_u();

  const double * originalLower = orig_x_l();
  const double * originalUpper = orig_x_u();
  CoinRelFltEq eq;
  std::string fBoundsName = probName;
  fBoundsName += ".bounds";
  std::string fModName = probName;
  fModName += ".mod";
  std::ofstream fBounds;
  std::ofstream fMod;
  bool hasVarNames = (varNames != NULL);

  if(hasVarNames)
    fMod.open(fModName.c_str());
  fBounds.open(fBoundsName.c_str());

  for(int i = 0 ; i < numcols ; i++) {
    if(!eq(currentLower[i], originalLower[i])) {
      if(hasVarNames)
        fMod<<"bounds"<<i<<": "
        <<varNames[i]<<" >= "
        <<currentLower[i]<<";\n";

      fBounds<<"LO"<<"\t"<<i<<"\t"<<currentLower[i]<<std::endl;
    }
    if(!eq(currentUpper[i], originalUpper[i])) {
      if(hasVarNames)
        fMod<<"bounds"<<i<<": "
        <<varNames[i]<<" <= "
        <<currentUpper[i]<<";\n";

      fBounds<<"UP"<<"\t"<<i<<"\t"<<currentUpper[i]<<std::endl;
    }
  }

  std::string fStartPointName = probName;
  fStartPointName += ".start";

  std::ofstream fStartPoint(fStartPointName.c_str());
  const double * primals = x_init();
  const double * duals = duals_init();
  fStartPoint.precision(17);
  fStartPoint<<numcols<<"\t"<<2*numcols+numrows<<std::endl;
  for(int i = 0 ; i < numcols ; i++)
    fStartPoint<<primals[i]<<std::endl;
  int end = 2*numcols + numrows;
  if(duals) {
    for(int i = 0 ; i < end ; i++)
      fStartPoint<<duals[i]<<std::endl;
  }
}

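/** Force the stored solution to be integer-infeasible: each integer or
    binary variable within its bound range is set to ceil(x_l) + 0.5. */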
void
TMINLP2TNLP::force_fractionnal_sol()
{
  for(int i = 0 ; i < num_variables() ; i++) {
    if( ( var_types_[i] == TMINLP::INTEGER ||
          var_types_[i] == TMINLP::BINARY ) &&
        x_l_[i] < x_u_[i] + 0.5)
    {
      x_sol_[i] = ceil(x_l_[i]) + 0.5;
    }
  }
}

bool
TMINLP2TNLP::get_scaling_parameters(Number& obj_scaling,
    bool& use_x_scaling, Index n,
    Number* x_scaling,
    bool& use_g_scaling, Index m,
    Number* g_scaling)
{
  return tminlp_->get_scaling_parameters(obj_scaling, use_x_scaling, n,
      x_scaling,
      use_g_scaling, m, g_scaling);
}

void
TMINLP2TNLP::SetWarmStarter(SmartPtr<IpoptInteriorWarmStarter> warm_starter)
{
  curr_warm_starter_ = warm_starter;
}

SmartPtr<IpoptInteriorWarmStarter>
TMINLP2TNLP::GetWarmStarter()
{
  return curr_warm_starter_;
}

double
TMINLP2TNLP::evaluateUpperBoundingFunction(const double * x){
  Number help;
  tminlp_->eval_upper_bound_f(num_variables(), x, help);
  return help;
}

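/** Round the integer variables of the stored solution (via the OsiObject
    list when given, otherwise via var_types_), re-evaluate g and f at the
    rounded point, and return the maximum constraint violation. */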
double
TMINLP2TNLP::check_solution(OsiObject ** objects, int nObjects){
  assert(x_sol_.size() == (size_t) num_variables());
  assert(g_sol_.size() == (size_t) num_constraints());
  if (objects) {
    for (int i = 0 ; i < nObjects ; i++) {
      OsiSimpleInteger * obj = dynamic_cast<OsiSimpleInteger *>(objects[i]);
      if(obj){
        int colNumber = obj->columnNumber();
        x_sol_[colNumber] = floor(x_sol_[colNumber] + 0.5);
      }
    }
  }
  else {
    for (unsigned int i = 0 ; i < x_sol_.size() ; i++) {
      if (var_types_[i] == TMINLP::INTEGER || var_types_[i] == TMINLP::BINARY) {
        x_sol_[i] = floor(x_sol_[i] + 0.5);
      }
    }
  }
  eval_g((Index) x_sol_.size(), x_sol_(), true, (Index) g_sol_.size(), g_sol_());
  eval_f((Index) x_sol_.size(), x_sol_(), false, obj_value_);
  double error = 0;
  for(unsigned int i = 0 ; i < g_sol_.size() ; i++){
    error = std::max(error, std::max(0., g_l_[i] - g_sol_[i]));
    error = std::max(error, std::max(0., g_sol_[i] - g_u_[i]));
  }
  return error;
}

} // namespace Bonmin