#include "BonTMINLP2TNLP.hpp"
#include "IpBlas.hpp"
#include "IpAlgTypes.hpp"
#include "CoinHelperFunctions.hpp" // CoinZeroN
#include "CoinFloatEqual.hpp"      // CoinRelFltEq
#include <climits>
#include <cfloat>                  // DBL_MAX
#include <cmath>                   // ceil, floor
#include <algorithm>               // std::max
#include <string>
#include <fstream>
#include <sstream>
#include "Ipopt/BonIpoptInteriorWarmStarter.hpp"
#include "OsiBranchingObject.hpp"

extern bool BonminAbortAll;
class OsiObject;

namespace Bonmin
{

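/** Construct the TNLP view of a TMINLP: query the TMINLP for problem
 *  dimensions, variable types, bounds, and the primal starting point,
 *  and keep private copies so that bounds can later be tightened during
 *  branch-and-bound without touching the original model. */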
TMINLP2TNLP::TMINLP2TNLP(const SmartPtr<TMINLP> tminlp
#ifdef WARM_STARTER
    ,
    const OptionsList& options
#endif
    )
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(tminlp),
    nnz_jac_g_(0),
    nnz_h_lag_(0),
    index_style_(TNLP::FORTRAN_STYLE),
    obj_value_(1e100),
    curr_warm_starter_(),
    nlp_lower_bound_inf_(-DBL_MAX),
    nlp_upper_bound_inf_(DBL_MAX),
    warm_start_entire_iterate_(true),
    need_new_warm_starter_(true)
{
  assert(IsValid(tminlp_));

  // Query the problem dimensions and sparsity sizes.
  Index n, m;
  bool retval =
    tminlp_->get_nlp_info(n, m, nnz_jac_g_, nnz_h_lag_, index_style_);
  ASSERT_EXCEPTION(retval, TMINLP_INVALID,
                   "get_nlp_info of TMINLP returns false.");

  var_types_.resize(n);
  tminlp_->get_variables_types(n, var_types_());

  // Keep working copies of the bounds plus the original bounds,
  // so that bound changes made during branch-and-bound can be undone.
  x_l_.resize(n);
  x_u_.resize(n);
  orig_x_l_.resize(n);
  orig_x_u_.resize(n);
  g_l_.resize(m);
  g_u_.resize(m);
  if (m) {
    tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, g_l_(), g_u_());
  }
  else {
    tminlp_->get_bounds_info(n, x_l_(), x_u_(), m, NULL, NULL);
  }
  IpBlasDcopy(n, x_l_(), 1, orig_x_l_(), 1);
  IpBlasDcopy(n, x_u_(), 1, orig_x_u_(), 1);

  // x_init_ stores the primal point followed by room for the duals:
  // [ x (n) | z_L (n) | z_U (n) | lambda (m) ].
  x_init_.reserve(3*n + m);
  x_init_.resize(3*n + m);
  tminlp_->get_starting_point(n, true, x_init_(), false, NULL, NULL,
                              m, false, NULL);
  CoinZeroN(x_init_() + n, 2*n + m);
  x_init_user_.resize(n);
  IpBlasDcopy(n, x_init_(), 1, x_init_user_(), 1);
  duals_init_ = NULL;

#ifdef WARM_STARTER
  options.GetNumericValue("nlp_lower_bound_inf", nlp_lower_bound_inf_, "");
  options.GetNumericValue("nlp_upper_bound_inf", nlp_upper_bound_inf_, "");
  options.GetBoolValue("warm_start_entire_iterate",
                       warm_start_entire_iterate_, "");
#endif
}

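/** Copy constructor: share the underlying TMINLP but deep-copy all
 *  bound, starting-point, and solution arrays via gutsOfCopy. */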
TMINLP2TNLP::TMINLP2TNLP(const TMINLP2TNLP& other)
    :
    var_types_(),
    x_l_(),
    x_u_(),
    orig_x_l_(),
    orig_x_u_(),
    g_l_(),
    g_u_(),
    x_init_(),
    duals_init_(NULL),
    x_init_user_(),
    x_sol_(),
    g_sol_(),
    duals_sol_(),
    tminlp_(other.tminlp_),
    nnz_jac_g_(other.nnz_jac_g_),
    nnz_h_lag_(other.nnz_h_lag_),
    index_style_(other.index_style_),
    return_status_(other.return_status_),
    obj_value_(other.obj_value_),
    curr_warm_starter_(other.curr_warm_starter_),
    nlp_lower_bound_inf_(other.nlp_lower_bound_inf_),
    nlp_upper_bound_inf_(other.nlp_upper_bound_inf_),
    warm_start_entire_iterate_(other.warm_start_entire_iterate_),
    need_new_warm_starter_(other.need_new_warm_starter_)
{
  gutsOfCopy(other);
}

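/** Assignment: copy the scalar state, then rebuild the arrays with
 *  gutsOfDelete/gutsOfCopy. Self-assignment is a no-op. */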
TMINLP2TNLP &
TMINLP2TNLP::operator=(const TMINLP2TNLP& rhs)
{
  if (this != &rhs) {
    tminlp_ = rhs.tminlp_;
    nnz_jac_g_ = rhs.nnz_jac_g_;
    nnz_h_lag_ = rhs.nnz_h_lag_;
    index_style_ = rhs.index_style_;
    return_status_ = rhs.return_status_;
    obj_value_ = rhs.obj_value_;
    curr_warm_starter_ = rhs.curr_warm_starter_;
    nlp_lower_bound_inf_ = rhs.nlp_lower_bound_inf_;
    nlp_upper_bound_inf_ = rhs.nlp_upper_bound_inf_;
    warm_start_entire_iterate_ = rhs.warm_start_entire_iterate_;
    need_new_warm_starter_ = rhs.need_new_warm_starter_;

    gutsOfDelete();
    gutsOfCopy(rhs);
  }
  return (*this);
}

TMINLP2TNLP::~TMINLP2TNLP()
{
  gutsOfDelete();
}

void
TMINLP2TNLP::gutsOfDelete()
{
  // Nothing to free: all storage is held in vectors that clean up
  // themselves; duals_init_ only aliases memory inside x_init_.
}

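/** Deep-copy the array state from other: variable and constraint
 *  bounds, original bounds, starting points, and any available primal,
 *  constraint, and dual solution values. */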
void
TMINLP2TNLP::gutsOfCopy(const TMINLP2TNLP& other)
{
  Index n = other.num_variables();
  Index m = other.num_constraints();

  if (n > 0) {
    var_types_ = other.var_types_;

    x_l_.resize(n);
    x_u_.resize(n);
    IpBlasDcopy(n, other.x_l_(), 1, x_l_(), 1);
    IpBlasDcopy(n, other.x_u_(), 1, x_u_(), 1);

    orig_x_l_.resize(n);
    orig_x_u_.resize(n);
    IpBlasDcopy(n, other.orig_x_l_(), 1, orig_x_l_(), 1);
    IpBlasDcopy(n, other.orig_x_u_(), 1, orig_x_u_(), 1);

    x_init_user_.resize(n);
    IpBlasDcopy(n, other.x_init_user_(), 1, x_init_user_(), 1);
    if (!other.x_sol_.empty()) {
      Set_x_sol(n, other.x_sol_());
    }
  }

  if (!other.g_l_.empty()) {
    const size_t size = other.g_l_.size();
    g_l_.resize(size);
    g_u_.resize(size);
  }

  if (m > 0) {
    IpBlasDcopy(m, other.g_l_(), 1, g_l_(), 1);
    IpBlasDcopy(m, other.g_u_(), 1, g_u_(), 1);
    if (!other.g_sol_.empty()) {
      g_sol_.resize(m);
      IpBlasDcopy(m, other.g_sol_(), 1, g_sol_(), 1);
    }
  }

  x_init_ = other.x_init_;
  // duals_init_ is not owned storage; re-point it into our own x_init_.
  if (other.duals_init_) {
    duals_init_ = x_init_() + n;
  }
  else {
    duals_init_ = NULL;
  }

  if (!other.duals_sol_.empty()) {
    duals_sol_.resize(m + 2*n);
    IpBlasDcopy((Index) duals_sol_.size(), other.duals_sol_(), 1, duals_sol_(), 1);
  }
}

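/** Setters that overwrite the current (working) variable bounds; the
 *  original bounds kept in orig_x_l_/orig_x_u_ are left untouched. */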
void TMINLP2TNLP::SetVariablesBounds(Index n,
                                     const Number * x_l,
                                     const Number * x_u)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
}

void TMINLP2TNLP::SetVariablesLowerBounds(Index n,
                                          const Number * x_l)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_l, 1, x_l_(), 1);
}

void TMINLP2TNLP::SetVariablesUpperBounds(Index n,
                                          const Number * x_u)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_u, 1, x_u_(), 1);
}

void TMINLP2TNLP::SetVariableBounds(Index var_no, Number x_l, Number x_u)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_l_[var_no] = x_l;
  x_u_[var_no] = x_u;
}

void TMINLP2TNLP::SetVariableLowerBound(Index var_no, Number x_l)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_l_[var_no] = x_l;
}

void TMINLP2TNLP::SetVariableUpperBound(Index var_no, Number x_u)
{
  assert(var_no >= 0 && var_no < num_variables());
  x_u_[var_no] = x_u;
}

void TMINLP2TNLP::SetStartingPoint(Index n, const Number* x_init)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_init, 1, x_init_(), 1);
}

void TMINLP2TNLP::resetStartingPoint()
{
  curr_warm_starter_ = NULL;
  IpBlasDcopy(x_init_user_.size(), x_init_user_(), 1, x_init_(), 1);
}

void TMINLP2TNLP::setxInit(Index ind, const Number val)
{
  x_init_[ind] = val;
}

void TMINLP2TNLP::setxInit(Index n, const Number* x_init)
{
  assert(n == num_variables());
  IpBlasDcopy(n, x_init, 1, x_init_(), 1);
}

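/** Dual starting values live in the tail of x_init_, laid out as
 *  [ z_L (n) | z_U (n) | lambda (m) ] starting at offset n; setting any
 *  dual value makes duals_init_ point at that block. */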
void TMINLP2TNLP::setDualInit(Index ind, const Number val)
{
  x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
  if (!duals_init_)
    duals_init_ = &x_init_[num_variables()];
  duals_init_[ind] = val;
}

void TMINLP2TNLP::setDualsInit(Index m, const Number* duals_init)
{
  assert(m == num_variables() * 2 + num_constraints());
  x_init_.resize(num_variables() * 3 + num_constraints(), 0.);
  if (!duals_init_)
    duals_init_ = x_init_() + num_variables();

  if (m > 0)
    IpBlasDcopy(m, duals_init, 1, duals_init_, 1);
}

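/** Record a primal solution, allocating x_sol_ on first use. */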
void TMINLP2TNLP::Set_x_sol(Index n, const Number* x_sol)
{
  assert(n == num_variables());
  if (x_sol_.empty()) {
    x_sol_.resize(n);
  }
  assert(n == (int) x_sol_.size());
  IpBlasDcopy(n, x_sol, 1, x_sol_(), 1);
}

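/** Change the type (continuous/binary/integer) of a single variable. */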
void TMINLP2TNLP::SetVariableType(Index n, TMINLP::VariableType type)
{
  assert(n >= 0 && n < num_variables());
  var_types_[n] = type;
}

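/** TNLP interface: report the problem dimensions and bounds that Ipopt
 *  sees. The bounds returned are the current (possibly tightened)
 *  working bounds, not the original TMINLP bounds. */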
bool TMINLP2TNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
                               Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
{
  n = num_variables();
  m = num_constraints();
  nnz_jac_g = nnz_jac_g_;
  nnz_h_lag = nnz_h_lag_;
  index_style = index_style_;
  return true;
}

bool TMINLP2TNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
                                  Index m, Number* g_l, Number* g_u)
{
  assert(n == num_variables());
  assert(m == num_constraints());
  IpBlasDcopy(n, x_l_(), 1, x_l, 1);
  IpBlasDcopy(n, x_u_(), 1, x_u, 1);
  if (m > 0) {
    IpBlasDcopy(m, g_l_(), 1, g_l, 1);
    IpBlasDcopy(m, g_u_(), 1, g_u, 1);
  }
  return true;
}

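/** Hand the stored primal and dual starting point to Ipopt. The
 *  storage is resized (zero-filled) first, so dual requests are always
 *  served from the [ z_L | z_U | lambda ] block inside x_init_. */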
bool TMINLP2TNLP::get_starting_point(Index n, bool init_x, Number* x,
                                     bool init_z, Number* z_L, Number* z_U,
                                     Index m, bool init_lambda,
                                     Number* lambda)
{
  assert(m == num_constraints());
  assert(n == num_variables());
  x_init_.resize(3*n + m, 0.);
  duals_init_ = x_init_() + n;
  if (init_x) {
    if (x_init_.empty())
      return false;
    IpBlasDcopy(n, x_init_(), 1, x, 1);
  }
  if (init_z) {
    if (duals_init_ == NULL)
      return false;
    IpBlasDcopy(n, duals_init_, 1, z_L, 1);
    IpBlasDcopy(n, duals_init_ + n, 1, z_U, 1);
  }
  if (init_lambda) {
    if (duals_init_ == NULL)
      return false;
    if (m > 0)
      IpBlasDcopy(m, duals_init_ + 2*n, 1, lambda, 1);
  }

  need_new_warm_starter_ = true;
  return true;
}

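/** Let the stored warm starter build a full Ipopt iterate from the
 *  current bounds; fails when no warm starter is available. */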
bool TMINLP2TNLP::get_warm_start_iterate(IteratesVector& warm_start_iterate)
{
  if (IsNull(curr_warm_starter_)) {
    return false;
  }

  bool retval = curr_warm_starter_->WarmStartIterate(num_variables(), x_l_(), x_u_(),
                                                     warm_start_iterate);

  need_new_warm_starter_ = true;
  return retval;
}

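/** Function evaluations are simply forwarded to the underlying TMINLP;
 *  this class only manages bounds, starting points, and solutions. */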
bool TMINLP2TNLP::eval_f(Index n, const Number* x, bool new_x,
                         Number& obj_value)
{
  return tminlp_->eval_f(n, x, new_x, obj_value);
}

bool TMINLP2TNLP::eval_grad_f(Index n, const Number* x, bool new_x,
                              Number* grad_f)
{
  grad_f[n-1] = 0; // clear the last entry before delegating
  return tminlp_->eval_grad_f(n, x, new_x, grad_f);
}

bool TMINLP2TNLP::eval_g(Index n, const Number* x, bool new_x,
                         Index m, Number* g)
{
  return tminlp_->eval_g(n, x, new_x, m, g);
}

bool TMINLP2TNLP::eval_jac_g(Index n, const Number* x, bool new_x,
                             Index m, Index nele_jac, Index* iRow,
                             Index *jCol, Number* values)
{
  return tminlp_->eval_jac_g(n, x, new_x, m, nele_jac,
                             iRow, jCol, values);
}

bool TMINLP2TNLP::eval_h(Index n, const Number* x, bool new_x,
                         Number obj_factor, Index m, const Number* lambda,
                         bool new_lambda, Index nele_hess,
                         Index* iRow, Index* jCol, Number* values)
{
  return tminlp_->eval_h(n, x, new_x, obj_factor, m, lambda,
                         new_lambda, nele_hess,
                         iRow, jCol, values);
}

bool TMINLP2TNLP::eval_gi(Index n, const Number* x, bool new_x,
                          Index i, Number& gi)
{
  return tminlp_->eval_gi(n, x, new_x, i, gi);
}

bool TMINLP2TNLP::eval_grad_gi(Index n, const Number* x, bool new_x,
                               Index i, Index& nele_grad_gi, Index* jCol,
                               Number* values)
{
  return tminlp_->eval_grad_gi(n, x, new_x, i, nele_grad_gi, jCol, values);
}

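/** Store the solution Ipopt computed: primal point, constraint values,
 *  and the duals packed as [ z_L (n) | z_U (n) | lambda (m) ], together
 *  with the return status and objective value. */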
void TMINLP2TNLP::finalize_solution(SolverReturn status,
    Index n, const Number* x, const Number* z_L, const Number* z_U,
    Index m, const Number* g, const Number* lambda,
    Number obj_value,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  assert(n == (Index) num_variables());
  assert(m == (Index) num_constraints());
  x_sol_.resize(n);
  IpBlasDcopy(n, x, 1, x_sol_(), 1);

  if (m > 0) {
    g_sol_.resize(m);
    IpBlasDcopy(m, g, 1, g_sol_(), 1);
  }

  duals_sol_.resize(m + 2*n);
  if (lambda) {
    if (m > 0)
      IpBlasDcopy(m, lambda, 1, duals_sol_() + 2*n, 1);
    IpBlasDcopy(n, z_L, 1, duals_sol_(), 1);
    IpBlasDcopy(n, z_U, 1, duals_sol_() + n, 1);
  }

  return_status_ = status;
  obj_value_ = obj_value;

  if (IsValid(curr_warm_starter_)) {
    curr_warm_starter_->Finalize();
  }
}

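/** Called by Ipopt at every iteration. Aborts the solve when
 *  BonminAbortAll is set; with WARM_STARTER enabled it also feeds the
 *  current iterate to the interior warm starter. */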
bool TMINLP2TNLP::intermediate_callback(AlgorithmMode mode,
    Index iter, Number obj_value,
    Number inf_pr, Number inf_du,
    Number mu, Number d_norm,
    Number regularization_size,
    Number alpha_du, Number alpha_pr,
    Index ls_trials,
    const IpoptData* ip_data,
    IpoptCalculatedQuantities* ip_cq)
{
  if (BonminAbortAll) return false;
#ifdef WARM_STARTER
  if (!warm_start_entire_iterate_) {
    return true;
  }
  if (need_new_warm_starter_) {
    // Create a new warm starter for the current problem.
    curr_warm_starter_ = new IpoptInteriorWarmStarter(num_variables(), x_l_(), x_u_(),
        nlp_lower_bound_inf_,
        nlp_upper_bound_inf_,
        warm_start_entire_iterate_);
    need_new_warm_starter_ = false;
  }

  return curr_warm_starter_->UpdateStoredIterates(mode, *ip_data, *ip_cq);
#else
  return true;
#endif
}

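/** Write the differences between the current and the original bounds
 *  to probName.bounds (and, when variable names are supplied, as bound
 *  statements to probName.mod), then dump the current primal and dual
 *  starting point to probName.start. */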
void
TMINLP2TNLP::outputDiffs(const std::string& probName, const std::string * varNames)
{
  const int numcols = num_variables();
  const int numrows = num_constraints();

  const double * currentLower = x_l();
  const double * currentUpper = x_u();

  const double * originalLower = orig_x_l();
  const double * originalUpper = orig_x_u();
  CoinRelFltEq eq;
  std::string fBoundsName = probName;
  std::ostringstream os;
  fBoundsName += ".bounds";
  std::string fModName = probName;
  fModName += ".mod";
  std::ofstream fBounds;
  std::ofstream fMod;
  const bool hasVarNames = (varNames != NULL);

  if (hasVarNames)
    fMod.open(fModName.c_str());
  fBounds.open(fBoundsName.c_str());

  for (int i = 0 ; i < numcols ; i++) {
    if (!eq(currentLower[i], originalLower[i])) {
      if (hasVarNames)
        fMod << "bounds" << i << ": "
             << varNames[i] << " >= "
             << currentLower[i] << ";\n";
      fBounds << "LO" << "\t" << i << "\t" << currentLower[i] << std::endl;
    }
    if (!eq(currentUpper[i], originalUpper[i])) {
      if (hasVarNames)
        fMod << "bounds" << i << ": "
             << varNames[i] << " <= "
             << currentUpper[i] << ";\n";
      fBounds << "UP" << "\t" << i << "\t" << currentUpper[i] << std::endl;
    }
  }

  // Dump the starting point: primals first, then all dual values.
  std::string fStartPointName = probName;
  fStartPointName += ".start";

  std::ofstream fStartPoint(fStartPointName.c_str());
  const double * primals = x_init();
  const double * duals = duals_init();
  fStartPoint.precision(17);
  fStartPoint << numcols << "\t" << 2*numcols + numrows << std::endl;
  for (int i = 0 ; i < numcols ; i++)
    fStartPoint << primals[i] << std::endl;
  int end = 2*numcols + numrows;
  if (duals) {
    for (int i = 0 ; i < end ; i++)
      fStartPoint << duals[i] << std::endl;
  }
}

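/** Force the stored solution to be fractional: affected integer and
 *  binary variables are moved to ceil(lower bound) + 0.5. */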
void
TMINLP2TNLP::force_fractionnal_sol()
{
  for (int i = 0 ; i < num_variables() ; i++) {
    if ( ( var_types_[i] == TMINLP::INTEGER ||
           var_types_[i] == TMINLP::BINARY ) &&
         x_l_[i] < x_u_[i] + 0.5)
    {
      x_sol_[i] = ceil(x_l_[i]) + 0.5; // make sure the value is fractional
    }
  }
}

bool
TMINLP2TNLP::get_scaling_parameters(Number& obj_scaling,
                                    bool& use_x_scaling, Index n,
                                    Number* x_scaling,
                                    bool& use_g_scaling, Index m,
                                    Number* g_scaling)
{
  return tminlp_->get_scaling_parameters(obj_scaling, use_x_scaling, n,
                                         x_scaling,
                                         use_g_scaling, m, g_scaling);
}

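/** Accessors for the interior warm starter used to hot-start Ipopt
 *  between branch-and-bound nodes. */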
void
TMINLP2TNLP::SetWarmStarter(SmartPtr<IpoptInteriorWarmStarter> warm_starter)
{
  curr_warm_starter_ = warm_starter;
}

SmartPtr<IpoptInteriorWarmStarter>
TMINLP2TNLP::GetWarmStarter()
{
  return curr_warm_starter_;
}

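/** Evaluate the TMINLP's upper-bounding function at x and return its
 *  value. */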
double
TMINLP2TNLP::evaluateUpperBoundingFunction(const double * x)
{
  Number help = 0.; // avoid returning an uninitialized value on failure
  tminlp_->eval_upper_bound_f(num_variables(), x, help);
  return help;
}

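/** Round the stored solution to integrality (using the OsiObject list
 *  when given, otherwise the stored variable types), re-evaluate the
 *  constraints and objective at the rounded point, and return the
 *  largest constraint violation. */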
double
TMINLP2TNLP::check_solution(OsiObject ** objects, int nObjects)
{
  assert((int) x_sol_.size() == num_variables());
  assert((int) g_sol_.size() == num_constraints());
  if (objects) {
    for (int i = 0 ; i < nObjects ; i++) {
      OsiSimpleInteger * obj = dynamic_cast<OsiSimpleInteger *>(objects[i]);
      if (obj) {
        int colNumber = obj->columnNumber();
        x_sol_[colNumber] = floor(x_sol_[colNumber] + 0.5);
      }
    }
  }
  else {
    for (unsigned int i = 0 ; i < x_sol_.size() ; i++) {
      if (var_types_[i] == TMINLP::INTEGER || var_types_[i] == TMINLP::BINARY) {
        x_sol_[i] = floor(x_sol_[i] + 0.5);
      }
    }
  }
  eval_g((Index) x_sol_.size(), x_sol_(), true, (Index) g_sol_.size(), g_sol_());
  eval_f((Index) x_sol_.size(), x_sol_(), false, obj_value_);
  double error = 0.;
  for (unsigned int i = 0 ; i < g_sol_.size() ; i++) {
    error = std::max(error, std::max(0., g_l_[i] - g_sol_[i]));
    error = std::max(error, std::max(0., g_sol_[i] - g_u_[i]));
  }
  return error;
}

} // namespace Bonmin