
#include "BonOuterDescription.hpp"
#include "BonOsiTMINLPInterface.hpp"

namespace Bonmin{

/** Decide what to do with a Jacobian coefficient that may be numerically tiny.
 *
 *  Returns true when the coefficient should be kept in the cut (possibly
 *  after being bumped up to +/- tiny), false when it can be dropped.
 *  When a dropped coefficient sits on a bounded column and the row is
 *  free on the matching side, its contribution at the current solution
 *  is absorbed into the cut bounds lb / ub instead.
 *
 *  @param value     coefficient; may be rewritten to +/- tiny on keep.
 *  @param colLower  lower bound of the column.
 *  @param colUpper  upper bound of the column.
 *  @param rowLower  lower bound of the row.
 *  @param rowUpper  upper bound of the row.
 *  @param colsol    current value of the column.
 *  @param lb        cut lower bound, shifted when the entry is absorbed.
 *  @param ub        cut upper bound, shifted when the entry is absorbed.
 *  @param tiny      keep threshold.
 *  @param veryTiny  drop-outright threshold.
 */
static inline
bool cleanNnz(double &value, double colLower, double colUpper,
double rowLower, double rowUpper, double colsol,
double & lb, double &ub, double tiny, double veryTiny)
{
  const double magnitude = fabs(value);

  // Large enough: keep the coefficient untouched.
  if (magnitude >= tiny) return true;

  // Negligible: drop it without compensation.
  if (magnitude < veryTiny) return false;

  // In between: try to absorb the entry into the cut bounds.
  const double infty = 1e20;
  const bool hasUpper = colUpper < 10000;
  const bool hasLower = colLower > -10000;
  const bool noRowLower = rowLower <= -infty;
  const bool noRowUpper = rowUpper >= infty;
  const bool positive = value > 0;

  if (hasLower && positive && noRowUpper) {
    lb += value * (colsol - colLower);
    return false;
  }
  if (hasLower && !positive && noRowLower) {
    ub += value * (colsol - colLower);
    return false;
  }
  if (hasUpper && !positive && noRowUpper) {
    lb += value * (colsol - colUpper);
    return false;
  }
  if (hasUpper && positive && noRowLower) {
    ub += value * (colsol - colUpper);
    return false;
  }

  // Could not absorb it: bump the magnitude up to tiny, keep the sign.
  value = positive ? tiny : -tiny;
  return true;
}
/** Generate an outer-approximation (first-order Taylor) cut of constraint
 *  number ind at the point x and add it to the cut pool cs.
 *
 *  @param si     interface holding the nonlinear problem and LP data.
 *  @param cs     cut pool that receives the generated cut.
 *  @param ind    index of the constraint to linearize.
 *  @param x      point at which the constraint is linearized.
 *  @param getObj currently unused in this function.
 *  @param x2     if not NULL, the cut is added only when its bound gap at
 *                x2 is at least theta (see NOTE below).
 *  @param theta  threshold used together with x2.
 *  @param global if true the cut is marked globally valid.
 */
void getMyOuterApproximation(
OsiTMINLPInterface &si, OsiCuts &cs, int ind,
const double * x, int getObj, const double * x2, double theta,
bool global) {
int n, m, nnz_jac_g, nnz_h_lag;
Ipopt::TNLP::IndexStyleEnum index_style;
TMINLP2TNLP* problem = si.problem();
problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

// Value of constraint ind at x.
double g_i = 0;
problem->eval_gi(n, x, 1, ind, g_i);
// Two-pass gradient query: first call fills the sparsity pattern (jCol),
// second call fills the nnz values.
vector<int> jCol(n);
int nnz;
problem->eval_grad_gi(n, x, 0, ind, nnz, jCol(), NULL);
vector<double> jValues(nnz);
problem->eval_grad_gi(n, x, 0, ind, nnz, NULL, jValues());

CoinPackedVector cut;
double lb;
double ub;

const double * rowLower = si.getRowLower();
const double * rowUpper = si.getRowUpper();
const double * colLower = si.getColLower();
const double * colUpper = si.getColUpper();
// NOTE(review): assumes the row duals start after 2*n column-bound
// multipliers in the array returned by getRowPrice() — confirm against
// the OsiTMINLPInterface implementation.
const double * duals = si.getRowPrice() + 2 * n;
double infty = si.getInfinity();
double nlp_infty = infty;
int rowIdx = ind;

// Cut bounds start from the constraint bounds shifted by the
// linearization constant g_i; the gradient terms at x are folded in
// inside the loop below.
if (rowLower[rowIdx] > -nlp_infty)
lb = rowLower[rowIdx] - g_i;
else
lb = -infty;
if (rowUpper[rowIdx] < nlp_infty)
ub = rowUpper[rowIdx] - g_i;
else
ub = infty;
// For a range constraint keep only the side selected by the sign of its
// dual value; a zero dual relaxes both sides.
if (rowLower[rowIdx] > -infty && rowUpper[rowIdx] < infty) {
if (duals[rowIdx] >= 0)
lb = -infty;
if (duals[rowIdx] <= 0)
ub = infty;
}

// Thresholds below which gradient entries are dropped or absorbed into
// the bounds by cleanNnz.
double tiny = 1e-08;
double veryTiny = 1e-20;

for (int i = 0; i < nnz; i++) {
// Convert 1-based Fortran indices to 0-based C indices.
if(index_style == Ipopt::TNLP::FORTRAN_STYLE) jCol[i]--;
const int &colIdx = jCol[i];

// cleanNnz returns false when it dropped the coefficient (possibly
// compensating by shifting lb/ub); only kept coefficients enter the cut.
if (cleanNnz(jValues[i], colLower[colIdx], colUpper[colIdx],
rowLower[rowIdx], rowUpper[rowIdx], x[colIdx], lb, ub,
tiny, veryTiny)) {
cut.insert(colIdx, jValues[i]);
// Fold grad_i(x) * x_i into the bounds so the final cut reads
// lb <= grad(x)^T y <= ub.
if (lb > -infty)
lb += jValues[i] * x[colIdx];
if (ub < infty)
ub += jValues[i] * x[colIdx];
}
}

// When a comparison point x2 is supplied, keep the cut only when the
// gap between the cut activity at x2 and its bounds reaches theta.
// NOTE(review): fabs makes this a distance, not a signed violation — a
// point strictly inside the feasible side also counts; confirm intended.
bool add = true;

if (x2 != NULL) {
double rhs = cut.dotProduct(x2);
double violation = 0.;
if (ub < infty)
violation = std::max(violation, fabs(rhs - ub));
if (lb > -infty)
violation = std::max(violation, fabs(lb - rhs));
if (violation < theta) {
add = false;
}
}
OsiRowCut newCut;


if (add) {
if (global) {
newCut.setGloballyValidAsInteger(1);
}

// If this constraint is switched on/off by a binary variable (extra id
// > 0), make the cut conditional on that variable by moving the finite
// bound onto the binary's coefficient.
const int* ids = problem->get_const_xtra_id();

int binary_id = (ids == NULL) ? -1 : ids[ind];
if(binary_id>0) {
if (lb > -infty) {
cut.insert(binary_id, -lb);
newCut.setLb(0);
newCut.setUb(ub);

}
if (ub < infty) {
cut.insert(binary_id, -ub);
newCut.setLb(lb);
newCut.setUb(0);

}
}
else {
newCut.setLb(lb);
newCut.setUb(ub);
}


newCut.setRow(cut);

cs.insert(newCut);
}
}
00183
00184
00185
00186
00187
00188
00189
00190
00191
00192
00193
00194
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206
00207
00208
00209
00210
00211
00212
00213
00214
00215
00216
00217
00218
00219
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246
00247
00248
00249
00250
00251
00252
00253
00254
00255
00256
00257
00258
00259
00260
00261
00262
00263
00264
00265
00266
00267
00268
00269
00270
00271
00272
00273
00274
00275 void addOuterDescription(OsiTMINLPInterface &nlp, OsiSolverInterface &si,
00276 const double * x, int nbAp, bool getObj) {
00277 int n;
00278 int m;
00279 int nnz_jac_g;
00280 int nnz_h_lag;
00281 Ipopt::TNLP::IndexStyleEnum index_style;
00282
00283 TMINLP2TNLP* problem = nlp.problem();
00284 problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
00285
00286 const double * colLower = nlp.getColLower();
00287 const double * colUpper = nlp.getColUpper();
00288 const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
00289 vector<Ipopt::TNLP::LinearityType> constTypes(m);
00290 problem->get_constraints_linearity(m, constTypes());
00291
00292 int OuterDesc = 0;
00293 if (OuterDesc == 0) {
00294 OsiCuts cs;
00295
00296 double * p = CoinCopyOfArray(nlp.getColLower(), n);
00297 double * pp = CoinCopyOfArray(nlp.getColLower(), n);
00298 double * up = CoinCopyOfArray(nlp.getColUpper(), n);
00299
00300 std::vector<int> nbG(m, 2);
00301
00302 std::vector<double> step(n);
00303
00304 for (int i = 0; i < n; i++) {
00305
00306 if (colUpper[i] > 1e08) {
00307 up[i] = 0;
00308 }
00309
00310 if (colUpper[i] > 1e08 || colLower[i] < -1e08 || (variableType[i]
00311 == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER)) {
00312 step[i] = 0;
00313 } else
00314 step[i] = (up[i] - colLower[i]) / 2e02;
00315
00316 if (colLower[i] < -1e08) {
00317 p[i] = 0;
00318 pp[i] = 0;
00319 }
00320 }
00321 vector<double> g_p(m);
00322 double g_p_i, g_pp_i;
00323 problem->eval_g(n, p, 1, m, g_p());
00324 vector<double> g_pp(m);
00325 vector<double> g_up(m);
00326 problem->eval_g(n, up, 1, m, g_up());
00327
00328 for (int i = 0; (i < m); i++) {
00329 if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
00330 getMyOuterApproximation(nlp, cs, i, p, 0, NULL, 10000, true);
00331 }
00332 vector<double> thr(m);
00333 for (int i = 0; i < m; i++) {
00334 thr[i] = std::abs(g_up[i]-g_p[i])/nbAp;
00335 }
00336 double diff = 0;
00337 for (int i = 0; (i < m); i++) {
00338 if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
00339 p = CoinCopyOfArray(nlp.getColLower(), n);
00340 pp = CoinCopyOfArray(nlp.getColLower(), n);
00341 while (nbG[i] < nbAp) {
00342
00343
00344 for (int j = 0; j < n; j++) {
00345 pp[j] += step[j];
00346 }
00347 problem->eval_gi(n, p, 1, i, g_p_i);
00348 problem->eval_gi(n, pp, 1, i, g_pp_i);
00349 diff = std::abs(g_p_i - g_pp_i);
00350
00351 if (diff>=thr[i] ) {
00352 getMyOuterApproximation(nlp, cs, i, pp, 0, NULL, 10000, true);
00353 for (int j = 0; j < n; j++) {
00354 p[j] = pp[j];
00355 }
00356
00357 nbG[i]++;
00358 }
00359 }
00360
00361 }
00362
00363 for (int i = 0; i < m ; i++) {
00364 if(constTypes[i] != Ipopt::TNLP::NON_LINEAR) continue;
00365 getMyOuterApproximation(nlp, cs, i, up, 0, NULL, 10000, true);
00366 }
00367
00368 si.applyCuts(cs);
00369 delete [] p;
00370 delete [] pp;
00371 delete [] up;
00372
00373 }
00374
00375
00376 }
}