/home/coin/SVN-release/OS-2.1.1/Bcp/src/LP/BCP_lp_fathom.cpp

// Copyright (C) 2000, International Business Machines
// Corporation and others.  All Rights Reserved.
#include <numeric>

#include "CoinHelperFunctions.hpp"
#include "CoinSort.hpp"

#include "BCP_matrix.hpp"
#include "BCP_lp_node.hpp"
#include "BCP_lp_pool.hpp"
#include "BCP_lp.hpp"
#include "BCP_lp_user.hpp"
#include "BCP_lp_result.hpp"
#include "BCP_lp_functions.hpp"

//#############################################################################

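// Finish off the current search-tree node: optionally strip the formulation,
// send the node description to the tree manager with the given message tag,
// then clean up the node's local data.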
void
BCP_lp_perform_fathom(BCP_lp_prob& p, const char* msg, BCP_message_tag msgtag)
{
    p.user->print(p.param(BCP_lp_par::LpVerb_FathomInfo), "%s", msg);
    // Here we don't have col/row_indices to compress, we are from fathom and
    // we do want to force deletion.
    if (p.param(BCP_lp_par::SendFathomedNodeDesc)) {
        BCP_lp_delete_cols_and_rows(p, 0, 0, 0, true, true);
    }
    BCP_lp_send_node_description(p, 0, msgtag);
    BCP_lp_clean_up_node(p);
}

//#############################################################################

// The primal is infeasible when this function is called. Still, we must first
// try to achieve TDF (total dual feasibility) before trying to restore
// feasibility.

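// Returns true if the node has been fathomed (pruned here or sent on to the
// next phase) and false if new columns were added, in which case the caller
// is expected to resolve the LP relaxation.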
bool BCP_lp_fathom(BCP_lp_prob& p, const bool from_repricing)
{
    BCP_lp_result& lpres = *p.lp_result;

    int i, j;
    int added_size = 0;
    int vars_to_add_size = 0;
    const int max_var = p.param(BCP_lp_par::MaxVarsAddedPerIteration);

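    // The node-level column generation strategy decides what happens here:
    // prune the node right away, ship it to the next phase as it is, or try
    // to generate columns before giving up on it.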
    switch (p.node->colgen) {
    case BCP_DoNotGenerateColumns_Fathom:
        BCP_lp_perform_fathom(p, "LP:   Pruning node\n",
                              lpres.termcode() & BCP_ProvenPrimalInf ?
                              BCP_Msg_NodeDescription_Infeas_Pruned :
                              BCP_Msg_NodeDescription_OverUB_Pruned);
        return true;

    case BCP_DoNotGenerateColumns_Send:
        BCP_lp_perform_fathom(p, "LP:   Sending node for next phase\n",
                              lpres.termcode() & BCP_ProvenPrimalInf ?
                              BCP_Msg_NodeDescription_Infeas :
                              BCP_Msg_NodeDescription_OverUB);
        return true;

    case BCP_GenerateColumns:
        BCP_lp_check_ub(p);
        if (p.param(BCP_lp_par::LpVerb_ColumnGenerationInfo))
            printf("LP:   Generating columns before fathoming/resolving\n");
        BCP_vec<BCP_col*> cols_to_add;
        BCP_vec<BCP_var*> vars_to_add;
        if (lpres.termcode() & BCP_ProvenPrimalInf) { //############ infeasible
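            // Ask the LP solver for dual rays (Farkas certificates of the
            // infeasibility) and let the user generate columns whose addition
            // cuts off every ray, i.e., restores primal feasibility.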
            // *FIXME* : change the hardcoded 10 into a parameter
            std::vector<double*> dual_rays = p.lp_solver->getDualRays(10);
            if (dual_rays.size() > 0) {
                BCP_restore_feasibility(p, dual_rays, vars_to_add, cols_to_add);
                for (i = dual_rays.size() - 1; i >= 0; --i) {
                    delete[] dual_rays[i];
                }
            } else {
                throw BCP_fatal_error("\
BCP_lp_fathom(): infeasible but can't get a dual ray!\n");
            }
            vars_to_add_size = vars_to_add.size();
            if (vars_to_add_size == 0) {
                // Nothing helps...
                BCP_lp_perform_fathom(p, "\
LP:   Fathoming node (discovered not restorable inf.)\n",
                                      BCP_Msg_NodeDescription_Infeas_Pruned);
                return true;
            } else {
                // Great, we can fix infeasibility:
                for (i = 0; i < vars_to_add_size; ++i) {
                    vars_to_add[i]->set_bcpind(-BCP_lp_next_var_index(p));
                }
                BCP_lp_add_cols_to_lp(cols_to_add, p.lp_solver);
                purge_ptr_vector(cols_to_add);
                p.node->vars.append(vars_to_add);
                p.local_cut_pool->rows_are_valid(false);
                if (p.param(BCP_lp_par::LpVerb_ColumnGenerationInfo))
                    printf("LP:   %i variables added while restoring feasibility\n",
                           static_cast<int>(vars_to_add.size()));
                // No need to delete the entries in vars_to_add one-by-one;
                // those pointers are now owned by p.node->vars.
                // Here we don't have col/row_indices to compress, we say we
                // are not from fathom (because we do add columns, i.e., we are
                // not going to fathom the node after the call returns) and we
                // don't want to force deletion.
                BCP_lp_delete_cols_and_rows(p, 0, 0, 0, false, false);
                return false;
            }
        } else { //########################################### over upper bound
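            // The LP optimum is above the upper bound. Price out the
            // variables: if the user cannot produce any improving variables we
            // have TDF and the node can be pruned, otherwise add (some of) the
            // new columns and resolve.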
            BCP_price_vars(p, true /*from fathom*/, vars_to_add, cols_to_add);
            if (vars_to_add.size() == 0) {
                // we can fathom!
                BCP_lp_perform_fathom(p, "\
LP:   Fathoming node (discovered tdf & high cost)\n",
                                      BCP_Msg_NodeDescription_OverUB_Pruned);
                return true;
            }
            // keep only the best so many to add
            vars_to_add_size = vars_to_add.size();
            if (max_var < vars_to_add_size) {
                // reorder the generated variables (and the corresponding
                // columns) based on the reduced costs of the columns
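                // The reduced cost of column i is its objective coefficient
                // minus the duals dotted with the column. After sorting,
                // rc[max_var] is the cutoff; every column at or below the
                // cutoff (possibly a few more than max_var on ties) is kept,
                // in its original position, via perm.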
                const double * duals = p.lp_result->pi();
                BCP_vec<double> rc(vars_to_add_size, 0.0);
                for (i = 0; i < vars_to_add_size; ++i) {
                    rc[i] = (cols_to_add[i]->Objective() -
                             cols_to_add[i]->dotProduct(duals));
                }
                BCP_vec<int> perm;
                perm.reserve(vars_to_add_size);
                for (i = 0; i < vars_to_add_size; ++i)
                    perm.unchecked_push_back(i);
                CoinSort_2(rc.begin(), rc.end(), perm.begin());
                const double rc_cutoff = rc[max_var];
                CoinSort_2(perm.begin(), perm.end(), rc.begin());
                for (i = 0, j = 0; i < vars_to_add_size; ++i) {
                    if (rc[i] <= rc_cutoff) {
                        perm[j++] = i;
                    }
                }
                perm.erase(perm.entry(j), perm.end());
                // those in perm are to be kept
                keep_ptr_vector_by_index(vars_to_add, perm.begin(), perm.end());
                keep_ptr_vector_by_index(cols_to_add, perm.begin(), perm.end());
                // cols_to_add.keep_by_index(perm); // this was wrong
            }

            // Just add the given columns and go back to resolve
            added_size = vars_to_add.size();
            for (i = 0; i < added_size; ++i) {
                vars_to_add[i]->set_bcpind(-BCP_lp_next_var_index(p));
            }
            BCP_lp_add_cols_to_lp(cols_to_add, p.lp_solver);
            purge_ptr_vector(cols_to_add);
            p.node->vars.append(vars_to_add);
            p.local_cut_pool->rows_are_valid(false);
            if (p.param(BCP_lp_par::LpVerb_ColumnGenerationInfo))
                printf("LP:   %i variables added in price-out (not TDF :-( )\n",
                       static_cast<int>(vars_to_add.size()));
            // No need to delete the entries in vars_to_add one-by-one; those
            // pointers are now owned by p.node->vars.
            // Here we don't have col/row_indices to compress, we say we are
            // not from fathom (because we do add columns, i.e., we are not
            // going to fathom the node after the call returns) and we don't
            // want to force deletion.
            BCP_lp_delete_cols_and_rows(p, 0, 0, 0, false, false);
            return false;
        }
        break;
    }

    return true; // fake return
}

//#############################################################################

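// Collect new variables together with their columns: either pick up the ones
// the user already produced while processing the LP result, or call the
// user's generate_vars_in_lp() hook; variables that come back without
// matching columns are expanded afterwards with vars_to_cols().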
void
BCP_price_vars(BCP_lp_prob& p, const bool from_fathom,
               BCP_vec<BCP_var*>& vars_to_add, BCP_vec<BCP_col*>& cols_to_add)
{
    const BCP_lp_result& lpres = *p.lp_result;

    bool generated_algo_var = false;
    const size_t to_add = vars_to_add.size();
    if (p.user_has_lp_result_processing) {
        vars_to_add.append(p.new_vars);
        cols_to_add.append(p.new_cols);
        p.new_vars.clear();
        p.new_cols.clear();
    } else {
        p.user->generate_vars_in_lp(lpres, p.node->vars, p.node->cuts,
                                    from_fathom, vars_to_add, cols_to_add);
    }
    if (vars_to_add.size() > to_add) {
        generated_algo_var = true;
        if (cols_to_add.size() > to_add) {
            if (cols_to_add.size() != vars_to_add.size()) {
                throw BCP_fatal_error("\
LP: uneven new_vars/new_cols sizes in BCP_price_vars().\n");
            }
        } else {
            // expand the generated vars
            BCP_vec<BCP_var*> new_vars(vars_to_add.begin() + to_add,
                                       vars_to_add.end());
            BCP_vec<BCP_col*> new_cols;
            p.user->vars_to_cols(p.node->cuts, new_vars, new_cols,
                                 lpres, BCP_Object_FromGenerator, false);
            cols_to_add.insert(cols_to_add.end(),
                               new_cols.begin(), new_cols.end());
        }
    }
}

//#############################################################################

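// Ask the user to produce variables whose columns would cut off the given
// dual rays and thus restore primal feasibility; as in BCP_price_vars(),
// variables returned without matching columns are expanded with
// vars_to_cols().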
void
BCP_restore_feasibility(BCP_lp_prob& p,
                        const std::vector<double*> dual_rays,
                        BCP_vec<BCP_var*>& vars_to_add,
                        BCP_vec<BCP_col*>& cols_to_add)
{
    // Now try to restore feasibility with algorithmic vars by passing the
    // dual rays to the user's restore_feasibility() hook.
    const size_t to_add = vars_to_add.size();
    p.user->restore_feasibility(*p.lp_result, dual_rays,
                                p.node->vars, p.node->cuts,
                                vars_to_add, cols_to_add);
    if (vars_to_add.size() > to_add) {
        if (cols_to_add.size() > to_add) {
            if (cols_to_add.size() != vars_to_add.size()) {
                throw BCP_fatal_error("\
LP: uneven new_vars/new_cols sizes in BCP_restore_feasibility().\n");
            }
        } else {
            // expand the generated vars
            BCP_vec<BCP_var*> new_vars(vars_to_add.begin() + to_add,
                                       vars_to_add.end());
            BCP_vec<BCP_col*> new_cols;
            p.user->vars_to_cols(p.node->cuts, new_vars, new_cols,
                                 *p.lp_result, BCP_Object_FromGenerator,
                                 false);
            cols_to_add.insert(cols_to_add.end(),
                               new_cols.begin(), new_cols.end());
        }
    }
}
