BonAmplTMINLP.cpp
1 // (C) Copyright International Business Machines Corporation and
2 // Carnegie Mellon University 2004-2011
3 //
4 // All Rights Reserved.
5 // This code is published under the Eclipse Public License.
6 //
7 // Authors :
8 // Carl D. Laird, Carnegie Mellon University,
9 // Andreas Waechter, International Business Machines Corporation
10 // Pierre Bonami, Carnegie Mellon University,
11 //
12 // Date : 12/01/2004
13 #include "IpBlas.hpp"
14 
15 #include <list>
16 
17 #include "AmplTNLP.hpp"
18 #include "BonAmplTMINLP.hpp"
19 #include <iostream>
20 #include <fstream>
21 
22 #include "CoinHelperFunctions.hpp"
23 #include "BonExitCodes.hpp"
24 
25 using namespace Ipopt;
26 
27 namespace Bonmin{
28  void
29  RegisteredOptions::fillAmplOptionList(ExtraCategoriesInfo which, Ipopt::AmplOptionsList * amplOptList){
30  std::list<int> test;
31  std::list< Ipopt::RegisteredOption * > options;
32  chooseOptions(which, options);
33  for(std::list< Ipopt::RegisteredOption * >::iterator i = options.begin();
34  i != options.end() ; i++)
35  {
36  std::string name = "bonmin.";
37  name += (*i)->Name();
38  Ipopt::RegisteredOptionType T = (*i)->Type();
39  Ipopt::AmplOptionsList::AmplOptionType type;
40  switch(T){
41  case Ipopt::OT_Number: type = Ipopt::AmplOptionsList::Number_Option;
42  break;
43  case Ipopt::OT_Integer: type = Ipopt::AmplOptionsList::Integer_Option;
44  break;
45  case Ipopt::OT_String: type = Ipopt::AmplOptionsList::String_Option;
46  break;
47  case Ipopt::OT_Unknown:
48  default:
49  throw CoinError("RegisteredOptions","fillAmplOptionList","Unknown option type");
50  }
51  amplOptList->AddAmplOption(name, name, type, (*i)->ShortDescription());
52  }
53 }
54 }
55 #include "asl.h"
56 #include "asl_pfgh.h"
57 #include "getstub.h"
58 
59 namespace ampl_utils
60 {
61  void sos_kludge(int nsos, int *sosbeg, double *sosref);
62 }
63 namespace Bonmin
64 {
65 
66  AmplTMINLP::AmplTMINLP()
67  :
68  TMINLP(),
69  appName_(),
70  upperBoundingObj_(-1),
71  ampl_tnlp_(NULL),
72  branch_(),
73  sos_(),
74  suffix_handler_(NULL),
75  constraintsConvexities_(NULL),
76  numberNonConvex_(0),
77  nonConvexConstraintsAndRelaxations_(NULL),
78  numberSimpleConcave_(0),
79  simpleConcaves_(NULL),
80  hasLinearObjective_(false)
81  {}
82 
83 
84  AmplTMINLP::AmplTMINLP(const SmartPtr<const Journalist>& jnlst,
85  const SmartPtr<Bonmin::RegisteredOptions> roptions,
86  const SmartPtr<OptionsList> options,
87  char**& argv,
88  AmplSuffixHandler* suffix_handler /*=NULL*/,
89  const std::string& appName,
90  std::string* nl_file_content /* = NULL */
91  )
92  :
93  TMINLP(),
94  appName_(),
95  upperBoundingObj_(-1),
96  ampl_tnlp_(NULL),
97  branch_(),
98  sos_(),
99  suffix_handler_(NULL),
100  constraintsConvexities_(NULL),
101  numberNonConvex_(0),
102  nonConvexConstraintsAndRelaxations_(NULL),
103  numberSimpleConcave_(0),
104  simpleConcaves_(NULL),
105  hasLinearObjective_(false)
106  {
107  Initialize(jnlst, roptions, options, argv, suffix_handler, appName, nl_file_content);
108  }
109 
110  void
111  AmplTMINLP::Initialize(const SmartPtr<const Journalist>& jnlst,
112  const SmartPtr<Bonmin::RegisteredOptions> roptions,
113  const SmartPtr<OptionsList> options,
114  char**& argv,
115  AmplSuffixHandler* suffix_handler /*=NULL*/,
116  const std::string& appName,
117  std::string* nl_file_content /* = NULL */
118  )
119  {
120  appName_ = appName;
121  options->GetEnumValue("file_solution",writeAmplSolFile_,"bonmin.");
122  jnlst_ = jnlst;
123 
124  if (suffix_handler==NULL)
125  suffix_handler_ = suffix_handler = new AmplSuffixHandler();
126 
127  // Add the suffix handler for scaling
128  suffix_handler->AddAvailableSuffix("scaling_factor", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
129  suffix_handler->AddAvailableSuffix("scaling_factor", AmplSuffixHandler::Constraint_Source, AmplSuffixHandler::Number_Type);
130  suffix_handler->AddAvailableSuffix("scaling_factor", AmplSuffixHandler::Objective_Source, AmplSuffixHandler::Number_Type);
131 
132  // Modified for warm-start from AMPL
133  suffix_handler->AddAvailableSuffix("ipopt_zL_out", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
134  suffix_handler->AddAvailableSuffix("ipopt_zU_out", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
135  suffix_handler->AddAvailableSuffix("ipopt_zL_in", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
136  suffix_handler->AddAvailableSuffix("ipopt_zU_in", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
137 
138  // priority suffix
139  suffix_handler->AddAvailableSuffix("priority", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Index_Type);
140  suffix_handler->AddAvailableSuffix("direction", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
141  suffix_handler->AddAvailableSuffix("downPseudocost", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
142  suffix_handler->AddAvailableSuffix("upPseudocost", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
143 
144 
145 
146  // sos suffixes
147  suffix_handler->AddAvailableSuffix("ref", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
148  suffix_handler->AddAvailableSuffix("sos",AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Index_Type);
149  suffix_handler->AddAvailableSuffix("sos",AmplSuffixHandler::Constraint_Source, AmplSuffixHandler::Index_Type);
150  suffix_handler->AddAvailableSuffix("sosno",AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
151  suffix_handler->AddAvailableSuffix("sosref",AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
152  suffix_handler->AddAvailableSuffix("sstatus", AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Index_Type);
153  suffix_handler->AddAvailableSuffix("sstatus", AmplSuffixHandler::Constraint_Source, AmplSuffixHandler::Index_Type);
154 
155 
156  // For marking convex/nonconvex constraints
157  suffix_handler->AddAvailableSuffix("non_conv",AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Index_Type);
158  suffix_handler->AddAvailableSuffix("primary_var",AmplSuffixHandler::Constraint_Source, AmplSuffixHandler::Index_Type);
159 
160  // For marking onoff constraints and indicator variables
161  suffix_handler->AddAvailableSuffix("onoff_c",AmplSuffixHandler::Constraint_Source, AmplSuffixHandler::Index_Type);
162  suffix_handler->AddAvailableSuffix("onoff_v",AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Index_Type);
163 
164  // For objectives
165  suffix_handler->AddAvailableSuffix("UBObj", AmplSuffixHandler::Objective_Source, AmplSuffixHandler::Index_Type);
166 
167 
168  // Perturbation radius
169  suffix_handler->AddAvailableSuffix("perturb_radius",AmplSuffixHandler::Variable_Source, AmplSuffixHandler::Number_Type);
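 // The suffixes registered above are supplied by the AMPL model; for instance, a model
 // could declare "suffix priority IN, integer;" and assign "let x[1].priority := 10;"
 // before invoking the solver (illustrative AMPL statements, not taken from this file).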
170 
171  SmartPtr<AmplOptionsList> ampl_options_list = new AmplOptionsList();
172  roptions->fillAmplOptionList(RegisteredOptions::BonminCategory, GetRawPtr(ampl_options_list));
173  roptions->fillAmplOptionList(RegisteredOptions::FilterCategory, GetRawPtr(ampl_options_list));
174  roptions->fillAmplOptionList(RegisteredOptions::BqpdCategory, GetRawPtr(ampl_options_list));
175  fillApplicationOptions(GetRawPtr(ampl_options_list) );
176  std::string options_id = appName + "_options";
177  ampl_tnlp_ = new AmplTNLP(jnlst, options, argv, suffix_handler, true,
178  ampl_options_list, options_id.c_str(),
179  appName.c_str(), appName.c_str(), nl_file_content);
180 
181 
182  /* Read suffixes */
183  read_obj_suffixes();
184  read_priorities();
185  read_convexities();
186  read_onoff();
187  read_sos();
188 
189 
190  /* Determine if objective is linear.*/
191  Index n_non_linear_b= 0;
192  Index n_non_linear_bi= 0;
193  Index n_non_linear_c= 0;
194  Index n_non_linear_ci = 0;
195  Index n_non_linear_o= 0;
196  Index n_non_linear_oi = 0;
197  Index n_binaries = 0;
198  Index n_integers = 0;
199  ampl_tnlp_->get_discrete_info(n_non_linear_b, n_non_linear_bi, n_non_linear_c,
200  n_non_linear_ci, n_non_linear_o, n_non_linear_oi,
201  n_binaries, n_integers);
202  if (n_non_linear_b == 0 && n_non_linear_o == 0) {
203  hasLinearObjective_ = true;
204  }
205  }
206 
207  AmplTMINLP::~AmplTMINLP()
208  {
209  delete [] constraintsConvexities_;
210  delete [] simpleConcaves_;
211  delete [] nonConvexConstraintsAndRelaxations_;
212  delete ampl_tnlp_;
213  }
214 
215  void
216  AmplTMINLP::read_priorities()
217  {
218  int numcols, m, dummy1, dummy2;
219  TNLP::IndexStyleEnum index_style;
220  ampl_tnlp_->get_nlp_info(numcols, m, dummy1, dummy2, index_style);
221 
222  const AmplSuffixHandler * suffix_handler = GetRawPtr(suffix_handler_);
223 
224  const Index* pri = suffix_handler->GetIntegerSuffixValues("priority", AmplSuffixHandler::Variable_Source);
225  const Index* brac = suffix_handler->GetIntegerSuffixValues("direction", AmplSuffixHandler::Variable_Source);
226  const Number* upPs = suffix_handler->GetNumberSuffixValues("upPseudocost", AmplSuffixHandler::Variable_Source);
227  const Number* dwPs = suffix_handler->GetNumberSuffixValues("downPseudocost", AmplSuffixHandler::Variable_Source);
228 
229 
230  branch_.gutsOfDestructor();
231  branch_.size = numcols;
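 // AMPL/CPLEX-style priorities treat a larger value as more important, whereas
 // Bonmin/Cbc branch first on objects with the smallest priority value; the inversion
 // around 9999 below presumably converts between the two conventions.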
232  if (pri) {
233  branch_.priorities = new int[numcols];
234  for (int i = 0 ; i < numcols ; i++) {
235  branch_.priorities [i] = -pri[i] + 9999;
236  }
237  }
238  if (brac) {
239  branch_.branchingDirections = CoinCopyOfArray(brac,numcols);
240  }
241  if (upPs && !dwPs) dwPs = upPs;
242  else if (dwPs && !upPs) upPs = dwPs;
243 
244  if (upPs) {
245  branch_.upPsCosts = CoinCopyOfArray(upPs,numcols);
246  }
247  if (dwPs) {
248  branch_.downPsCosts = CoinCopyOfArray(dwPs,numcols);
249  }
250 
251  const double* perturb_radius =
252  suffix_handler->GetNumberSuffixValues("perturb_radius", AmplSuffixHandler::Variable_Source);
253  perturb_info_.SetPerturbationArray(numcols, perturb_radius);
254  }
255 
256  void
257  AmplTMINLP::read_sos()
258  {
259  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
260  int i = 0;//ASL_suf_sos_explict_free;
261  int copri[2], **p_sospri;
262  copri[0] = 0;
263  copri[1] = 0;
264  int * starts = NULL;
265  int * indices = NULL;
266  char * types = NULL;
267  double * weights = NULL;
268  int * priorities = NULL;
269  p_sospri = &priorities;
270 
271  sos_.gutsOfDestructor();
272  int m = n_con;
273  sos_.num = suf_sos(i, &sos_.numNz, &types, p_sospri, copri,
274  &starts, &indices, &weights);
275  if(m != n_con){
276  throw CoinError("number of constraints changed by suf_sos. Not supported.",
277  "read_sos","Bonmin::AmplTMINLP");
278  }
279  if (sos_.num) {
280  //Copy sos information
281  sos_.priorities = CoinCopyOfArray(priorities,sos_.num);
282  sos_.starts = CoinCopyOfArray(starts, sos_.num + 1);
283  sos_.indices = CoinCopyOfArray(indices, sos_.numNz);
284  sos_.types = CoinCopyOfArray(types, sos_.num);
285  sos_.weights = CoinCopyOfArray(weights, sos_.numNz);
286 
287  ampl_utils::sos_kludge(sos_.num, sos_.starts, sos_.weights);
288  for (int ii=0;ii<sos_.num;ii++) {
289  int ichar = sos_.types[ii] - '0';
290  if (ichar != 1 && ichar != 2) {
291  std::cerr<<"Unsupported type of sos constraint: "<<sos_.types[ii]<<std::endl;
292  throw;
293  }
294  sos_.types[ii]= static_cast<char>(ichar);
295  }
296  }
297  }
298 
299  void
300  AmplTMINLP::read_obj_suffixes()
301  {
302  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
303  DBG_ASSERT(asl);
304  //Get the values
305  if (n_obj < 2) return;
306 
307  const AmplSuffixHandler * suffix_handler = GetRawPtr(suffix_handler_);
308 
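 // The UBObj objective suffix marks an objective that is only used to compute upper
 // bounds; the other objective becomes the "active" one that Ipopt actually optimizes.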
309  const Index* UBObj = suffix_handler->GetIntegerSuffixValues("UBObj", AmplSuffixHandler::Objective_Source);
310  if (UBObj) {
311  //get index of lower bounding objective
312  int lowerBoundingObj = (UBObj[0] == 1)? 1:0;
313  // Pass information to Ipopt
314  ampl_tnlp_->set_active_objective(lowerBoundingObj);
315 
316  //get the index of upper bounding objective
317  for (int i = 0; i < n_obj; i++) {
318  if (UBObj[i]==1) {
319  if (upperBoundingObj_ != -1) {
320  jnlst_->Printf(J_ERROR, J_MAIN,
321  "Too many objectives for upper-bounding");
322  }
323  upperBoundingObj_ = i;
324  }
325  }
326  }
327  else {
328  ampl_tnlp_->set_active_objective(0);
329  }
330  }
331  /** To store all data stored in the nonconvex suffixes.*/
332  struct NonConvexSuff
333  {
334  /** Default constructor.*/
335  NonConvexSuff():
336  cIdx(-1),relIdx(-1),scXIdx(-1),scYIdx(-1)
337  {}
338  /** Copy constructor.*/
339  NonConvexSuff(const NonConvexSuff& other):
340  cIdx(other.cIdx), relIdx(other.relIdx),
341  scXIdx(other.scXIdx), scYIdx(other.scYIdx)
342  {}
343  /** Index of the nonconvex constraint.*/
344  int cIdx;
345  /** Index of its relaxation.*/
346  int relIdx;
347  /** Index of variable x in a simple concave constraint of type y >= F(x).*/
348  int scXIdx;
349  /** Index of variable y in a simple concave constraint of type y >= F(x).*/
350  int scYIdx;
351  };
352  void AmplTMINLP::read_convexities()
353  {
354  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
355  DBG_ASSERT(asl);
356 
357  const AmplSuffixHandler * suffix_handler = GetRawPtr(suffix_handler_);
358  const Index * id = suffix_handler->GetIntegerSuffixValues("non_conv", AmplSuffixHandler::Variable_Source);
359  const Index * primary_var = suffix_handler->GetIntegerSuffixValues("primary_var", AmplSuffixHandler::Constraint_Source);
360 
361 
362  if (primary_var!= NULL) {
363  if (constraintsConvexities_ != NULL) {
364  delete [] constraintsConvexities_;
365  }
366  constraintsConvexities_ = new TMINLP::Convexity[n_con];
367  if (id == NULL) {
368  std::cerr<<"Incorrect suffixes description in ampl model. n_conv's are not declared "<<std::endl;
369  exit(ERROR_IN_AMPL_SUFFIXES);
370  }
371  int numberSimpleConcave = 0;
372  std::map<int, int> id_map;
373 
374  for (int i = 0 ; i < n_var ; i++) {
375  id_map[id[i]] = i;
376  }
377 
378 
379  for (int i = 0 ; i < n_con ; i++) {
380  if (primary_var[i] != 0) {
381  constraintsConvexities_[i] = TMINLP::SimpleConcave;
382  numberSimpleConcave++;
383  }
384  else constraintsConvexities_[i] = TMINLP::Convex;
385  }
386  simpleConcaves_ = new SimpleConcaveConstraint[numberSimpleConcave];
387  nonConvexConstraintsAndRelaxations_ = new MarkedNonConvex[numberSimpleConcave];
388  numberSimpleConcave = 0;
389  int * jCol = new int[n_var];
390  for (int i = 0 ; i < n_con ; i++) {
391  if (primary_var[i] != 0) {
392  nonConvexConstraintsAndRelaxations_[numberSimpleConcave].cIdx = i;
393  nonConvexConstraintsAndRelaxations_[numberSimpleConcave].cRelaxIdx = -1;
394  simpleConcaves_[numberSimpleConcave].cIdx = i;
395  simpleConcaves_[numberSimpleConcave].yIdx = id_map[primary_var[i]];
396 
397  //Now get gradient of i to get xIdx.
398  int nnz;
399  int & yIdx = simpleConcaves_[numberSimpleConcave].yIdx;
400  int & xIdx = simpleConcaves_[numberSimpleConcave].xIdx;
401  eval_grad_gi(n_var, NULL, false, i, nnz, jCol, NULL);
402  if (nnz != 2) {//Error in ampl model
403  std::cout<<"Incorrect suffixes description in ampl model. Constraint with id "
404  <<id<<" is simple concave and should have only two nonzero elements"<<std::endl;
405  exit(ERROR_IN_AMPL_SUFFIXES);
406  }
407  if (jCol[0] - 1== yIdx) {
408  xIdx = jCol[1] - 1;
409  }
410  else {
411  if (jCol[1] - 1!= yIdx) {//Error in ampl model
412  std::cout<<"Incorrect suffixes description in ampl model. Constraint with id "
413  <<id<<" : variable marked as y does not appear in the constraint."<<std::endl;
414  exit(ERROR_IN_AMPL_SUFFIXES);
415  }
416  xIdx = jCol[0] - 1;
417  }
418  numberSimpleConcave++;
419  }
420  }
421  delete [] jCol;
422  numberSimpleConcave_ = numberSimpleConcave;
423  numberNonConvex_ = numberSimpleConcave;
424  }
425 
426  }
427 
428 
429  void AmplTMINLP::read_onoff()
430  {
431  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
432  DBG_ASSERT(asl);
433 
434  const AmplSuffixHandler * suffix_handler = GetRawPtr(suffix_handler_);
435  const Index * onoff_c = suffix_handler->GetIntegerSuffixValues("onoff_c", AmplSuffixHandler::Constraint_Source);
436  const Index * onoff_v = suffix_handler->GetIntegerSuffixValues("onoff_v", AmplSuffixHandler::Variable_Source);
437 
438  if(onoff_c == NULL && onoff_v == NULL){//No suffixes
439  return;
440  }
441  if(onoff_c == NULL || onoff_v == NULL){// If one in non-null both should be
442  std::cerr<<"Incorrect suffixes description in ampl model. One of onoff_v or onoff_c is declared but not the other."<<std::endl;
443  exit(ERROR_IN_AMPL_SUFFIXES);
444  }
445 
446  c_extra_id_.clear();
447  c_extra_id_.resize(n_con, -1);
448  std::map<int, int> id_map;
449 
450  for (int i = 0 ; i < n_var ; i++) {
451  if(onoff_v[i] > 0)
452  id_map[onoff_v[i]] = i;
453  }
454 
455  for (int i = 0 ; i < n_con ; i++) {
456  if(onoff_c[i] > 0){
457  std::map<int, int >::iterator k = id_map.find(onoff_c[i]);
458  if(k != id_map.end()){
459  c_extra_id_[i] = (*k).second;
460  }
461  else{
462  std::cerr<<"Incorrect suffixes description in ampl model. onoff_c has value attributed to no variable "<<std::endl;
463  exit(ERROR_IN_AMPL_SUFFIXES);
464  }
465  }
466  }
467  }
468 
469  bool AmplTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
470  {
471  return ampl_tnlp_->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
472  }
473 
474  bool AmplTMINLP::get_variables_types(Index n, VariableType* var_types)
475  {
476  // The variables are sorted by type in AMPL, so all we need to
477  // know are the counts of each type.
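 // AMPL orders the columns as: variables nonlinear in both the objective and the
 // constraints, then nonlinear only in constraints, then nonlinear only in the
 // objective, and finally the linear variables; within each nonlinear group the
 // continuous variables come before the integer ones, and the linear block lists
 // continuous variables, then binaries, then general integers. The counts from
 // get_discrete_info are used to walk through these blocks below.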
478 
479 
480  Index n_non_linear_b= 0;
481  Index n_non_linear_bi= 0;
482  Index n_non_linear_c= 0;
483  Index n_non_linear_ci = 0;
484  Index n_non_linear_o= 0;
485  Index n_non_linear_oi = 0;
486  Index n_binaries = 0;
487  Index n_integers = 0;
488  ampl_tnlp_->get_discrete_info(n_non_linear_b, n_non_linear_bi, n_non_linear_c,
489  n_non_linear_ci, n_non_linear_o, n_non_linear_oi,
490  n_binaries, n_integers);
491  int totalNumberOfNonContinuous = 0;
492 
493 
494  //begin with variables non-linear both in the objective function and in some constraints
495  // The first ones are continuous
496  Index start = 0;
497  Index end = n_non_linear_b - n_non_linear_bi;
498  for (int i=start; i<end; i++) {
499  var_types[i] = CONTINUOUS;
500  }
501 
502  // The second ones are integers
503  start = end;
504  end = start + n_non_linear_bi;
505  for (int i=start; i < end; i++) {
506  var_types[i] = INTEGER;
507  totalNumberOfNonContinuous++;
508  }
509  //next variables non-linear in some constraints
510  // The first ones are continuous
511  start = end;
512  end = std::max(start,end + n_non_linear_c - n_non_linear_ci - n_non_linear_b);
513  for (int i=start; i<end; i++) {
514  var_types[i] = CONTINUOUS;
515  }
516 
517  // The second ones are integers
518  start = end;
519  end = start + n_non_linear_ci;
520  for (int i=start; i < end; i++) {
521  var_types[i] = INTEGER;
522  totalNumberOfNonContinuous++;
523  }
524 
525  //next variables non-linear in the objective function
526  // The first ones are continuous
527  start = end;
528  end = std::max(start,end + n_non_linear_o - std::max(n_non_linear_b, n_non_linear_c) - n_non_linear_oi);
529  for (int i=start; i<end; i++) {
530  var_types[i] = CONTINUOUS;
531  }
532 
533  // The second ones are integers
534  start = end;
535  end = start + n_non_linear_oi;
536  for (int i=start; i < end; i++) {
537  var_types[i] = INTEGER;
538  totalNumberOfNonContinuous++;
539  }
540 
541  //At last the linear variables
542  // The first ones are continuous
543  start = end;
544  end = n - n_binaries - n_integers;
545  for (int i=start; i<end; i++) {
546  var_types[i] = CONTINUOUS;
547  }
548 
549  // The second ones are binaries
550  start = end;
551  end = start + n_binaries;
552  for (int i=start; i < end; i++) {
553  var_types[i] = BINARY;
554  totalNumberOfNonContinuous++;
555  }
556 
557  // The third ones are integers
558  start = end;
559  end = start + n_integers;
560  for (int i=start; i < end; i++) {
561  var_types[i] = INTEGER;
562  totalNumberOfNonContinuous++;
563  }
564  return true;
565  }
566 
567  bool AmplTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types)
568  {
569  // The variables are sorted by type in AMPL, so all we need to
570  // know are the counts of each type.
571 
572 
573  Index n_non_linear_b= 0;
574  Index n_non_linear_bi= 0;
575  Index n_non_linear_c= 0;
576  Index n_non_linear_ci = 0;
577  Index n_non_linear_o= 0;
578  Index n_non_linear_oi = 0;
579  Index n_binaries = 0;
580  Index n_integers = 0;
581  ampl_tnlp_->get_discrete_info(n_non_linear_b, n_non_linear_bi, n_non_linear_c,
582  n_non_linear_ci, n_non_linear_o, n_non_linear_oi,
583  n_binaries, n_integers);
584 
585  //Compute the number of non linear variables:
586  int n_non_linear = std::max(n_non_linear_c, n_non_linear_o);//n_non_linear_c + n_non_linear_o - n_non_linear_b;
587 
588  //printf("n_non_linear_c %i n_non_linear_o %i n_non_linear_b %i\n", n_non_linear_c, n_non_linear_o, n_non_linear_b);
589 
590  int start = 0;
591  int end = n_non_linear;
592  for (int i=start; i<end; i++) {
593  var_types[i] = Ipopt::TNLP::NON_LINEAR;
594  }
595 
596  //At last the linear variables
597  // The first ones are continuous
598  start = end;
599  end = n;
600  for (int i=start; i<end; i++) {
601  var_types[i] = Ipopt::TNLP::LINEAR;
602  }
603  return true;
604  }
605 
608  bool
609  AmplTMINLP::get_constraints_linearity(Index m,
610  Ipopt::TNLP::LinearityType* const_types)
611  {
612  return ampl_tnlp_->get_constraints_linearity(m, const_types);
613  }
614 
615  bool AmplTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
616  Index m, Number* g_l, Number* g_u)
617  {
618  return ampl_tnlp_->get_bounds_info(n, x_l, x_u, m, g_l, g_u);
619  }
620 
621  bool AmplTMINLP::get_starting_point(Index n, bool init_x, Number* x,
622  bool init_z, Number* z_L, Number* z_U,
623  Index m, bool init_lambda, Number* lambda)
624  {
625  return ampl_tnlp_->get_starting_point(n, init_x, x,
626  init_z, z_L, z_U,
627  m, init_lambda, lambda);
628  }
629 
630  bool AmplTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
631  {
632  return ampl_tnlp_->eval_f(n, x, new_x, obj_value);
633  }
634 
635  bool AmplTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
636  {
637  return ampl_tnlp_->eval_grad_f(n, x, new_x, grad_f);
638  }
639 
640  bool AmplTMINLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
641  {
642  return ampl_tnlp_->eval_g(n, x, new_x, m, g);
643  }
644 
645  bool AmplTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,
646  Index m, Index nele_jac, Index* iRow,
647  Index *jCol, Number* values)
648  {
649  return ampl_tnlp_->eval_jac_g(n, x, new_x,
650  m, nele_jac, iRow, jCol,
651  values);
652  }
653 
654  bool AmplTMINLP::eval_h(Index n, const Number* x, bool new_x,
655  Number obj_factor, Index m, const Number* lambda,
656  bool new_lambda, Index nele_hess, Index* iRow,
657  Index* jCol, Number* values)
658  {
659  return ampl_tnlp_->eval_h(n, x, new_x, obj_factor,
660  m, lambda, new_lambda, nele_hess, iRow,
661  jCol, values);
662  }
663 
664  bool AmplTMINLP::eval_gi(Index n, const Number* x, bool new_x,
665  Index i, Number& gi)
666  {
667  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
668 
669  // ignore new_x for now
670  xunknown();
671 
672  fint nerror = 0;
673  gi = conival(i, const_cast<real*>(x), &nerror);
674  if (nerror!=0) {
675  return false;
676  }
677  else {
678  return true;
679  }
680  }
681 
682  bool AmplTMINLP::eval_grad_gi(Index n, const Number* x, bool new_x,
683  Index i, Index& nele_grad_gi, Index* jCol,
684  Number* values)
685  {
686  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
687 
688  if (jCol) {
689  // Only compute the number of nonzeros and the indices
690  DBG_ASSERT(!values);
691  nele_grad_gi = 0;
692  for (cgrad* cg=Cgrad[i]; cg; cg = cg->next) {
693  jCol[nele_grad_gi++] = cg->varno + 1;
694  }
695  return true;
696  }
697  DBG_ASSERT(values);
698 
699  // ignore new_x for now
700  xunknown();
701 
702  asl->i.congrd_mode = 1;
703  fint nerror = 0;
704  congrd(i, const_cast<real*>(x), values, &nerror);
705  if (nerror!=0) {
706  return false;
707  }
708  else {
709  return true;
710  }
711  }
712 
713  void AmplTMINLP::finalize_solution(TMINLP::SolverReturn status,
714  Index n, const Number* x, Number obj_value)
715  {
716  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
717  std::string message;
718  std::string status_str;
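 // solve_result_num follows the AMPL convention for solver return codes: roughly
 // 0-99 solved, 200-299 infeasible, 300-399 unbounded, 400-499 interrupted by a
 // limit, and 500-599 solver failure.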
719  if (status == TMINLP::SUCCESS) {
720  status_str = "\t\"Finished\"";
721  message = "\n" + appName_ +": Optimal";
722  solve_result_num = 3;
723  }
724  else if (status == TMINLP::INFEASIBLE) {
725  status_str = "\t\"Finished\"";
726  message = "\n" + appName_ + ": Infeasible problem";
727  solve_result_num = 220;
728  }
729  else if (status == TMINLP::CONTINUOUS_UNBOUNDED) {
730  status_str = "\t\"Finished\"";
731  message = "\n" + appName_ +": Continuous relaxation is unbounded.";
732  solve_result_num = 300;
733  }
734  else if (status == TMINLP::LIMIT_EXCEEDED) {
735  status_str = "\t\"Not finished\"";
736  message = "\n" + appName_ + ": Optimization interrupted on limit.";
737  if(x)
738  solve_result_num = 421; /* Limit reached or user interrupt with integer feasible solution.*/
739  else
740  solve_result_num = 410; /* Limit reached or user interrupt without solution.*/
741  }
742  else if (status == TMINLP::USER_INTERRUPT) {
743  status_str = "\t\"Not finished\"";
744  message = "\n" + appName_ + ": Optimization interrupted by user.";
745  if(x)
746  solve_result_num = 422; /* Limit reached or user interrupt with integer feasible solution.*/
747  else
748  solve_result_num = 411; /* Limit reached or user interrupt without solution.*/
749  }
750  else if (status == TMINLP::MINLP_ERROR) {
751  status_str = "\t\"Aborted\"";
752  message = "\n" + appName_ + ": Error encountered in optimization.";
753  solve_result_num = 500;
754  }
755  if (writeAmplSolFile_) {
756  write_solution(message, x);
757  std::cout<<"\n "<<status_str<<std::endl;
758  }
759  else {
760  std::cout<<status_str<<message<<std::endl;
761  std::string fName = appName_ + ".sol";
762  std::ofstream of(fName.c_str());
763  for (int i = 0 ; i < n ; i++) {
764  of<<i<<"\t"<<x[i]<<std::endl;
765  }
766  of<<"-1\n";
767  }
768  }
769 
770  void AmplTMINLP::write_solution(const std::string & message, const Number *x_sol)
771  {
772  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
773 
774  DBG_ASSERT(asl);
775  // DBG_ASSERT(x_sol);
776 
777  // We need to copy the message into a non-const char array to make
778  // it work with the AMPL C function.
779  char* cmessage = new char[message.length()+1];
780  strcpy(cmessage, message.c_str());
781 
782  // In order to avoid casting into non-const, we copy the data here...
783  double* x_sol_copy = NULL;
784  if (x_sol) {
785  x_sol_copy = new double[n_var];
786  for (int i=0; i<n_var; i++) {
787  x_sol_copy[i] = x_sol[i];
788  }
789  }
790  write_sol(cmessage, x_sol_copy, NULL, NULL);
791 
792  delete [] x_sol_copy;
793  delete [] cmessage;
794 
795  }
796 
797 
798  void
799  AmplTMINLP::getLinearPartOfObjective(double * obj)
800  {
801  Index n, m, nnz_jac_g, nnz_h_lag;
802  TNLP::IndexStyleEnum index_style = TNLP::FORTRAN_STYLE;
803  get_nlp_info( n, m, nnz_jac_g, nnz_h_lag, index_style);
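 // The objective gradient is evaluated once (new_x is false, so the NULL x should not
 // be dereferenced); the entries of variables that appear nonlinearly in the objective
 // are then zeroed, leaving only the constant coefficients of the linear variables in obj.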
804  eval_grad_f(n, NULL, 0,obj);
805  Index n_non_linear_b= 0;
806  Index n_non_linear_bi= 0;
807  Index n_non_linear_c= 0;
808  Index n_non_linear_ci = 0;
809  Index n_non_linear_o= 0;
810  Index n_non_linear_oi = 0;
811  Index n_binaries = 0;
812  Index n_integers = 0;
813  ampl_tnlp_->get_discrete_info(n_non_linear_b, n_non_linear_bi, n_non_linear_c,
814  n_non_linear_ci, n_non_linear_o, n_non_linear_oi,
815  n_binaries, n_integers);
816 
817  // Now get the coefficients of variables which are linear in the objective
818  // The first ones are not
819  Index start = 0;
820  Index end = n_non_linear_b;
821  for (int i=start; i<end; i++) {
822  obj[i] = 0.;
823  }
824 
825  //next variables should be linear in the objective just skip them
826  // (from current end to (end + n_non_linear_c - n_non_linear_ci - n_non_linear_b;)
827 
828 
829  // Those are nonlinear in the objective
830  start = end + n_non_linear_c;
831  end = start + n_non_linear_o;
832  for (int i=start; i < end; i++) {
833  obj[i]=0.;
834  }
835  //The rest is linear keep the values of the gradient
836  }
837 
838 
839 
840  /** This method returns the value of an alternative objective function for
841  upper bounding (if one has been declared by using the prefix UBObj).*/
842  bool
843  AmplTMINLP::eval_upper_bound_f(Index n, const Number* x,
844  Number& obj_value)
845  {
846  ASL_pfgh* asl = ampl_tnlp_->AmplSolverObject();
847  //xknown(x); // This tells ampl to use a new x
848  fint nerror = -1;
849  obj_value = objval(upperBoundingObj_, const_cast<double *>(x), &nerror);
850  if (nerror > 0) {
851  jnlst_->Printf(J_ERROR, J_MAIN,
852  "Error in evaluating upper bounding objective");
853  throw -1;
854  }
855  return nerror;
856 
857  }
859  const ASL_pfgh*
860  AmplTMINLP::AmplSolverObject() const
861  {
862  return ampl_tnlp_->AmplSolverObject();
863  }
864 
865 } // namespace Bonmin