CppAD: A C++ Algorithmic Differentiation Package  20171217
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
solve_callback.hpp
Go to the documentation of this file.
1 // $Id: solve_callback.hpp 3804 2016-03-20 15:08:46Z bradbell $
2 # ifndef CPPAD_IPOPT_SOLVE_CALLBACK_HPP
3 # define CPPAD_IPOPT_SOLVE_CALLBACK_HPP
4 /* --------------------------------------------------------------------------
5 CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-16 Bradley M. Bell
6 
7 CppAD is distributed under multiple licenses. This distribution is under
8 the terms of the
9  Eclipse Public License Version 1.0.
10 
11 A copy of this license is included in the COPYING file of this distribution.
12 Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
13 -------------------------------------------------------------------------- */
14 
15 # include <cppad/cppad.hpp>
16 # include <coin/IpIpoptApplication.hpp>
17 # include <coin/IpTNLP.hpp>
19 
20 namespace CppAD { // BEGIN_CPPAD_NAMESPACE
21 namespace ipopt {
22 /*!
23 \file solve_callback.hpp
24 \brief Class that connects ipopt::solve to Ipopt
25 */
26 
27 /*!
28 Class that Ipopt uses for obtaining information about this problem.
29 
30 
31 \section Evaluation_Methods Evaluation Methods
32 The set of evaluation methods for this class is
33 \verbatim
34  { eval_f, eval_grad_f, eval_g, eval_jac_g, eval_h }
35 \endverbatim
36 Note that the bool return flag for the evaluations methods
37 does not appear in the Ipopt documentation.
38 Looking at the code, it seems to be a flag telling Ipopt to abort
39 when the flag is false.
40 */
41 template <class Dvector, class ADvector, class FG_eval>
42 class solve_callback : public Ipopt::TNLP
43 {
44 private:
45  // ------------------------------------------------------------------
46  // Types used by this class
47  // ------------------------------------------------------------------
48  /// A Scalar value used by Ipopt
49  typedef Ipopt::Number Number;
50  /// An index value used by Ipopt
51  typedef Ipopt::Index Index;
52  /// Indexing style used in Ipopt sparsity structure
53  typedef Ipopt::TNLP::IndexStyleEnum IndexStyleEnum;
54  // ------------------------------------------------------------------
55  // Values directly passed in to constuctor
56  // ------------------------------------------------------------------
57  /// dimension of the range space for f(x).
58  /// The objective is sum_i f_i (x).
59  /// Note that, at this point, there is no advantage having nf_ > 1.
60  const size_t nf_;
61  /// dimension of the domain space for f(x) and g(x)
62  const size_t nx_;
63  /// dimension of the range space for g(x)
64  const size_t ng_;
65  /// initial value for x
66  const Dvector& xi_;
67  /// lower limit for x
68  const Dvector& xl_;
69  /// upper limit for x
70  const Dvector& xu_;
71  /// lower limit for g(x)
72  const Dvector& gl_;
73  /// upper limit for g(x)
74  const Dvector& gu_;
75  /// object that evaluates f(x) and g(x)
76  FG_eval& fg_eval_;
77  /// should operation sequence be retaped for each new x.
78  bool retape_;
79  /// Should sparse methods be used to compute Jacobians and Hessians
80  /// with forward mode used for Jacobian.
82  /// Should sparse methods be used to compute Jacobians and Hessians
83  /// with reverse mode used for Jacobian.
85  /// final results are returned to this structure
87  // ------------------------------------------------------------------
88  // Values that are initilaized by the constructor
89  // ------------------------------------------------------------------
90  /// AD function object that evaluates x -> [ f(x) , g(x) ]
91  /// If retape is false, this object is initialzed by constructor
92  /// otherwise it is set by cache_new_x each time it is called.
94  /// value of x corresponding to previous new_x
95  Dvector x0_;
96  /// value of fg corresponding to previous new_x
97  Dvector fg0_;
98  // ----------------------------------------------------------------------
99  // Jacobian information
100  // ----------------------------------------------------------------------
101  /// Sparsity pattern for Jacobian of [f(x), g(x) ].
102  /// If sparse is true, this pattern set by constructor and does not change.
103  /// Otherwise this vector has size zero.
105  /// Row indices of [f(x), g(x)] for Jacobian of g(x) in row order.
106  /// (Set by constructor and not changed.)
108  /// Column indices for Jacobian of g(x), same order as row_jac_.
109  /// (Set by constructor and not changed.)
111  /// col_order_jac_ sorts row_jac_ and col_jac_ in column order.
112  /// (Set by constructor and not changed.)
114  /// Work vector used by SparseJacobian, stored here to avoid recalculation.
116  // ----------------------------------------------------------------------
117  // Hessian information
118  // ----------------------------------------------------------------------
119  /// Sparsity pattern for Hessian of Lagragian
120  /// \f[ L(x) = \sigma \sum_i f_i (x) + \sum_i \lambda_i g_i (x) \f]
121  /// If sparse is true, this pattern set by constructor and does not change.
122  /// Otherwise this vector has size zero.
124  /// Row indices of Hessian lower left triangle in row order.
125  /// (Set by constructor and not changed.)
127  /// Column indices of Hessian left triangle in same order as row_hes_.
128  /// (Set by constructor and not changed.)
130  /// Work vector used by SparseJacobian, stored here to avoid recalculation.
132  // ------------------------------------------------------------------
133  // Private member functions
134  // ------------------------------------------------------------------
135  /*!
136  Cache information for a new value of x.
137 
138  \param x
139  is the new value for x.
140 
141  \par x0_
142  the elements of this vector are set to the new value for x.
143 
144  \par fg0_
145  the elements of this vector are set to the new value for [f(x), g(x)]
146 
147  \par adfun_
148  If retape is true, the operation sequence for this function
149  is changes to correspond to the argument x.
150  If retape is false, the operation sequence is not changed.
151  The zero order Taylor coefficients for this function are set
152  so they correspond to the argument x.
153  */
154  void cache_new_x(const Number* x)
155  { size_t i;
156  if( retape_ )
157  { // make adfun_, as well as x0_ and fg0_ correspond to this x
158  ADvector a_x(nx_), a_fg(nf_ + ng_);
159  for(i = 0; i < nx_; i++)
160  { x0_[i] = x[i];
161  a_x[i] = x[i];
162  }
163  CppAD::Independent(a_x);
164  fg_eval_(a_fg, a_x);
165  adfun_.Dependent(a_x, a_fg);
166  }
167  else
168  { // make x0_ and fg0_ correspond to this x
169  for(i = 0; i < nx_; i++)
170  x0_[i] = x[i];
171  }
172  fg0_ = adfun_.Forward(0, x0_);
173  }
174 public:
175  // ----------------------------------------------------------------------
176  /*!
177  Constructor for the interface between ipopt::solve and Ipopt
178 
179  \param nf
180  dimension of the range space for f(x)
181 
182  \param nx
183  dimension of the domain space for f(x) and g(x).
184 
185  \param ng
186  dimension of the range space for g(x)
187 
188  \param xi
189  initial value of x during the optimization procedure (size nx).
190 
191  \param xl
192  lower limit for x (size nx).
193 
194  \param xu
195  upper limit for x (size nx).
196 
197  \param gl
198  lower limit for g(x) (size ng).
199 
200  \param gu
201  upper limit for g(x) (size ng).
202 
203  \param fg_eval
204  function object that evaluations f(x) and g(x) using fg_eval(fg, x)
205 
206  \param retape
207  should the operation sequence be retaped for each argument value.
208 
209  \param sparse_forward
210  should sparse matrix computations be used for Jacobians and Hessians
211  with forward mode for Jacobian.
212 
213  \param sparse_reverse
214  should sparse matrix computations be used for Jacobians and Hessians
215  with reverse mode for Jacobian.
216  (sparse_forward and sparse_reverse cannot both be true).
217 
218  \param solution
219  object where final results are stored.
220  */
222  size_t nf ,
223  size_t nx ,
224  size_t ng ,
225  const Dvector& xi ,
226  const Dvector& xl ,
227  const Dvector& xu ,
228  const Dvector& gl ,
229  const Dvector& gu ,
230  FG_eval& fg_eval ,
231  bool retape ,
232  bool sparse_forward ,
233  bool sparse_reverse ,
234  solve_result<Dvector>& solution ) :
235  nf_ ( nf ),
236  nx_ ( nx ),
237  ng_ ( ng ),
238  xi_ ( xi ),
239  xl_ ( xl ),
240  xu_ ( xu ),
241  gl_ ( gl ),
242  gu_ ( gu ),
243  fg_eval_ ( fg_eval ),
244  retape_ ( retape ),
245  sparse_forward_ ( sparse_forward ),
246  sparse_reverse_ ( sparse_reverse ),
247  solution_ ( solution )
249 
250  size_t i, j;
251  size_t nfg = nf_ + ng_;
252 
253  // initialize x0_ and fg0_ wih proper dimensions and value nan
254  x0_.resize(nx);
255  fg0_.resize(nfg);
256  for(i = 0; i < nx_; i++)
257  x0_[i] = CppAD::nan(0.0);
258  for(i = 0; i < nfg; i++)
259  fg0_[i] = CppAD::nan(0.0);
260 
261  if( ! retape_ )
262  { // make adfun_ correspond to x -> [ f(x), g(x) ]
263  ADvector a_x(nx_), a_fg(nfg);
264  for(i = 0; i < nx_; i++)
265  a_x[i] = xi_[i];
266  CppAD::Independent(a_x);
267  fg_eval_(a_fg, a_x);
268  adfun_.Dependent(a_x, a_fg);
269  // optimize because we will make repeated use of this tape
270  adfun_.optimize();
271  }
272  if( sparse_forward_ | sparse_reverse_ )
273  { CPPAD_ASSERT_UNKNOWN( ! retape );
274  size_t m = nf_ + ng_;
275  //
276  // -----------------------------------------------------------
277  // Jacobian
278  pattern_jac_.resize( m * nx_ );
279  if( nx_ <= m )
280  { // use forward mode to compute sparsity
281 
282  // number of bits that are packed into one unit in vectorBool
283  size_t n_column = vectorBool::bit_per_unit();
284 
285  // sparsity patterns for current columns
286  vectorBool r(nx_ * n_column), s(m * n_column);
287 
288  // compute the sparsity pattern n_column columns at a time
289  size_t n_loop = (nx_ - 1) / n_column + 1;
290  for(size_t i_loop = 0; i_loop < n_loop; i_loop++)
291  { // starting column index for this iteration
292  size_t i_column = i_loop * n_column;
293 
294  // pattern that picks out the appropriate columns
295  for(i = 0; i < nx_; i++)
296  { for(j = 0; j < n_column; j++)
297  r[i * n_column + j] = (i == i_column + j);
298  }
299  s = adfun_.ForSparseJac(n_column, r);
300 
301  // fill in the corresponding columns of total_sparsity
302  for(i = 0; i < m; i++)
303  { for(j = 0; j < n_column; j++)
304  { if( i_column + j < nx_ )
305  pattern_jac_[i * nx_ + i_column + j] =
306  s[i * n_column + j];
307  }
308  }
309  }
310  }
311  else
312  { // use reverse mode to compute sparsity
313 
314  // number of bits that are packed into one unit in vectorBool
315  size_t n_row = vectorBool::bit_per_unit();
316 
317  // sparsity patterns for current rows
318  vectorBool r(n_row * m), s(n_row * nx_);
319 
320  // compute the sparsity pattern n_row row at a time
321  size_t n_loop = (m - 1) / n_row + 1;
322  for(size_t i_loop = 0; i_loop < n_loop; i_loop++)
323  { // starting row index for this iteration
324  size_t i_row = i_loop * n_row;
325 
326  // pattern that picks out the appropriate rows
327  for(i = 0; i < n_row; i++)
328  { for(j = 0; j < m; j++)
329  r[i * m + j] = (i_row + i == j);
330  }
331  s = adfun_.RevSparseJac(n_row, r);
332 
333  // fill in correspoding rows of total sparsity
334  for(i = 0; i < n_row; i++)
335  { for(j = 0; j < nx_; j++)
336  if( i_row + i < m )
337  pattern_jac_[ (i_row + i) * nx_ + j ] =
338  s[ i * nx_ + j];
339  }
340  }
341  }
342  /*
343  { // use reverse mode to compute sparsity
344  CppAD::vectorBool s(m * m);
345  for(i = 0; i < m; i++)
346  { for(j = 0; j < m; j++)
347  s[i * m + j] = (i == j);
348  }
349  pattern_jac_ = adfun_.RevSparseJac(m, s);
350  }
351  */
352  // Set row and column indices in Jacoian of [f(x), g(x)]
353  // for Jacobian of g(x). These indices are in row major order.
354  for(i = nf_; i < nfg; i++)
355  { for(j = 0; j < nx_; j++)
356  { if( pattern_jac_[ i * nx_ + j ] )
357  { row_jac_.push_back(i);
358  col_jac_.push_back(j);
359  }
360  }
361  }
362  // Set row and column indices in Jacoian of [f(x), g(x)]
363  // for Jacobian of g(x). These indices are in row major order.
364  // -----------------------------------------------------------
365  // Hessian
366  pattern_hes_.resize(nx_ * nx_);
367 
368  // number of bits that are packed into one unit in vectorBool
369  size_t n_column = vectorBool::bit_per_unit();
370 
371  // sparsity patterns for current columns
372  vectorBool r(nx_ * n_column), h(nx_ * n_column);
373 
374  // sparsity pattern for range space of function
375  vectorBool s(m);
376  for(i = 0; i < m; i++)
377  s[i] = true;
378 
379  // compute the sparsity pattern n_column columns at a time
380  size_t n_loop = (nx_ - 1) / n_column + 1;
381  for(size_t i_loop = 0; i_loop < n_loop; i_loop++)
382  { // starting column index for this iteration
383  size_t i_column = i_loop * n_column;
384 
385  // pattern that picks out the appropriate columns
386  for(i = 0; i < nx_; i++)
387  { for(j = 0; j < n_column; j++)
388  r[i * n_column + j] = (i == i_column + j);
389  }
390  adfun_.ForSparseJac(n_column, r);
391 
392  // sparsity pattern corresponding to paritls w.r.t. (theta, u)
393  // of partial w.r.t. the selected columns
394  bool transpose = true;
395  h = adfun_.RevSparseHes(n_column, s, transpose);
396 
397  // fill in the corresponding columns of total_sparsity
398  for(i = 0; i < nx_; i++)
399  { for(j = 0; j < n_column; j++)
400  { if( i_column + j < nx_ )
401  pattern_hes_[i * nx_ + i_column + j] =
402  h[i * n_column + j];
403  }
404  }
405  }
406  // Set row and column indices for Lower triangle of Hessian
407  // of Lagragian. These indices are in row major order.
408  for(i = 0; i < nx_; i++)
409  { for(j = 0; j < nx_; j++)
410  { if( pattern_hes_[ i * nx_ + j ] )
411  if( j <= i )
412  { row_hes_.push_back(i);
413  col_hes_.push_back(j);
414  }
415  }
416  }
417  }
418  else
419  { // Set row and column indices in Jacoian of [f(x), g(x)]
420  // for Jacobian of g(x). These indices are in row major order.
421  for(i = nf_; i < nfg; i++)
422  { for(j = 0; j < nx_; j++)
423  { row_jac_.push_back(i);
424  col_jac_.push_back(j);
425  }
426  }
427  // Set row and column indices for lower triangle of Hessian.
428  // These indices are in row major order.
429  for(i = 0; i < nx_; i++)
430  { for(j = 0; j <= i; j++)
431  { row_hes_.push_back(i);
432  col_hes_.push_back(j);
433  }
434  }
435  }
436 
437  // Column order indirect sort of the Jacobian indices
438  col_order_jac_.resize( col_jac_.size() );
440  }
441  // -----------------------------------------------------------------------
442  /*!
443  Return dimension information about optimization problem.
444 
445  \param[out] n
446  is set to the value nx_.
447 
448  \param[out] m
449  is set to the value ng_.
450 
451  \param[out] nnz_jac_g
452  is set to ng_ * nx_ (sparsity not yet implemented)
453 
454  \param[out] nnz_h_lag
455  is set to nx_*(nx_+1)/2 (sparsity not yet implemented)
456 
457  \param[out] index_style
458  is set to C_STYLE; i.e., zeoro based indexing is used in the
459  information passed to Ipopt.
460  */
461  virtual bool get_nlp_info(
462  Index& n ,
463  Index& m ,
464  Index& nnz_jac_g ,
465  Index& nnz_h_lag ,
466  IndexStyleEnum& index_style )
467  {
468  n = static_cast<Index>(nx_);
469  m = static_cast<Index>(ng_);
470  nnz_jac_g = static_cast<Index>(row_jac_.size());
471  nnz_h_lag = static_cast<Index>(row_hes_.size());
472 
473 # ifndef NDEBUG
474  if( ! (sparse_forward_ | sparse_reverse_) )
475  { size_t nnz = static_cast<size_t>(nnz_jac_g);
476  CPPAD_ASSERT_UNKNOWN( nnz == ng_ * nx_);
477  //
478  nnz = static_cast<size_t>(nnz_h_lag);
479  CPPAD_ASSERT_UNKNOWN( nnz == (nx_ * (nx_ + 1)) / 2 );
480  }
481 # endif
482 
483  // use the fortran index style for row/col entries
484  index_style = C_STYLE;
485 
486  return true;
487  }
488  // -----------------------------------------------------------------------
489  /*!
490  Return bound information about optimization problem.
491 
492  \param[in] n
493  is the dimension of the domain space for f(x) and g(x); i.e.,
494  it must be equal to nx_.
495 
496  \param[out] x_l
497  is a vector of size nx_.
498  The input value of its elements does not matter.
499  On output, it is a copy of the lower bound for \f$ x \f$; i.e.,
500  xl_.
501 
502  \param[out] x_u
503  is a vector of size nx_.
504  The input value of its elements does not matter.
505  On output, it is a copy of the upper bound for \f$ x \f$; i.e.,
506  xu_.
507 
508  \param[in] m
509  is the dimension of the range space for g(x). i.e.,
510  it must be equal to ng_.
511 
512  \param[out] g_l
513  is a vector of size ng_.
514  The input value of its elements does not matter.
515  On output, it is a copy of the lower bound for \f$ g(x) \f$; i.e., gl_.
516 
517  \param[out] g_u
518  is a vector of size ng_.
519  The input value of its elements does not matter.
520  On output, it is a copy of the upper bound for \f$ g(x) \f$; i.e, gu_.
521  */
522  virtual bool get_bounds_info(
523  Index n ,
524  Number* x_l ,
525  Number* x_u ,
526  Index m ,
527  Number* g_l ,
528  Number* g_u )
529  { size_t i;
530  // here, the n and m we gave IPOPT in get_nlp_info are passed back
531  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_);
532  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_);
533 
534  // pass back bounds
535  for(i = 0; i < nx_; i++)
536  { x_l[i] = xl_[i];
537  x_u[i] = xu_[i];
538  }
539  for(i = 0; i < ng_; i++)
540  { g_l[i] = gl_[i];
541  g_u[i] = gu_[i];
542  }
543 
544  return true;
545  }
546  // -----------------------------------------------------------------------
547  /*!
548  Return initial x value where optimiation is started.
549 
550  \param[in] n
551  must be equal to the domain dimension for f(x) and g(x); i.e.,
552  it must be equal to nx_.
553 
554  \param[in] init_x
555  must be equal to true.
556 
557  \param[out] x
558  is a vector of size nx_.
559  The input value of its elements does not matter.
560  On output, it is a copy of the initial value for \f$ x \f$; i.e. xi_.
561 
562  \param[in] init_z
563  must be equal to false.
564 
565  \param z_L
566  is not used.
567 
568  \param z_U
569  is not used.
570 
571  \param[in] m
572  must be equal to the domain dimension for f(x) and g(x); i.e.,
573  it must be equal to ng_.
574 
575  \param init_lambda
576  must be equal to false.
577 
578  \param lambda
579  is not used.
580  */
581  virtual bool get_starting_point(
582  Index n ,
583  bool init_x ,
584  Number* x ,
585  bool init_z ,
586  Number* z_L ,
587  Number* z_U ,
588  Index m ,
589  bool init_lambda ,
590  Number* lambda )
591  { size_t j;
592 
593  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
594  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
595  CPPAD_ASSERT_UNKNOWN(init_x == true);
596  CPPAD_ASSERT_UNKNOWN(init_z == false);
597  CPPAD_ASSERT_UNKNOWN(init_lambda == false);
598 
599  for(j = 0; j < nx_; j++)
600  x[j] = xi_[j];
601 
602  return true;
603  }
604  // -----------------------------------------------------------------------
605  /*!
606  Evaluate the objective fucntion f(x).
607 
608  \param[in] n
609  is the dimension of the argument space for f(x); i.e., must be equal nx_.
610 
611  \param[in] x
612  is a vector of size nx_ containing the point at which to evaluate
613  the function sum_i f_i (x).
614 
615  \param[in] new_x
616  is false if the previous call to any one of the
617  \ref Evaluation_Methods used the same value for x.
618 
619  \param[out] obj_value
620  is the value of the objective sum_i f_i (x) at this value of x.
621 
622  \return
623  The return value is always true; see \ref Evaluation_Methods.
624  */
625  virtual bool eval_f(
626  Index n ,
627  const Number* x ,
628  bool new_x ,
629  Number& obj_value )
630  { size_t i;
631  if( new_x )
632  cache_new_x(x);
633  //
634  double sum = 0.0;
635  for(i = 0; i < nf_; i++)
636  sum += fg0_[i];
637  obj_value = static_cast<Number>(sum);
638  return true;
639  }
640  // -----------------------------------------------------------------------
641  /*!
642  Evaluate the gradient of f(x).
643 
644  \param[in] n
645  is the dimension of the argument space for f(x); i.e., must be equal nx_.
646 
647  \param[in] x
648  has a vector of size nx_ containing the point at which to evaluate
649  the gradient of f(x).
650 
651  \param[in] new_x
652  is false if the previous call to any one of the
653  \ref Evaluation_Methods used the same value for x.
654 
655  \param[out] grad_f
656  is a vector of size nx_.
657  The input value of its elements does not matter.
658  The output value of its elements is the gradient of f(x)
659  at this value of.
660 
661  \return
662  The return value is always true; see \ref Evaluation_Methods.
663  */
664  virtual bool eval_grad_f(
665  Index n ,
666  const Number* x ,
667  bool new_x ,
668  Number* grad_f )
669  { size_t i;
670  if( new_x )
671  cache_new_x(x);
672  //
673  Dvector w(nf_ + ng_), dw(nx_);
674  for(i = 0; i < nf_; i++)
675  w[i] = 1.0;
676  for(i = 0; i < ng_; i++)
677  w[nf_ + i] = 0.0;
678  dw = adfun_.Reverse(1, w);
679  for(i = 0; i < nx_; i++)
680  grad_f[i] = dw[i];
681  return true;
682  }
683  // -----------------------------------------------------------------------
684  /*!
685  Evaluate the function g(x).
686 
687  \param[in] n
688  is the dimension of the argument space for g(x); i.e., must be equal nx_.
689 
690  \param[in] x
691  has a vector of size n containing the point at which to evaluate
692  the gradient of g(x).
693 
694  \param[in] new_x
695  is false if the previous call to any one of the
696  \ref Evaluation_Methods used the same value for x.
697 
698  \param[in] m
699  is the dimension of the range space for g(x); i.e., must be equal to ng_.
700 
701  \param[out] g
702  is a vector of size ng_.
703  The input value of its elements does not matter.
704  The output value of its elements is
705  the value of the function g(x) at this value of x.
706 
707  \return
708  The return value is always true; see \ref Evaluation_Methods.
709  */
710  virtual bool eval_g(
711  Index n ,
712  const Number* x ,
713  bool new_x ,
714  Index m ,
715  Number* g )
716  { size_t i;
717  if( new_x )
718  cache_new_x(x);
719  //
720  for(i = 0; i < ng_; i++)
721  g[i] = fg0_[nf_ + i];
722  return true;
723  }
724  // -----------------------------------------------------------------------
725  /*!
726  Evaluate the Jacobian of g(x).
727 
728  \param[in] n
729  is the dimension of the argument space for g(x);
730  i.e., must be equal nx_.
731 
732  \param x
733  If values is not NULL,
734  x is a vector of size nx_ containing the point at which to evaluate
735  the gradient of g(x).
736 
737  \param[in] new_x
738  is false if the previous call to any one of the
739  \ref Evaluation_Methods used the same value for x.
740 
741  \param[in] m
742  is the dimension of the range space for g(x);
743  i.e., must be equal to ng_.
744 
745  \param[in] nele_jac
746  is the number of possibly non-zero elements in the Jacobian of g(x);
747  i.e., must be equal to ng_ * nx_.
748 
749  \param iRow
750  if values is not NULL, iRow is not defined.
751  if values is NULL, iRow
752  is a vector with size nele_jac.
753  The input value of its elements does not matter.
754  On output,
755  For <tt>k = 0 , ... , nele_jac-1, iRow[k]</tt> is the
756  base zero row index for the
757  k-th possibly non-zero entry in the Jacobian of g(x).
758 
759  \param jCol
760  if values is not NULL, jCol is not defined.
761  if values is NULL, jCol
762  is a vector with size nele_jac.
763  The input value of its elements does not matter.
764  On output,
765  For <tt>k = 0 , ... , nele_jac-1, jCol[k]</tt> is the
766  base zero column index for the
767  k-th possibly non-zero entry in the Jacobian of g(x).
768 
769  \param values
770  if \c values is not \c NULL, \c values
771  is a vector with size \c nele_jac.
772  The input value of its elements does not matter.
773  On output,
774  For <tt>k = 0 , ... , nele_jac-1, values[k]</tt> is the
775  value for the
776  k-th possibly non-zero entry in the Jacobian of g(x).
777 
778  \return
779  The return value is always true; see \ref Evaluation_Methods.
780  */
781  virtual bool eval_jac_g(
782  Index n,
783  const Number* x,
784  bool new_x,
785 
786  Index m,
787  Index nele_jac,
788  Index* iRow,
789  Index *jCol,
790 
791  Number* values)
792  { size_t i, j, k, ell;
793  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
794  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
795  //
796  size_t nk = row_jac_.size();
797  CPPAD_ASSERT_UNKNOWN( static_cast<size_t>(nele_jac) == nk );
798  //
799  if( new_x )
800  cache_new_x(x);
801 
802  if( values == NULL )
803  { for(k = 0; k < nk; k++)
804  { i = row_jac_[k];
805  j = col_jac_[k];
806  CPPAD_ASSERT_UNKNOWN( i >= nf_ );
807  iRow[k] = static_cast<Index>(i - nf_);
808  jCol[k] = static_cast<Index>(j);
809  }
810  return true;
811  }
812  //
813  if( nk == 0 )
814  return true;
815  //
816  if( sparse_forward_ )
817  { Dvector jac(nk);
820  );
821  for(k = 0; k < nk; k++)
822  values[k] = jac[k];
823  }
824  else if( sparse_reverse_ )
825  { Dvector jac(nk);
828  );
829  for(k = 0; k < nk; k++)
830  values[k] = jac[k];
831  }
832  else if( nx_ < ng_ )
833  { // use forward mode
834  Dvector x1(nx_), fg1(nf_ + ng_);
835  for(j = 0; j < nx_; j++)
836  x1[j] = 0.0;
837  // index in col_order_jac_ of next entry
838  ell = 0;
839  k = col_order_jac_[ell];
840  for(j = 0; j < nx_; j++)
841  { // compute j-th column of Jacobian of g(x)
842  x1[j] = 1.0;
843  fg1 = adfun_.Forward(1, x1);
844  while( ell < nk && col_jac_[k] <= j )
845  { CPPAD_ASSERT_UNKNOWN( col_jac_[k] == j );
846  i = row_jac_[k];
847  CPPAD_ASSERT_UNKNOWN( i >= nf_ )
848  values[k] = fg1[i];
849  ell++;
850  if( ell < nk )
851  k = col_order_jac_[ell];
852  }
853  x1[j] = 0.0;
854  }
855  }
856  else
857  { // user reverse mode
858  size_t nfg = nf_ + ng_;
859  // user reverse mode
860  Dvector w(nfg), dw(nx_);
861  for(i = 0; i < nfg; i++)
862  w[i] = 0.0;
863  // index in row_jac_ of next entry
864  k = 0;
865  for(i = nf_; i < nfg; i++)
866  { // compute i-th row of Jacobian of g(x)
867  w[i] = 1.0;
868  dw = adfun_.Reverse(1, w);
869  while( k < nk && row_jac_[k] <= i )
870  { CPPAD_ASSERT_UNKNOWN( row_jac_[k] == i );
871  j = col_jac_[k];
872  values[k] = dw[j];
873  k++;
874  }
875  w[i] = 0.0;
876  }
877  }
878  return true;
879  }
880  // -----------------------------------------------------------------------
881  /*!
882  Evaluate the Hessian of the Lagragian
883 
884  \section The_Hessian_of_the_Lagragian The Hessian of the Lagragian
885  The Hessian of the Lagragian is defined as
886  \f[
887  H(x, \sigma, \lambda )
888  =
889  \sigma \nabla^2 f(x) + \sum_{i=0}^{m-1} \lambda_i \nabla^2 g(x)_i
890  \f]
891 
892  \param[in] n
893  is the dimension of the argument space for g(x);
894  i.e., must be equal nx_.
895 
896  \param x
897  if values is not NULL, x
898  is a vector of size nx_ containing the point at which to evaluate
899  the Hessian of the Lagragian.
900 
901  \param[in] new_x
902  is false if the previous call to any one of the
903  \ref Evaluation_Methods used the same value for x.
904 
905  \param[in] obj_factor
906  the value \f$ \sigma \f$ multiplying the Hessian of
907  f(x) in the expression for \ref The_Hessian_of_the_Lagragian.
908 
909  \param[in] m
910  is the dimension of the range space for g(x);
911  i.e., must be equal to ng_.
912 
913  \param[in] lambda
914  if values is not NULL, lambda
915  is a vector of size ng_ specifing the value of \f$ \lambda \f$
916  in the expression for \ref The_Hessian_of_the_Lagragian.
917 
918  \param[in] new_lambda
919  is true if the previous call to eval_h had the same value for
920  lambda and false otherwise.
921  (Not currently used.)
922 
923  \param[in] nele_hess
924  is the number of possibly non-zero elements in the
925  Hessian of the Lagragian;
926  i.e., must be equal to nx_*(nx_+1)/2.
927 
928  \param iRow
929  if values is not NULL, iRow is not defined.
930  if values is NULL, iRow
931  is a vector with size nele_hess.
932  The input value of its elements does not matter.
933  On output,
934  For <tt>k = 0 , ... , nele_hess-1, iRow[k]</tt> is the
935  base zero row index for the
936  k-th possibly non-zero entry in the Hessian fo the Lagragian.
937 
938  \param jCol
939  if values is not NULL, jCol is not defined.
940  if values is NULL, jCol
941  is a vector with size nele_hess.
942  The input value of its elements does not matter.
943  On output,
944  For <tt>k = 0 , ... , nele_hess-1, jCol[k]</tt> is the
945  base zero column index for the
946  k-th possibly non-zero entry in the Hessian of the Lagragian.
947 
948  \param values
949  if values is not NULL, it
950  is a vector with size nele_hess.
951  The input value of its elements does not matter.
952  On output,
953  For <tt>k = 0 , ... , nele_hess-1, values[k]</tt> is the
954  value for the
955  k-th possibly non-zero entry in the Hessian of the Lagragian.
956 
957  \return
958  The return value is always true; see \ref Evaluation_Methods.
959  */
960  virtual bool eval_h(
961  Index n ,
962  const Number* x ,
963  bool new_x ,
964  Number obj_factor ,
965  Index m ,
966  const Number* lambda ,
967  bool new_lambda ,
968  Index nele_hess ,
969  Index* iRow ,
970  Index* jCol ,
971  Number* values )
972  { size_t i, j, k;
973  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
974  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
975  //
976  size_t nk = row_hes_.size();
977  CPPAD_ASSERT_UNKNOWN( static_cast<size_t>(nele_hess) == nk );
978  //
979  if( new_x )
980  cache_new_x(x);
981  //
982  if( values == NULL )
983  { for(k = 0; k < nk; k++)
984  { i = row_hes_[k];
985  j = col_hes_[k];
986  iRow[k] = static_cast<Index>(i);
987  jCol[k] = static_cast<Index>(j);
988  }
989  return true;
990  }
991  //
992  if( nk == 0 )
993  return true;
994 
995  // weigting vector for Lagragian
996  Dvector w(nf_ + ng_);
997  for(i = 0; i < nf_; i++)
998  w[i] = obj_factor;
999  for(i = 0; i < ng_; i++)
1000  w[i + nf_] = lambda[i];
1001  //
1003  { Dvector hes(nk);
1006  );
1007  for(k = 0; k < nk; k++)
1008  values[k] = hes[k];
1009  }
1010  else
1011  { Dvector hes(nx_ * nx_);
1012  hes = adfun_.Hessian(x0_, w);
1013  for(k = 0; k < nk; k++)
1014  { i = row_hes_[k];
1015  j = col_hes_[k];
1016  values[k] = hes[i * nx_ + j];
1017  }
1018  }
1019  return true;
1020  }
1021  // ----------------------------------------------------------------------
1022  /*!
1023  Pass solution information from Ipopt to users solution structure.
1024 
1025  \param[in] status
1026  is value that the Ipopt solution status
1027  which gets mapped to a correponding value for
1028  \n
1029  <tt>solution_.status</tt>
1030 
1031  \param[in] n
1032  is the dimension of the domain space for f(x) and g(x); i.e.,
1033  it must be equal to nx_.
1034 
1035  \param[in] x
1036  is a vector with size nx_ specifing the final solution.
1037  This is the output value for
1038  \n
1039  <tt>solution_.x</tt>
1040 
1041  \param[in] z_L
1042  is a vector with size nx_ specifing the Lagragian multipliers for the
1043  constraint \f$ x^l \leq x \f$.
1044  This is the output value for
1045  \n
1046  <tt>solution_.zl</tt>
1047 
1048  \param[in] z_U
1049  is a vector with size nx_ specifing the Lagragian multipliers for the
1050  constraint \f$ x \leq x^u \f$.
1051  This is the output value for
1052  \n
1053  <tt>solution_.zu</tt>
1054 
1055  \param[in] m
1056  is the dimension of the range space for g(x). i.e.,
1057  it must be equal to ng_.
1058 
1059  \param[in] g
1060  is a vector with size ng_ containing the value of the constraint function
1061  g(x) at the final solution x.
1062  This is the output value for
1063  \n
1064  <tt>solution_.g</tt>
1065 
1066  \param[in] lambda
1067  is a vector with size ng_ specifing the Lagragian multipliers for the
1068  constraints \f$ g^l \leq g(x) \leq g^u \f$.
1069  This is the output value for
1070  \n
1071  <tt>solution_.lambda</tt>
1072 
1073  \param[in] obj_value
1074  is the value of the objective function f(x) at the final solution x.
1075  This is the output value for
1076  \n
1077  <tt>solution_.obj_value</tt>
1078 
1079  \param[in] ip_data
1080  is unspecified (by Ipopt) and hence not used.
1081 
1082  \param[in] ip_cq
1083  is unspecified (by Ipopt) and hence not used.
1084 
1085  \par solution_[out]
1086  this is a reference to the solution argument
1087  in the constructor for solve_callback.
1088  The results are stored here
1089  (see documentation above).
1090  */
1091  virtual void finalize_solution(
1092  Ipopt::SolverReturn status ,
1093  Index n ,
1094  const Number* x ,
1095  const Number* z_L ,
1096  const Number* z_U ,
1097  Index m ,
1098  const Number* g ,
1099  const Number* lambda ,
1100  Number obj_value ,
1101  const Ipopt::IpoptData* ip_data ,
1102  Ipopt::IpoptCalculatedQuantities* ip_cq
1103  )
1104  { size_t i, j;
1105 
1106  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(n) == nx_ );
1107  CPPAD_ASSERT_UNKNOWN(static_cast<size_t>(m) == ng_ );
1108 
1109  switch(status)
1110  { // convert status from Ipopt enum to solve_result<Dvector> enum
1111  case Ipopt::SUCCESS:
1113  break;
1114 
1115  case Ipopt::MAXITER_EXCEEDED:
1116  solution_.status =
1118  break;
1119 
1120  case Ipopt::STOP_AT_TINY_STEP:
1121  solution_.status =
1123  break;
1124 
1125  case Ipopt::STOP_AT_ACCEPTABLE_POINT:
1126  solution_.status =
1128  break;
1129 
1130  case Ipopt::LOCAL_INFEASIBILITY:
1131  solution_.status =
1133  break;
1134 
1135  case Ipopt::USER_REQUESTED_STOP:
1136  solution_.status =
1138  break;
1139 
1140  case Ipopt::DIVERGING_ITERATES:
1141  solution_.status =
1143  break;
1144 
1145  case Ipopt::RESTORATION_FAILURE:
1146  solution_.status =
1148  break;
1149 
1150  case Ipopt::ERROR_IN_STEP_COMPUTATION:
1151  solution_.status =
1153  break;
1154 
1155  case Ipopt::INVALID_NUMBER_DETECTED:
1156  solution_.status =
1158  break;
1159 
1160  case Ipopt::INTERNAL_ERROR:
1161  solution_.status =
1163  break;
1164 
1165  default:
1166  solution_.status =
1168  }
1169 
1170  solution_.x.resize(nx_);
1171  solution_.zl.resize(nx_);
1172  solution_.zu.resize(nx_);
1173  for(j = 0; j < nx_; j++)
1174  { solution_.x[j] = x[j];
1175  solution_.zl[j] = z_L[j];
1176  solution_.zu[j] = z_U[j];
1177  }
1178  solution_.g.resize(ng_);
1179  solution_.lambda.resize(ng_);
1180  for(i = 0; i < ng_; i++)
1181  { solution_.g[i] = g[i];
1182  solution_.lambda[i] = lambda[i];
1183  }
1184  solution_.obj_value = obj_value;
1185  return;
1186  }
1187 };
1188 
1189 } // end namespace ipopt
1190 } // END_CPPAD_NAMESPACE
1191 
1192 # endif
bool sparse_reverse_
Should sparse methods be used to compute Jacobians and Hessians with reverse mode used for Jacobian...
class used by SparseHessian to hold information so it does not need to be recomputed.
includes the entire CppAD package in the necessary order.
solve_callback(size_t nf, size_t nx, size_t ng, const Dvector &xi, const Dvector &xl, const Dvector &xu, const Dvector &gl, const Dvector &gu, FG_eval &fg_eval, bool retape, bool sparse_forward, bool sparse_reverse, solve_result< Dvector > &solution)
Constructor for the interface between ipopt::solve and Ipopt.
Ipopt::Number Number
A Scalar value used by Ipopt.
size_t SparseJacobianReverse(const VectorBase &x, const VectorSet &p, const VectorSize &r, const VectorSize &c, VectorBase &jac, sparse_jacobian_work &work)
Compute user specified subset of a sparse Jacobian using reverse mode.
virtual bool get_bounds_info(Index n, Number *x_l, Number *x_u, Index m, Number *g_l, Number *g_u)
Return bound information about optimization problem.
virtual bool eval_g(Index n, const Number *x, bool new_x, Index m, Number *g)
Evaluate the function g(x).
CppAD::ADFun< double > adfun_
AD function object that evaluates x -> [ f(x) , g(x) ]. If retape is false, this object is initialized ...
Dvector fg0_
value of fg corresponding to previous new_x
The CppAD Simple Vector template class.
Definition: vector.hpp:338
solve_result< Dvector > & solution_
final results are returned to this structure
bool sparse_forward_
Should sparse methods be used to compute Jacobians and Hessians with forward mode used for Jacobian...
const Dvector & xi_
initial value for x
static size_t bit_per_unit(void)
Definition: vector.hpp:672
const size_t nf_
dimension of the range space for f(x). The objective is sum_i f_i (x). Note that, at this point...
Class that contains information about solve problem result.
virtual void finalize_solution(Ipopt::SolverReturn status, Index n, const Number *x, const Number *z_L, const Number *z_U, Index m, const Number *g, const Number *lambda, Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq)
Pass solution information from Ipopt to the user's solution structure.
const size_t ng_
dimension of the range space for g(x)
CppAD::vectorBool pattern_jac_
Sparsity pattern for Jacobian of [f(x), g(x) ]. If sparse is true, this pattern set by constructor an...
Scalar nan(const Scalar &zero)
Definition: nan.hpp:187
size_t SparseJacobianForward(const VectorBase &x, const VectorSet &p, const VectorSize &r, const VectorSize &c, VectorBase &jac, sparse_jacobian_work &work)
Compute user specified subset of a sparse Jacobian using forward mode.
CppAD::vector< size_t > row_jac_
Row indices of [f(x), g(x)] for Jacobian of g(x) in row order. (Set by constructor and not changed...
CppAD::vector< size_t > col_order_jac_
col_order_jac_ sorts row_jac_ and col_jac_ in column order. (Set by constructor and not changed...
Class that Ipopt uses for obtaining information about this problem.
Ipopt::TNLP::IndexStyleEnum IndexStyleEnum
Indexing style used in Ipopt sparsity structure.
virtual bool get_starting_point(Index n, bool init_x, Number *x, bool init_z, Number *z_L, Number *z_U, Index m, bool init_lambda, Number *lambda)
Return initial x value where optimization is started.
CppAD::sparse_hessian_work work_hes_
Work vector used by SparseHessian, stored here to avoid recalculation.
void Independent(VectorAD &x, size_t abort_op_index)
Declaration of independent variables.
VectorSet RevSparseJac(size_t q, const VectorSet &s, bool transpose=false, bool dependency=false)
User API for Jacobian sparsity patterns using reverse mode.
const Dvector & xl_
lower limit for x
VectorSet RevSparseHes(size_t q, const VectorSet &s, bool transpose=false)
User API for Hessian sparsity patterns using reverse mode.
const Dvector & gu_
upper limit for g(x)
CppAD::vectorBool pattern_hes_
Sparsity pattern for Hessian of the Lagrangian. If sparse is true, this pattern set by constructor and doe...
void Dependent(local::ADTape< Base > *tape, const ADvector &y)
change the operation sequence corresponding to this object
Definition: dependent.hpp:236
void index_sort(const VectorKey &keys, VectorSize &ind)
Compute the indices that sort a vector of keys.
Definition: index_sort.hpp:140
class used by SparseJacobian to hold information so it does not need to be recomputed.
const Dvector & xu_
upper limit for x
#define CPPAD_ASSERT_UNKNOWN(exp)
Check that exp is true, if not terminate execution.
Dvector x0_
value of x corresponding to previous new_x
void optimize(const std::string &options="")
Optimize a player object operation sequence.
Definition: optimize.hpp:216
CppAD::sparse_jacobian_work work_jac_
Work vector used by SparseJacobian, stored here to avoid recalculation.
VectorBase Forward(size_t q, size_t r, const VectorBase &x)
forward mode user API, one order multiple directions.
Definition: forward.hpp:324
const size_t nx_
dimension of the domain space for f(x) and g(x)
VectorSet ForSparseJac(size_t q, const VectorSet &r, bool transpose=false, bool dependency=false)
User API for Jacobian sparsity patterns using forward mode.
virtual bool eval_f(Index n, const Number *x, bool new_x, Number &obj_value)
Evaluate the objective function f(x).
virtual bool get_nlp_info(Index &n, Index &m, Index &nnz_jac_g, Index &nnz_h_lag, IndexStyleEnum &index_style)
Return dimension information about optimization problem.
CppAD::vector< size_t > col_hes_
Column indices of Hessian lower left triangle in same order as row_hes_. (Set by constructor and not change...
VectorBase SparseHessian(const VectorBase &x, const VectorBase &w)
calculate sparse Hessians
Ipopt::Index Index
An index value used by Ipopt.
void cache_new_x(const Number *x)
Cache information for a new value of x.
VectorBase Hessian(const VectorBase &x, const VectorBase &w)
calculate Hessian for one component of f
Class that contains information about solve problem result.
bool retape_
should operation sequence be retaped for each new x.
const Dvector & gl_
lower limit for g(x)
CppAD::vector< size_t > row_hes_
Row indices of Hessian lower left triangle in row order. (Set by constructor and not changed...
void resize(size_t n)
change number of elements in this vector
Definition: vector.hpp:721
virtual bool eval_jac_g(Index n, const Number *x, bool new_x, Index m, Index nele_jac, Index *iRow, Index *jCol, Number *values)
Evaluate the Jacobian of g(x).
virtual bool eval_h(Index n, const Number *x, bool new_x, Number obj_factor, Index m, const Number *lambda, bool new_lambda, Index nele_hess, Index *iRow, Index *jCol, Number *values)
Evaluate the Hessian of the Lagrangian.
virtual bool eval_grad_f(Index n, const Number *x, bool new_x, Number *grad_f)
Evaluate the gradient of f(x).
FG_eval & fg_eval_
object that evaluates f(x) and g(x)
CppAD::vector< size_t > col_jac_
Column indices for Jacobian of g(x), same order as row_jac_. (Set by constructor and not changed...
VectorBase Reverse(size_t p, const VectorBase &v)
reverse mode sweep
Definition: reverse.hpp:91