1 # ifndef CPPAD_LOCAL_LOAD_OP_HPP 
    2 # define CPPAD_LOCAL_LOAD_OP_HPP 
   16 namespace CppAD { 
namespace local { 
 
  143 template <
class Base>
 
  148      const Base*    parameter   ,
 
  152      size_t*        index_by_ind   ,
 
  226 template <
class Vector_set>
 
  231      size_t              num_combined   ,
 
  232      const size_t*       combined       ,
 
  233      Vector_set&         var_sparsity   ,
 
  234      Vector_set&         vecad_sparsity )
 
  246 template <
class Base>
 
  251      const Base*    parameter   ,
 
  255      size_t*        index_by_ind   ,
 
  265      size_t i_vec = arg[1];
 
  269      size_t i_v_x  = index_by_ind[ arg[0] + i_vec ];
 
  270      Base* z       = taylor + i_z * cap_order;
 
  271      if( isvar_by_ind[ arg[0] + i_vec ]  )
 
  273           var_by_load_op[ arg[2] ] = 
addr_t( i_v_x );
 
  274           Base* v_x = taylor + i_v_x * cap_order;
 
  279           var_by_load_op[ arg[2] ] = 0;
 
  280           Base v_x  = parameter[i_v_x];
 
  290 template <
class Base>
 
  295      const Base*    parameter   ,
 
  299      size_t*        index_by_ind   ,
 
  307      size_t i_vec = 
Integer( taylor[ arg[1] * cap_order + 0 ] );
 
  309           i_vec < index_by_ind[ arg[0] - 1 ] ,
 
  310           "VecAD: index during zero order forward sweep is out of range" 
  314      size_t i_v_x  = index_by_ind[ arg[0] + i_vec ];
 
  315      Base* z       = taylor + i_z * cap_order;
 
  316      if( isvar_by_ind[ arg[0] + i_vec ]  )
 
  318           var_by_load_op[ arg[2] ] = 
addr_t( i_v_x );
 
  319           Base* v_x = taylor + i_v_x * cap_order;
 
  324           var_by_load_op[ arg[2] ] = 0;
 
  325           Base v_x  = parameter[i_v_x];
 
  413 template <
class Base>
 
  423      const addr_t*        var_by_load_op       ,
 
  434      size_t i_var = size_t( var_by_load_op[ arg[2] ] );
 
  437      size_t num_taylor_per_var = (cap_order-1) * r + 1;
 
  438      Base* z  = taylor + i_z * num_taylor_per_var;
 
  440      {    Base* v_x = taylor + i_var * num_taylor_per_var;
 
  441           for(
size_t ell = 0; ell < r; ell++)
 
  442           {    
for(
size_t k = p; k <= q; k++)
 
  443                {    
size_t m = (k-1) * r + 1 + ell;
 
  449      {    
for(
size_t ell = 0; ell < r; ell++)
 
  450           {    
for(
size_t k = p; k <= q; k++)
 
  451                {    
size_t m = (k-1) * r + 1 + ell;
 
  546 template <
class Base>
 
  556      const addr_t*        var_by_load_op )
 
  557 {    
size_t i_load = size_t( var_by_load_op[ arg[2] ] );
 
  566           Base* pz   = partial + i_z    * nc_partial;
 
  567           Base* py_x = partial + i_load * nc_partial;
 
  583 template <
class Vector_set>
 
  589      size_t             num_combined   ,
 
  590      const size_t*      combined       ,
 
  591      Vector_set&        var_sparsity   ,
 
  592      Vector_set&        vecad_sparsity )
 
  598      size_t i_v = combined[ arg[0] - 1 ];
 
  601      var_sparsity.assignment(i_z, i_v, vecad_sparsity);
 
  602      if( dependency & (op == 
LdvOp) )
 
  603           var_sparsity.binary_union(i_z, i_z, arg[1], var_sparsity);
 
  617 template <
class Vector_set>
 
  623      size_t             num_combined   ,
 
  624      const size_t*      combined       ,
 
  625      Vector_set&        var_sparsity   ,
 
  626      Vector_set&        vecad_sparsity )
 
  632      size_t i_v = combined[ arg[0] - 1 ];
 
  635      vecad_sparsity.binary_union(i_v, i_v, i_z, var_sparsity);
 
  636      if( dependency & (op == 
LdvOp) )
 
  637           var_sparsity.binary_union(arg[1], arg[1], i_z, var_sparsity);
 
  661 template <
class Vector_set>
 
  666      size_t             num_combined   ,
 
  667      const size_t*      combined       ,
 
  668      Vector_set&        var_sparsity   ,
 
  669      Vector_set&        vecad_sparsity ,
 
  671      bool*              vecad_jacobian )
 
  677      size_t i_v = combined[ arg[0] - 1 ];
 
  680      vecad_sparsity.binary_union(i_v, i_v, i_z, var_sparsity);
 
  682      vecad_jacobian[i_v] |= var_jacobian[i_z];
 
void forward_load_op_0(const local::player< Base > *play, size_t i_z, const addr_t *arg, const Base *parameter, size_t cap_order, Base *taylor, bool *isvar_by_ind, size_t *index_by_ind, addr_t *var_by_load_op)
Shared documentation for zero order forward mode implementation of op = LdpOp or LdvOp (not called)...
 
#define CPPAD_ASSERT_KNOWN(exp, msg)
Check that exp is true; if not, print msg and terminate execution. 
 
CPPAD_TAPE_ADDR_TYPE addr_t
 
size_t num_load_op_rec(void) const 
Fetch number of VecAD load operations. 
 
size_t NumArg(OpCode op)
Number of arguments for a specified operator. 
 
Class used to store and play back an operation sequence recording. 
 
void forward_sparse_load_op(bool dependency, OpCode op, size_t i_z, const addr_t *arg, size_t num_combined, const size_t *combined, Vector_set &var_sparsity, Vector_set &vecad_sparsity)
Forward mode sparsity operations for LdpOp and LdvOp. 
 
size_t NumRes(OpCode op)
Number of variables resulting from the specified operation. 
 
void forward_load_op(const local::player< Base > *play, OpCode op, size_t p, size_t q, size_t r, size_t cap_order, size_t i_z, const addr_t *arg, const addr_t *var_by_load_op, Base *taylor)
Forward mode, except for zero order, for op = LdpOp or op = LdvOp. 
 
OpCode
Type used to distinguish different AD< Base > atomic operations. 
 
void reverse_load_op(OpCode op, size_t d, size_t i_z, const addr_t *arg, size_t cap_order, const Base *taylor, size_t nc_partial, Base *partial, const addr_t *var_by_load_op)
Reverse mode for op = LdpOp or LdvOp. 
 
void reverse_sparse_hessian_load_op(OpCode op, size_t i_z, const addr_t *arg, size_t num_combined, const size_t *combined, Vector_set &var_sparsity, Vector_set &vecad_sparsity, bool *var_jacobian, bool *vecad_jacobian)
Reverse mode Hessian sparsity operations for LdpOp and LdvOp. 
 
void reverse_sparse_jacobian_load_op(bool dependency, OpCode op, size_t i_z, const addr_t *arg, size_t num_combined, const size_t *combined, Vector_set &var_sparsity, Vector_set &vecad_sparsity)
Reverse mode Jacobian sparsity operations for LdpOp and LdvOp. 
 
#define CPPAD_ASSERT_UNKNOWN(exp)
Check that exp is true; if not, terminate execution. 
 
int Integer(const std::complex< double > &x)
 
void sparse_load_op(OpCode op, size_t i_z, const addr_t *arg, size_t num_combined, const size_t *combined, Vector_set &var_sparsity, Vector_set &vecad_sparsity)
Shared documentation for sparsity operations corresponding to op = LdpOp or LdvOp (not called)...
 
void forward_load_v_op_0(const local::player< Base > *play, size_t i_z, const addr_t *arg, const Base *parameter, size_t cap_order, Base *taylor, bool *isvar_by_ind, size_t *index_by_ind, addr_t *var_by_load_op)
Zero order forward mode implementation of op = LdvOp. 
 
void forward_load_p_op_0(const local::player< Base > *play, size_t i_z, const addr_t *arg, const Base *parameter, size_t cap_order, Base *taylor, bool *isvar_by_ind, size_t *index_by_ind, addr_t *var_by_load_op)
Zero order forward mode implementation of op = LdpOp.