/Users/kmartin/Documents/files/code/cpp/OScpp/COIN-OS/OS/examples/algorithmicDiff/OSAlgorithmicDiffTest.cpp

00001 
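// This test exercises algorithmic differentiation in the OS library on the model read
// from CppADTestLag.osil: first through the low-level OSInstance sweeps forwardAD/reverseAD,
// then through the higher-level function, gradient, and Hessian calls, and finally by taping
// the same functions directly with CppAD.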
00074 #include <cstddef>
00075 #include <cstdlib>
00076 #include <cctype>
00077 #include <cassert>
00078 #include <stack>
00079 #include <cppad/cppad.hpp>
00080 #include <iostream>
00081 
00082 // CoinHelperFunctions includes <cmath>
00083 #include "CoinHelperFunctions.hpp"
00084 #include "OSInstance.h"
00085 #include "OSiLWriter.h"
00086 #include "OSParameters.h"
00087 #include "OSnLNode.h"
00088 #include "OSErrorClass.h"
00089 #include "OSFileUtil.h"  
00090 #include "OSiLReader.h"
00091 #include "OSInstance.h"
00092 #include "OSExpressionTree.h"
00093 #include "OSnLNode.h"
00094 #include "OSDataStructures.h"
00095 
00096 
00097 
00098 #include <vector>  
00099 #include <map> 
00100 #include <string>
00101  
00102 
00103 int  main(){    
00104         using std::cout;
00105         using std::endl;
00106         using CppAD::AD;
00107         //using CppAD::NearEqual;
00108         using CppAD::vector;
00109         std::cout.precision(12);
00110         // error checking functions
00111         bool CheckFunctionValues( double *conVals, double objValue,
00112                 double x0, double x1, double x2, double x3, double y0, double y1, double z );
00113         bool CheckHessianUpper( SparseHessianMatrix *sparseHessian, 
00114                 double x0, double x1, double x2, double x3, double y0, double y1, double z );
00115         bool CheckGradientValues( SparseJacobianMatrix *sparseJac, double *objGrad,
00116                 double x0, double x1, double x2, double x3, double y0, double y1, double z );
00117         bool ok = true;
00118         int k, idx;
00119         //
00120         // get the problem data
00121         //
00122         FileUtil *fileUtil = NULL; 
00123         std::string osilFileName;
00124         std::string osil;
00125         // get the input file
00126         const char dirsep =  CoinFindDirSeparator();
00127         // Set directory containing mps data files.
00128         std::string dataDir;
00129     dataDir = dirsep == '/' ? "../../data/" : "..\\..\\data\\";
00130         //osilFileName =  dataDir + "HS071_NLP.osil";
00131         osilFileName =  dataDir + "osilFiles" + dirsep  + "CppADTestLag.osil";
00132         std::cout  << "osilFileName  =  " << osilFileName << std::endl;
00133         fileUtil = new FileUtil();
00134         osil = fileUtil->getFileAsString( &osilFileName[0]);    
00135         //
00136         // create OSReader and OSInstance objects
00137         OSiLReader *osilreader = NULL;
00138         OSInstance *osinstance = NULL;
00139         // create reader, parse the OSiL, and generate the OSInstance object
00140         try{    
00141                 // a counter
00142                 int kjl;
00143                 osilreader = new OSiLReader();
00144                 osinstance = osilreader->readOSiL( osil);
00145                 std::vector<double> funVals(3);
00146                 std::vector<double> dfunVals(6);
00147                 double *conVals = NULL;
00148                 //conVals = new double[ 2];
00149                 double *objVals = NULL;
00150                 //objVals = new double[ 1];
00151 
00165                 //
00166                 // first initialize the nonlinear structures for call backs
00167                 std::cout << "Initialize Nonlinear Structures" << std::endl;
00168                 osinstance->initForAlgDiff( );
00169                 //osinstance->getJacobianSparsityPattern( );
00170 
00171 
00172                 
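                // Derivatives are taken only over the variables that appear in a nonlinear
                // expression; getAllNonlinearVariablesIndexMap() returns a map whose keys are
                // the indices of those variables, and the AD vectors below use this reduced space.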
00178                 std::map<int, int> varIndexMap;
00179                 std::map<int, int>::iterator posVarIndexMap;
00180                 varIndexMap = osinstance->getAllNonlinearVariablesIndexMap( );
00185                 for(posVarIndexMap = varIndexMap.begin(); posVarIndexMap != varIndexMap.end(); ++posVarIndexMap){
00186                                 std::cout <<  "Variable Index = "   << posVarIndexMap->first  << std::endl ;
00187                 }
00188                 std::cout << "Number of nonlinear variables =  " << varIndexMap.size() << std::endl;
00189                 // domain space vector
00190                 size_t n  = varIndexMap.size(); // three variables
00191                 // range space vector
00192                 size_t m = 3; // Lagrangian has an objective and two constraints
00193 
00194                 std::vector<double> x0( n);
00199                 x0[0] = 1; // the value for variable x0
00200                 x0[1] = 5; // the value for variable x1
00201                 x0[2] = 5; // the value for variable x3                         
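                // forwardAD(0, x0) is a zero-order forward sweep: it evaluates the three taped
                // functions (the objective and the two constraints) at the point x0.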
00202                 std::cout << "CALL forward" << std::endl;
00203                 funVals = osinstance->forwardAD(0, x0);
00204                 for( kjl = 0; kjl < 3; kjl++){
00205                         std::cout << "forward 0 " << funVals[ kjl] << std::endl;
00206                 }
00207                 // get the third column of the Jacobian from a forward sweep
00208                 std::vector<double> x1( n);
00209                 x1[0] = 0;
00210                 x1[1] = 0;
00211                 x1[2] = 1;
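                // a first-order forward sweep returns the directional derivative J(x0)*x1;
                // with x1 equal to the third unit vector this is the third column of the Jacobian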
00212                 std::cout << "Now get the third column of the Jacobian forwardAD(1, x1)"  << std::endl;
00213                 funVals = osinstance->forwardAD(1, x1);
00214                 for( kjl = 0; kjl < 3; kjl++){
00215                         std::cout << "forward 1 " << funVals[ kjl] << std::endl;
00216                 }
00217                 
00226                 x1[0] = 1;
00227                 x1[1] = 0;
00228                 x1[2] = 1;
00229                 // recalculate the forward sweep with the new x1 vector
00230                 funVals = osinstance->forwardAD(1, x1);
00231                 std::vector<double> x2( n);
00232                 x2[0] = 0;
00233                 x2[1] = 0;
00234                 x2[2] = 0;
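                // following CppAD's Taylor-coefficient convention, a second-order forward sweep
                // with x2 = 0 returns (1/2)*x1'*H_i*x1 for each function i, where x1 is the
                // direction used in the preceding first-order sweep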
00235                 std::cout << "Now calculate forwardAD(2, x2)"  << std::endl;
00236                 funVals = osinstance->forwardAD(2, x2);
00237                 for( kjl = 0; kjl < 3; kjl++){
00238                         std::cout << "forward 2 " << funVals[ kjl] << std::endl;
00239                 }                
00240                 
00248                 std::vector<double> vlambda(3);
00249                 vlambda[0] = 0;
00250                 vlambda[1] = 0;
00251                 vlambda[2] = 1;
00252                 // reverse sweep to get third row of Jacobian 
00253                 std::cout << "Now get the third row of the Jacobian reverseAD(1, vlambda)"  << std::endl;
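                // a reverse sweep uses the Taylor coefficients stored by earlier forward sweeps,
                // so a zero-order sweep at the current point is repeated first; reverseAD(1, vlambda)
                // then returns the gradient of vlambda'*F, here the third row of the Jacobian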
00254                 osinstance->forwardAD(0, x0);
00255                 funVals = osinstance->reverseAD(1, vlambda);
00256                 for( kjl = 0; kjl < 3; kjl++){
00257                         std::cout << "reverse 1 " << funVals[ kjl] << std::endl;
00258                 }
00259                 // now get the Hessian of the Lagrangian of the objective and constraints
00260                 // with the following multipliers
00261                 vlambda[0] = 1;
00262                 vlambda[1] = 2;
00263                 vlambda[2] = 1;
00269                 x1[0] = 0;
00270                 x1[1] = 0;
00271                 x1[2] = 1;
00272                 funVals = osinstance->forwardAD(1, x1);          
00273                  
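                // a second-order reverse sweep returns 2*n values: by CppAD convention the
                // even-indexed entries (0, 2, 4) are the first partials of the weighted function
                // (the Lagrangian defined by vlambda) and the odd-indexed entries (1, 3, 5) are
                // H*x1, i.e. the row (column) of the Lagrangian Hessian picked out by the
                // direction e_3 used in the first-order forward sweep above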
00284                 dfunVals = osinstance->reverseAD(2, vlambda);
00285                 // get the first partials of the Lagrangian
00286                 std::cout << "Here are the first partials of the Lagrangian" << std::endl;
00287                 for(int kjl = 0; kjl <= 4; kjl+=2){
00288                         std::cout << dfunVals[ kjl] << std::endl;
00289                 }
00295                 std::cout << "Here is the third row (column) of the Hessian of the Lagrangian" << std::endl;
00296                 for(int kjl = 1; kjl <= 5; kjl+=2){
00297                         std::cout << dfunVals[ kjl] << std::endl;
00298                 }
00304                 double* x = new double[4]; //primal variables
00305                 double* z = new double[2]; //Lagrange multipliers on constraints
00306                 double* w = new double[1]; //Lagrange multiplier on objective
00307                 x[ 0] = 1;    // primal variable 0
00308                 x[ 1] = 5;    // primal variable 1
00309                 x[ 2] = 10;   // primal variable 2
00310                 x[ 3] = 5;    // primal variable 3
00311                 z[ 0] = 2;    // Lagrange multiplier on constraint 0
00312                 z[ 1] = 1;    // Lagrange multiplier on constraint 1
00313                 w[ 0] = 1;    // Lagrange multiplier on the objective function
00314                 
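                // from here on the OSInstance API works over the full primal vector x (all four
                // variables), with z holding the constraint multipliers and w the objective
                // multiplier; as its name suggests, setting bUseExpTreeForFunEval to false below
                // asks OSInstance to evaluate functions via the AD tape rather than by
                // interpreting the OS expression trees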
00344                 osinstance->bUseExpTreeForFunEval = false;
00345                 std::cout << "Calculate objective, idx = -1"  << std::endl;                     
00346                 std::cout << "obj value = " << osinstance->calculateFunctionValue(-1, x,  true) << std::endl;
00347                 
00348                 std::cout << "Calculate  first constraint, idx = 0"  << std::endl;                      
00349                 std::cout << "constraint index 0 value = " << osinstance->calculateFunctionValue(0, x,  true) << std::endl;
00350                  
00351                 std::cout << "Now use calculateAllConstraintFunctionValues"  << std::endl;                      
00352                 conVals = osinstance->calculateAllConstraintFunctionValues(x, true);
00353                 for( idx = 0; idx < osinstance->getConstraintNumber(); idx++){
00354                         std::cout << "CONSTRAINT FUNCTION INDEX = " <<  idx << "  CONSTRAINT FUNCTION VALUE =  "  << *(conVals + idx) << std::endl;
00355                 }
00356                 //
00357                 std::cout << "Now use calculateAllObjectiveFunctionValues"  << std::endl;       
00358                 objVals = osinstance->calculateAllObjectiveFunctionValues( x, NULL, NULL, true, 0);
00359                 for( idx = 0; idx < osinstance->getObjectiveNumber(); idx++){
00360                         std::cout << "OBJECTIVE FUNCTION  INDEX = " << idx <<  "  OBJECTIVE FUNCTION VALUE = "  << *(objVals + idx) << std::endl;
00361                 }
00362                 ok = CheckFunctionValues( conVals, *objVals, x[ 0], x[1], x[2], x[3],  z[0], z[1], w[0] );
00363                 if( ok == 0){
00364                         std::cout << "FAILED CHECKING FUNCTION VALUES TEST" << std::endl;
00365                         return 0;
00366                 }
00367                 else{
00368                         std::cout << "PASSED CHECKING FUNCTION VALUES TEST" << std::endl;
00369                 }
00370                 
00377                 std::cout << "PLACE CALL TO JACOBIAN SPARSITY PATTERN"   << std::endl;
00378                 SparseJacobianMatrix *sparseJac;
00379                 sparseJac = osinstance->getJacobianSparsityPattern();           
00380                 // print out just the sparsity pattern
00381                 std::cout << "JACOBIAN SPARSITY PATTERN"   << std::endl;
00382                 std::cout << "JACOBIAN START SIZE "   <<  sparseJac->startSize << std::endl;
00383                 for(idx = 0; idx < osinstance->getConstraintNumber(); idx++){
00384                         // some solvers (e.g. reduced gradient solvers) may want to know which values of the
00385                         // Jacobian matrix are constant, i.e. linear; sparseJac->conVals gives the number of constant
00386                         // (linear) terms in the gradient of each row, and within each row the constant terms come first
00387                         std::cout << "number constant terms in constraint "   <<  idx << " is " 
00388                         << *(sparseJac->conVals + idx)  << std::endl;
00389                         for(k = *(sparseJac->starts + idx); k < *(sparseJac->starts + idx + 1); k++){
00390                                 std::cout << "row idx = " << idx <<  "  col idx = "<< *(sparseJac->indexes + k) << std::endl;
00391                         }
00392                 }       
00393                 
00394                 SparseHessianMatrix *sparseHessian;
00395                 // the Hessian test
00396                 // get the sparsity pattern -- many solvers want to initialize with just the sparsity
00397                 std::cout << "GET LAGRANGIAN HESSIAN SPARSITY PATTERN"   << std::endl;
00398                 sparseHessian = osinstance->getLagrangianHessianSparsityPattern( );
00399                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00400                         std::cout <<  "Row Index = " << *(sparseHessian->hessRowIdx + idx) ;
00401                         std::cout <<  "  Column Index = " << *(sparseHessian->hessColIdx + idx) << std::endl;
00402                 }                
00403                 
00419                 double *objGrad;
00420                 std::cout << "OBJECTIVE FUNCTION GRADIENT"   << std::endl;
00421                 // in our implementation the objective function gradient is dense
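                // the trailing arguments are the objective index (-1 as above), new_x, and
                // highestOrder; new_x is false because x is unchanged since the function
                // evaluations above, and highestOrder = 1 requests first derivatives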
00422                 objGrad = osinstance->calculateObjectiveFunctionGradient( x, NULL, NULL,  -1, false, 1);
00423                 for(idx = 0; idx < osinstance->getVariableNumber(); idx++){
00424                         std::cout << "col idx = " << idx << "  value =  " << *(objGrad + idx)  << std::endl;
00425                 }
00426                 std::cout << "CONSTRAINT JACOBIAN MATRIX"   << std::endl;
00427                 // now make the gradient calculations and fill in the sparse Jacobian matrix
00428                 sparseJac = osinstance->calculateAllConstraintFunctionGradients( x, NULL, NULL,  false, 1);
00429                 for(idx = 0; idx < osinstance->getConstraintNumber(); idx++){
00430                         for(k = *(sparseJac->starts + idx); k < *(sparseJac->starts + idx + 1); k++){
00431                                 std::cout << "row idx = " << idx <<  "  col idx = "<< *(sparseJac->indexes + k)
00432                                 << " value = " << *(sparseJac->values + k) << std::endl;
00433                         }
00434                 }
00435                 ok = CheckGradientValues( sparseJac, objGrad, x[ 0], x[1], x[2], x[3], z[0], z[1], w[0] );
00436                 if( ok == 0){
00437                         std::cout << "FAILED THE GRADIENT TEST" << std::endl;
00438                         return 0;
00439                 }
00440                 else{
00441                         std::cout << "PASSED THE GRADIENT TEST" << std::endl;
00442                 }                                         
00452                 //first iteration 
00453                 std::cout << "GET LAGRANGIAN HESSIAN FIRST TIME"   << std::endl;
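                // the Lagrangian Hessian weights the objective by w and the constraints by z;
                // new_x is false since x is still unchanged, and highestOrder = 2 requests
                // second derivatives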
00454                 sparseHessian = osinstance->calculateLagrangianHessian( x, w,  z,  false, 2);
00455                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00456                         std::cout << "row idx = " << *(sparseHessian->hessRowIdx + idx) <<  
00457                         "  col idx = "<< *(sparseHessian->hessColIdx + idx)
00458                         << " value = " << *(sparseHessian->hessValues + idx) << std::endl;
00459                 }
00460                 ok = CheckHessianUpper( sparseHessian, x[0],  x[1], x[2],  x[3],  z[0], z[1], w[0]);
00461                 if( ok == 0){
00462                         std::cout << "FAILED THE FIRST HESSIAN TEST" << std::endl;
00463                         return 0; 
00464                 }
00465                 else{
00466                         std::cout << "PASSED THE FIRST HESSIAN TEST" << std::endl;
00467                 }
00468                 // now change an x value; we don't rebuild the tree, but new_x 
00469                 // must be set to true
00470                 x[0] = 5;
00471                 
00472                 
00473                 
00474                 std::cout << "NOW GET LAGRANGIAN HESSIAN SECOND TIME FOR x[0] = 5"   << std::endl;
00475                 
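                // calling with new_x = true and highestOrder = 2 forces re-evaluation at the
                // modified x through second order; the sparseHessian pointer obtained earlier
                // now reflects the recomputed Hessian values, which is what the loop below relies on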
00483                 osinstance->calculateAllObjectiveFunctionValues( x, w, z, true, 2);
00497                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00498                         std::cout << "row idx = " << *(sparseHessian->hessRowIdx + idx) <<  
00499                         "  col idx = "<< *(sparseHessian->hessColIdx + idx)
00500                         << " value = " << *(sparseHessian->hessValues + idx) << std::endl;
00501                 }
00502                 ok = CheckHessianUpper( sparseHessian , x[0],  x[1], x[2], x[3],  z[0], z[1], w[0] );
00503                 if( ok == 0){
00504                         std::cout << "FAILED THE SECOND HESSIAN TEST" << std::endl;
00505                         return 0;
00506                 }
00507                 else{
00508                         std::cout << "PASSED THE SECOND HESSIAN TEST" << std::endl  << std::endl ;
00509                 }
00513                 std::cout << "HERE IS ROW 1 OF JACOBIAN MATRIX" << std::endl; 
00514                 idx = 1;
00515                 for(k = *(sparseJac->starts + idx); k < *(sparseJac->starts + idx + 1); k++){
00516                         std::cout << "row idx = " << idx <<  "  col idx = "<< *(sparseJac->indexes + k)
00517                                 << " value = " << *(sparseJac->values + k) << std::endl;
00518                 }
00519                 std::cout << std::endl; 
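                // calculateHessian with idx = 1 gives the Hessian of constraint 1 by itself,
                // which equals the Lagrangian Hessian with multipliers z = (0, 1) and w = 0;
                // those multiplier values are set below so CheckHessianUpper can verify it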
00526                 sparseHessian = osinstance->calculateHessian(x, 1, true);
00527                 std::cout << "HERE IS ROW 1 OF THE HESSIAN MATRIX" << std::endl;
00528                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00529                         std::cout << "row idx = " << *(sparseHessian->hessRowIdx + idx) <<  
00530                         "  col idx = "<< *(sparseHessian->hessColIdx + idx)
00531                         << " value = " << *(sparseHessian->hessValues + idx) << std::endl;
00532                 }
00533                 //
00534                 // adjust the Lagrange multipliers to correspond to finding Hessian of constraint 1
00535                 z[ 0] = 0;  // Lagrange multiplier on constraint 0
00536                 z[ 1] = 1;  // Lagrange multiplier on constraint 1
00537                 w[ 0] = 0;  // Lagrange multiplier on the objective function
00538                 ok = CheckHessianUpper( sparseHessian , x[0],  x[1], x[2], x[3], z[0], z[1], w[0] );
00539                 if( ok == 0){
00540                         std::cout << "FAILED THE THIRD HESSIAN TEST" << std::endl;
00541                         return 0;
00542                 }
00543                 else{
00544                         std::cout << "PASSED THE THIRD HESSIAN TEST" << std::endl  << std::endl ;
00545                 }
00546                 //set x[0] back to its original value of 1
00547                 x[ 0] = 1;
00548                 //return 0;     
00549                 //
00550                 //
00551                 // now work directly with the CppAD package instead of OSInstance API
00552                 //
00553                 n = 4;
00554                 m = 3;
00555                 CppADvector< AD<double> > X(n);
00556                 CppADvector< AD<double> > Y(m);
00557                 X[0] = 5;
00558                 X[1] = 5;
00559                 X[2] = 0;
00560                 X[3] = 1;
00561                 // declare independent variables and start tape recording
00562                 std::cout << "Start Taping" << std::endl;
00563                 CppAD::Independent( X);
00564                 // range space vector 
00565                 // we include the constant terms in the CppAD functions
00566                 Y[ 0] =  CppAD::pow(X[0], 2) + 9*X[1];
00567                 Y[ 1] =  33 - 105 + 1.37*X[1] + 2*X[3] + 5*X[1] ;
00568                 Y[ 2] =  log(X[0]*X[3]) + 7*X[2] ;
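                // Y[0] is the objective and Y[1], Y[2] are constraints 0 and 1 of the same model
                // (compare the formulas in CheckFunctionValues below)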
00569                 // create f: x -> y and stop tape recording
00570                 CppAD::ADFun<double> f(X, Y); 
00571                 std::cout << "Stop Taping" << std::endl;
00572                 // get function values
00573                 std::vector<double> x_vec( n);
00574                 x_vec[ 0] = x[ 0];
00575                 x_vec[ 1] = x[ 1];
00576                 x_vec[ 2] = x[ 2];
00577                 x_vec[ 3] = x[ 3];
00578                 funVals = f.Forward(0, x_vec);
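                // conVals and objVals still point at the arrays OSInstance returned above;
                // they are reused here as scratch storage for the values computed directly with CppAD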
00579                 conVals[ 0] = funVals[ 1];
00580                 std::cout << "conVals[ 0] = " << conVals[ 0] << std::endl;
00581                 conVals[ 1] = funVals[ 2];
00582                 std::cout << "conVals[ 1] = " << conVals[ 1] << std::endl;
00583                 objVals[ 0] = funVals[ 0];
00584                 std::cout << "objVals[ 0] = " << objVals[ 0] << std::endl;
00585                 ok = CheckFunctionValues( conVals, funVals[ 0], x[ 0], x[1], x[2], x[3], z[0], z[1], w[0] );
00586                 if( ok == 0){
00587                         std::cout << "FAILED CHECKING FUNCTION VALUES TEST" << std::endl;
00588                         return 0;
00589                 }
00590                 else{
00591                         std::cout << "PASSED CHECKING FUNCTION VALUES TEST" << std::endl;
00592                 }
00593                 // now get gradient and Hessian
00594                 // first define and initialize a unit column vector
00595                 sparseJac = osinstance->getJacobianSparsityPattern();
00596                 std::vector<double> unit_col_vec( n);
00597                 std::vector<double> lagMultipliers( m); 
00598                 std::vector<double> gradVals( m);
00599                 lagMultipliers[ 0] = w[ 0];
00600                 lagMultipliers[ 1] = z[ 0];
00601                 lagMultipliers[ 2] = z[ 1];     
00602                 unsigned int index, kj;
00603                 //return 0;
00604                 for(index = 0; index < n; index++){
00605                         unit_col_vec[ index] = 0;
00606                 }       
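                // for each variable index: a first-order forward sweep in the unit direction
                // e_index yields column index of the Jacobian, and the second-order reverse
                // sweep with the Lagrangian weights would yield row index of the Lagrangian
                // Hessian (its 2*n return values are simply discarded in this illustration)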
00607                 for(index = 0; index < n; index++){
00608                         unit_col_vec[ index] = 1;
00609                         // calculate column i of the Jacobian matrix
00610                         gradVals = f.Forward(1, unit_col_vec);
00611                         unit_col_vec[ index] = 0;
00612                         // get the nonzero gradient values in constraint k
00613                         for(kj = 0; kj < m; kj++){
00614                                 std::cout << "variable " << index << "  row " << kj << "  gradient value" << std::endl;
00615                                 std::cout << "gradient value = " << gradVals[ kj] << std::endl; 
00616                         }
00617                         // get row index of the Hessian of the Lagrangian via a second-order reverse sweep
00618                         std::cout << "CALL f.Reverse -------" << std::endl;
00619                         f.Reverse(2, lagMultipliers);
00620                         std::cout << "FINISH CALL f.Reverse -------" << std::endl;
00621                 }
00622                 // done with CppAD test 
00623                 // do garbage collection
00624                 delete osilreader;
00625                 osilreader = NULL;
00626                 std::cout << "OSILREADER DELETED" << std::endl; 
00627                 //delete[] conVals;
00628                 //delete[] objVals;             
00629                 delete[] x;
00630                 delete[] z;
00631                 delete[] w;
00632         }
00633         catch(const ErrorClass& eclass){
00634                 std::cout << eclass.errormsg << std::endl;
00635         } 
00636 
00637         //
00638         {
00639                 //checking CppAD power, another illustration of CppAD
00640                 size_t n  = 2;
00641                 double x0 = 4;
00642                 double x1 = .5;
00643                 CppADvector< AD<double> > x(n);
00644             x[0]      = x0;
00645             x[1]      = x1;
00646              // declare independent variables and start tape recording
00647              CppAD::Independent(x);
00648              // range space vector 
00649              size_t m = 1;
00650              CppADvector< AD<double> > y(m);
00651              y[0] = CppAD::pow(x[0], x[1]);   // use the AD variables so the dependence is recorded on the tape
00652              // create f: x -> y and stop tape recording
00653              CppAD::ADFun<double> f(x, y); 
00654              // check value 
00655              double check = std::pow(x0, x1);
00656              // forward computation of first partial w.r.t. x[0]
00657              std::vector<double> dx(n);
00658              std::vector<double> dy(m);
00659              dx[0] = 1.;    // unit direction for the partial w.r.t. x[0]
00660              dx[1] = 0.;
00661              dy    = f.Forward(1, dx);
00662              std::cout << "dy =  " <<  dy[ 0] << std::endl;
00663              check = x1 * std::pow(x0, x1-1.);
00664              //ok   &= NearEqual(dy[0], check, 1e-10, 1e-10);
00665              ok &= ( fabs(check - dy[0])/(fabs( check) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00666         }
00667         
00668         {
00669                 
00670                 //checking CppAD sparsity features
00671                 // domain space vector
00672                 size_t n = 3;
00673                 CPPAD_TEST_VECTOR< AD<double> > X(n);
00674                 X[0] = 0.;
00675                 X[1] = 1.;
00676                 X[2] = 2.;
00677                 // declare independent variables and start recording
00678                 CppAD::Independent(X);
00679                 // range space vector
00680                 size_t m = 2;
00681                 CPPAD_TEST_VECTOR< AD<double> > Y(m);
00682                 Y[0] = CppAD::pow(X[0], 2)  + CppAD::pow(X[2], 2);
00683                 Y[1] = -CppAD::pow(X[0], 2) + CppAD::pow(X[1], 2);
00684                 // create f: X -> Y and stop tape recording
00685                 CppAD::ADFun<double> f(X, Y);
00686                 
00687                 // sparsity pattern for the identity matrix
00688                 std::vector<bool> r(n * n);
00689                 size_t i, j;
00690                 for(i = 0; i < n; i++) { 
00691                         for(j = 0; j < n; j++)
00692                                 r[ i * n + j ] = false;
00693                         r[ i * n + i ] = true;
00694                 }
00695                 // compute sparsity pattern for J(x) = F^{(1)} (x)
00696                 f.ForSparseJac(n, r);
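                // ForSparseJac propagates the identity pattern r forward through the tape; the
                // resulting Jacobian sparsity is stored in f and used by RevSparseHes below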
00700                 //std::vector<bool> s(m * m);
00701                 //for(i = 0; i < m; i++){    
00702                 //      for(j = 0; j < m; j++)
00703                 //              s[ i * m + j ] = false;
00704                 //      s[ i * m + i ] = true;
00705             // }
00706             // sparsity pattern for F'(x)
00707             // f.RevSparseJac(m, s);                    
00711                 // compute sparsity pattern for H(x) = F_0^{(2)} (x)
00712                 std::vector<bool> e( m);
00713                 //Vector s(m);
00714                 for(i = 0; i < m; i++)
00715                         e[i] = false;
00716                 e[ 0] = false;
00717                 e[ 1] = true;
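                // with e selecting only the second range component, RevSparseHes returns the
                // sparsity pattern of the Hessian of Y[1] = -X[0]^2 + X[1]^2, so nonzeros are
                // expected only at (0,0) and (1,1)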
00718                 std::vector<bool> h( n*n);
00719                 //Vector h(n * n);
00720                 std::cout << "Computing Sparse Hessian" << std::endl;
00721                 h = f.RevSparseHes(n, e);
00722                 for(i = 0; i < n; i++){
00723                         std::cout << "Row " << i << "  of Hessian " << std::endl;
00724                         for(j = 0; j < n; j++){
00725                                 std::cout << h[ i*n + j] <<  "  ";
00726                         }
00727                         std::cout << std::endl;
00728                 }
00729         }
00730         delete fileUtil;
00731         return 0;
00732 }// end main program
00733 
00734 bool CheckFunctionValues( double *conVals, double objValue,
00735         double x0, double x1, double x2, double x3, double z0, double z1, double w ){
00736         using CppAD::NearEqual;
00737         bool ok  = true;
00738         double checkObj = x0*x0 + 9*x1;
00739         std::cout  << "checkObj = " << checkObj << std::endl;
00740         std::cout  << "objValue = " << objValue << std::endl;
00741         //ok &= NearEqual(objValue, checkObj, 1e-10, 1e-10); 
00742         ok &= ( fabs(checkObj - objValue )/(fabs( checkObj) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00743         double checkCon0 = 33. - 105. + 1.37*x1 + 2*x3 + 5*x1;
00744         std::cout  << "checkCon0 = " << checkCon0 << std::endl;
00745         std::cout  << "conVals = " << *(conVals + 0) << std::endl;
00746         //ok &= NearEqual(*(conVals + 0), checkCon0, 1e-10, 1e-10);
00747         ok &= ( fabs(checkCon0 - *(conVals + 0) )/(fabs( checkCon0) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00748         double checkCon1 = log(x0*x3) + 7*x2;
00749         std::cout  << "checkCon1 = " << checkCon1 << std::endl;
00750         std::cout  << "conVals = " << *(conVals + 1) << std::endl;
00751         //ok &= NearEqual( *(conVals + 1), checkCon1, 1e-10, 1e-10);
00752         ok &= ( fabs(checkCon1 - *(conVals + 1) )/(fabs( checkCon1) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00753         return ok;
00754 }//CheckFunctionValues
00755 //
00756 //
00757 bool CheckGradientValues( SparseJacobianMatrix *sparseJac, double *objGrad,
00758         double x0, double x1, double x2, double x3, double y0, double y1, double w ){
00759         using CppAD::NearEqual;
00760         bool ok  = true;
00761         // first the objective function gradient
00762         double checkObjPartial0 = 2*x0;
00763         //ok &= NearEqual( *(objGrad + 0), checkObjPartial0, 1e-10, 1e-10); 
00764         ok &= ( fabs(checkObjPartial0 - *(objGrad + 0) )/(fabs( checkObjPartial0) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00765         double checkObjPartial1 = 9;
00766         //ok &= NearEqual( *(objGrad + 1), checkObjPartial1, 1e-10, 1e-10); 
00767         ok &= ( fabs(checkObjPartial1 - *(objGrad + 1) )/(fabs( checkObjPartial1) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00768         double checkObjPartial2 = 0;
00769         //ok &= NearEqual( *(objGrad + 2), checkObjPartial2, 1e-10, 1e-10); 
00770         ok &= ( fabs(checkObjPartial2 - *(objGrad + 2) )/(fabs( checkObjPartial2) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00771         // check the constraint gradients
00772         // row 0 gradient -- there are nonzero partials for variables 1 and 3
00773         double checkCon0Partial1 = 1.37 + 5.0;
00774         //ok &= NearEqual( *(sparseJac->values + 0), checkCon0Partial1, 1e-10, 1e-10); 
00775         ok &= ( fabs(checkCon0Partial1 - *(sparseJac->values + 0) )/(fabs( checkCon0Partial1) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00776         double checkCon0Partial3 = 2.;
00777         //ok &= NearEqual( *(sparseJac->values + 1), checkCon0Partial3, 1e-10, 1e-10); 
00778         ok &= ( fabs(checkCon0Partial3 - *(sparseJac->values + 1) )/(fabs( checkCon0Partial3) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00779         // row 1 gradient -- there are nonzero partials for variables 0, 2, and 3
00780         double checkCon1Partial2 = 7;
00781         //ok &= NearEqual( *(sparseJac->values + 2), checkCon1Partial2, 1e-10, 1e-10);  
00782         ok &= ( fabs(checkCon1Partial2 - *(sparseJac->values + 2) )/(fabs( checkCon1Partial2) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00783         double checkCon1Partial0 = 1./x0;
00784         //ok &= NearEqual( *(sparseJac->values + 3), checkCon1Partial0, 1e-10, 1e-10); 
00785         ok &= ( fabs(checkCon1Partial0 - *(sparseJac->values + 3) )/(fabs( checkCon1Partial0) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00786         double checkCon1Partial3 = 1./x3;
00787         //ok &= NearEqual( *(sparseJac->values + 4), checkCon1Partial3, 1e-10, 1e-10); 
00788         ok &= ( fabs(checkCon1Partial3 - *(sparseJac->values + 4) )/(fabs( checkCon1Partial3) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL);
00789         return ok;
00790 }//CheckGradientValues
00791 //
00792 bool CheckHessianUpper( SparseHessianMatrix *sparseHessian , 
00793         double x0, double x1, double x2, double x3, double z0, double z1, double w ){
00794         using CppAD::NearEqual;
00795         bool ok  = true;
00796         int hessValuesIdx = 0;
00797         //assert( sparseHessian->hessDimension == n * (n + 1) / 2 )
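        // the Lagrangian is L = w*(x0^2 + 9*x1) + z0*(linear constraint 0) + z1*(log(x0*x3) + 7*x2);
        // since log(x0*x3) = log(x0) + log(x3), its only nonzero second partials are
        // L_x0x0 = 2*w - z1/x0^2, L_x3x3 = -z1/x3^2, and the cross partial L_x0x3 = 0,
        // which are exactly the three entries checked below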
00798         // L_00 = 2 * w - z1 / ( x0 * x0 )
00799         double check = 2. * w - z1 / (x0 * x0);
00800         ok &= NearEqual(*(sparseHessian->hessValues + hessValuesIdx++), check, 1e-10, 1e-10); 
00801         if(ok == false) std::cout << "FAILED ONE" << std::endl;
00802         ok &= NearEqual(*(sparseHessian->hessValues + hessValuesIdx++), 0., 1e-10, 1e-10);
00803         if(ok == false) std::cout << "FAILED TWO" << std::endl;
00804         // L_22 = - z1 / (x3 * x3)
00805         check = - z1 / (x3 * x3);
00806         ok &= NearEqual(*(sparseHessian->hessValues + hessValuesIdx++), check, 1e-10, 1e-10);
00807         if(ok == false) std::cout << "FAILED THREE" << std::endl;
00808         return ok;
00809 }//CheckHessianUpper
