/home/coin/SVN-release/OS-2.4.1/examples/algorithmicDiff/OSAlgorithmicDiffTest.cpp

Go to the documentation of this file.
00001 /* $Id: OSAlgorithmicDiffTest.cpp 2698 2009-06-09 04:14:07Z kmartin $ */
00073 #include <cstddef>
00074 #include <cstdlib>
00075 #include <cctype>
00076 #include <cassert>
00077 #include <stack>
00078 #include <cppad/cppad.hpp>
00079 #include <iostream>
00080 
00081 // CoinHelperFunctions <has cmath>
00082 #include "CoinHelperFunctions.hpp"
00083 #include "OSInstance.h"
00084 #include "OSiLWriter.h"
00085 #include "OSParameters.h"
00086 #include "OSnLNode.h"
00087 #include "OSErrorClass.h"
00088 #include "OSFileUtil.h"  
00089 #include "OSiLReader.h"
00090 #include "OSInstance.h"
00091 #include "OSExpressionTree.h"
00092 #include "OSnLNode.h"
00093 #include "OSDataStructures.h"
00094 
00095 
00096 
00097 #include <vector>  
00098 #include <map> 
00099 #include <string>
00100  
00101 
// ---------------------------------------------------------------------------
// Driver for the OS algorithmic-differentiation example.  It parses an OSiL
// model file, exercises the OSInstance AD call-backs (forwardAD/reverseAD,
// function values, gradients, Lagrangian Hessians), checks the results
// against closed-form values, and then repeats similar computations directly
// with the CppAD package.
// NOTE(review): this is a doxygen-generated source listing; the 5-digit
// prefixes are the original source line numbers, and gaps in that numbering
// are lines the generator omitted — the true source has more lines than shown.
// ---------------------------------------------------------------------------
00102 int  main(){
00103         WindowsErrorPopupBlocker();
00104         using std::cout;
00105         using std::endl;
00106         using CppAD::AD;
00107         //using CppAD::NearEqual;
00108         using CppAD::vector;
00109         std::cout.precision(12);
00110         // error checking functions
// Forward declarations of the checkers defined at the bottom of this file.
00111         bool CheckFunctionValues( double *conVals, double objValue,
00112                 double x0, double x1, double x2, double x3, double y0, double y1, double z );
00113         bool CheckHessianUpper( SparseHessianMatrix *sparseHessian, 
00114                 double x0, double x1, double x2, double x3, double y0, double y1, double z );
00115         bool CheckGradientValues( SparseJacobianMatrix *sparseJac, double *objGrad,
00116                 double x0, double x1, double x2, double x3, double y0, double y1, double z );
00117         bool ok = true;
00118         int k, idx;
00119         //
00120         // get the problem data
00121         //
00122         FileUtil *fileUtil = NULL; 
00123         std::string osilFileName;
00124         std::string osil;
00125         // get the input file
00126         const char dirsep =  CoinFindDirSeparator();
00127         // Set directory containing mps data files.
00128         std::string dataDir;
00129     dataDir = dirsep == '/' ? "../data/" : "..\\data\\";
00130         //osilFileName =  dataDir + "HS071_NLP.osil";
00131         osilFileName =  dataDir   + "CppADTestLag.osil";
00132         std::cout  << "osilFileName  =  " << osilFileName << std::endl;
00133         fileUtil = new FileUtil();
// Read the entire OSiL instance file into a single string for the parser.
00134         osil = fileUtil->getFileAsString( &osilFileName[0]);    
00135         //
00136         // create OSReader and OSInstance objects
00137         OSiLReader *osilreader = NULL;
00138         OSInstance *osinstance = NULL;
00139         // create reader, parse the OSiL, and generate the OSInstance object
// NOTE(review): osilreader is deleted only at the normal end of this try
// block; if an ErrorClass is thrown part-way through, it (and x/z/w below)
// leak.  Harmless for a test driver, but worth knowing.
00140         try{    
00141                 // a counter
00142                 int kjl;
00143                 osilreader = new OSiLReader();
00144                 osinstance = osilreader->readOSiL( osil);
00145                 std::vector<double> funVals(3);
00146                 std::vector<double> dfunVals(6);
00147                 double *conVals = NULL;
00148                 //conVals = new double[ 2];
00149                 double *objVals = NULL;
00150                 //objVals = new double[ 1];
00151 
00165                 //
00166                 // first initialize the nonlinear structures for call backs
00167                 std::cout << "Initialize Nonlinear Structures" << std::endl;
00168                 osinstance->initForAlgDiff( );
00169                 //osinstance->getJacobianSparsityPattern( );
00170
00171
00172
// Map from the (possibly sparse) indices of variables that appear nonlinearly
// to a dense 0..n-1 indexing — presumably the ordering used by the AD tape;
// confirm against the OSInstance documentation.
00178                 std::map<int, int> varIndexMap;
00179                 std::map<int, int>::iterator posVarIndexMap;
00180                 varIndexMap = osinstance->getAllNonlinearVariablesIndexMap( );
00185                 for(posVarIndexMap = varIndexMap.begin(); posVarIndexMap != varIndexMap.end(); ++posVarIndexMap){
00186                                 std::cout <<  "Variable Index = "   << posVarIndexMap->first  << std::endl ;
00187                 }
00188                 std::cout << "Number of nonlinear variables =  " << varIndexMap.size() << std::endl;
00189
00190                 //
00191
00192                 // get the number of nonlinear terms
00193
00194                 int mm = osinstance->getNumberOfNonlinearExpressionTreeModIndexes();
00195
00196                 int jj;
00197
00198                 for(jj = 0; jj < mm; jj++){
00199                         std::cout << osinstance->getNonlinearExpressionTreeModIndexes()[ jj] << std::endl;
00200                 }
00201
00202                 std::cout << "Number of unique nonlinear terms =  " <<  mm << std::endl;
00203                 //return 0;
00204
00205
00206
00207                 // domain space vector
00208                 size_t n  = varIndexMap.size(); // three variables
00209                 // range space vector
00210                 size_t m = 3; // Lagrangian has an objective and two constraints
00211
00212                 std::vector<double> x0( n);
// The three nonlinear variables (model variables 0, 1 and 3 per the comments)
// in the dense tape ordering established by varIndexMap.
00217                 x0[0] = 1; // the value for variable x0
00218                 x0[1] = 5; // the value for variable x1
00219                 x0[2] = 5; // the value for variable x3                         
00220                 std::cout << "CALL forward" << std::endl;
// Zero-order forward sweep: function values of objective + two constraints.
00221                 funVals = osinstance->forwardAD(0, x0);
00222                 for( kjl = 0; kjl < 3; kjl++){
00223                         std::cout << "forward 0 " << funVals[ kjl] << std::endl;
00224                 }
00225                 // get the third column of the Jacobian from a forward sweep
00226                 std::vector<double> x1( n);
00227                 x1[0] = 0;
00228                 x1[1] = 0;
00229                 x1[2] = 1;
00230                 std::cout << "Now get the third column of the Jacobian forwardAD(1, x1)"  << std::endl;
// First-order forward sweep in direction e_3 = (0,0,1).
00231                 funVals = osinstance->forwardAD(1, x1);
00232                 for( kjl = 0; kjl < 3; kjl++){
00233                         std::cout << "forward 1 " << funVals[ kjl] << std::endl;
00234                 }
00235
00244                 x1[0] = 1;
00245                 x1[1] = 0;
00246                 x1[2] = 1;
00247                 // recalculate the forward sweep with the new x1 vector
00248                 funVals = osinstance->forwardAD(1, x1);
00249                 std::vector<double> x2( n);
00250                 x2[0] = 0;
00251                 x2[1] = 0;
00252                 x2[2] = 0;
00253                 std::cout << "Now calcuate forwardAD(2, x2)"  << std::endl;
// Second-order forward sweep with a zero second-order Taylor coefficient;
// the order-2 results are directional second derivatives along x1.
00254                 funVals = osinstance->forwardAD(2, x2);
00255                 for( kjl = 0; kjl < 3; kjl++){
00256                         std::cout << "forward 2 " << funVals[ kjl] << std::endl;
00257                 }                
00258
00266                 std::vector<double> vlambda(3);
00267                 vlambda[0] = 0;
00268                 vlambda[1] = 0;
00269                 vlambda[2] = 1;
00270                 // reverse sweep to get third row of Jacobian 
00271                 std::cout << "Now get the third row of the Jacobian reverseAD(1, vlambda)"  << std::endl;
// Re-run the zero-order forward sweep so the reverse sweep below is taken
// about x0 (the order-1/2 sweeps above changed the stored Taylor coefficients).
00272                 osinstance->forwardAD(0, x0);
00273                 funVals = osinstance->reverseAD(1, vlambda);
00274                 for( kjl = 0; kjl < 3; kjl++){
00275                         std::cout << "reverse 1 " << funVals[ kjl] << std::endl;
00276                 }
00277                 // now get the Hessian of the Lagrangian of objective and 
00278                 // with the following multipliers
00279                 vlambda[0] = 1;
00280                 vlambda[1] = 2;
00281                 vlambda[2] = 1;
// Order-1 forward sweep in direction e_3 so the order-2 reverse sweep
// yields the third row/column of the Lagrangian Hessian (assumption —
// confirm against the CppAD Reverse documentation).
00287                 x1[0] = 0;
00288                 x1[1] = 0;
00289                 x1[2] = 1;
00290                 funVals = osinstance->forwardAD(1, x1);          
00291
// reverseAD(2, ...) returns 2*n values: for each variable, the first partial
// of the weighted Lagrangian (even slots) and the Hessian-direction product
// (odd slots) — see the loops below.
00302                 dfunVals = osinstance->reverseAD(2, vlambda);
00303                 // get the first partials of the Lagrangian
00304                 std::cout << "Here are the first partials of the Lagrangain" << std::endl;
00305                 for(int kjl = 0; kjl <= 4; kjl+=2){
00306                         std::cout << dfunVals[ kjl] << std::endl;
00307                 }
00313                 std::cout << "Here is the third row (column) of Hessian of Lagrangian" << std::endl;
00314                 for(int kjl = 1; kjl <= 5; kjl+=2){
00315                         std::cout << dfunVals[ kjl] << std::endl;
00316                 }
// Full-dimension test point and multipliers for the OSInstance API calls.
00322                 double* x = new double[4]; //primal variables
00323                 double* z = new double[2]; //Lagrange multipliers on constraints
00324                 double* w = new double[1]; //Lagrange multiplier on objective
00325                 x[ 0] = 1;    // primal variable 0
00326                 x[ 1] = 5;    // primal variable 1
00327                 x[ 2] = 10;   // primal variable 2
00328                 x[ 3] = 5;    // primal variable 3
00329                 z[ 0] = 2;    // Lagrange multiplier on constraint 0
00330                 z[ 1] = 1;    // Lagrange multiplier on constraint 1
00331                 w[ 0] = 1;    // Lagrange multiplier on the objective function
00332
// Force function evaluation through the AD tape instead of the OS expression
// tree, so the values below exercise the AD machinery.
00362                 osinstance->bUseExpTreeForFunEval = false;
00363                 std::cout << "Calculate objective, idx = -1"  << std::endl;                     
00364                 std::cout << "obj value = " << osinstance->calculateFunctionValue(-1, x,  true) << std::endl;
00365
00366                 std::cout << "Calculate  first constraint, idx = 0"  << std::endl;                      
00367                 std::cout << "constraint index 0 value = " << osinstance->calculateFunctionValue(0, x,  true) << std::endl;
00368                  
00369                 std::cout << "Now use calculateAllConstraintFunctionValues"  << std::endl;                      
00370                 conVals = osinstance->calculateAllConstraintFunctionValues(x, true);
00371                 for( idx = 0; idx < osinstance->getConstraintNumber(); idx++){
00372                         std::cout << "CONSTRAINT FUNCTION INDEX = " <<  idx << "  CONSTRAINT FUNCTION VALUE =  "  << *(conVals + idx) << std::endl;
00373                 }
00374                 //
00375                 std::cout << "Now use calculateAllObjectiveFunctionValues"  << std::endl;       
00376                 objVals = osinstance->calculateAllObjectiveFunctionValues( x, NULL, NULL, true, 0);
00377                 for( idx = 0; idx < osinstance->getObjectiveNumber(); idx++){
00378                         std::cout << "OBJECTIVE FUNCTION  INDEX = " << idx <<  "  OBJECTIVE FUNCTION VALUE = "  << *(objVals + idx) << std::endl;
00379                 }
00380                 ok = CheckFunctionValues( conVals, *objVals, x[ 0], x[1], x[2], x[3],  z[0], z[1], w[0] );
00381                 if( ok == 0){
00382                         std::cout << "FAILED CHECKING FUNCTION VALUES TEST" << std::endl;
00383                         return 0;
00384                 }
00385                 else{
00386                         std::cout << "PASSED CHECKING FUNCTION VALUES TEST" << std::endl;
00387                 }
00388
00395                 std::cout << "PLACE CALL TO JACOBIAN SPARSITY PATTERN"   << std::endl;
00396                 SparseJacobianMatrix *sparseJac;
00397                 sparseJac = osinstance->getJacobianSparsityPattern();           
00398                 // print out just the sparsity pattern
00399                 std::cout << "JACOBIAN SPARSITY PATTERN"   << std::endl;
00400                 std::cout << "JACOBIAN START SIZE "   <<  sparseJac->startSize << std::endl;
00401                 for(idx = 0; idx < osinstance->getConstraintNumber(); idx++){
00402                         // some solvers (e.g. reduced gradient solvers) may want to know which values of the
00403                         // Jacobian matrix are constant, i.e. linear, sparseJac->conVals is the number of constant
00404                         // terms in the gradient for each rowt, the first conVals terms are constant, when getting
00405                         std::cout << "number constant terms in constraint "   <<  idx << " is " 
00406                         << *(sparseJac->conVals + idx)  << std::endl;
00407                         for(k = *(sparseJac->starts + idx); k < *(sparseJac->starts + idx + 1); k++){
00408                                 std::cout << "row idx = " << idx <<  "  col idx = "<< *(sparseJac->indexes + k) << std::endl;
00409                         }
00410                 }       
00411
00412                 SparseHessianMatrix *sparseHessian;
00413                 // the Hessian test
00414                 // get the sparsity pattern -- many solvers want to initialize with just the sparsity
00415                 std::cout << "GET LAGRANGIAN HESSIAN SPARSITY PATTERN"   << std::endl;
00416                 sparseHessian = osinstance->getLagrangianHessianSparsityPattern( );
00417                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00418                         std::cout <<  "Row Index = " << *(sparseHessian->hessRowIdx + idx) ;
00419                         std::cout <<  "  Column Index = " << *(sparseHessian->hessColIdx + idx) << std::endl;
00420                 }                
00421
00437                 double *objGrad;
00438                 std::cout << "OBJECTIVE FUNCTION GRADIENT"   << std::endl;
00439                 // in our implementation the objective function is a dense gradient
// The -1 argument selects the (single) objective function.
00440                 objGrad = osinstance->calculateObjectiveFunctionGradient( x, NULL, NULL,  -1, false, 1);
00441                 for(idx = 0; idx < osinstance->getVariableNumber(); idx++){
00442                         std::cout << "col idxx = " << idx << "  value =  " << *(objGrad + idx)  << std::endl;
00443                 }
00444                 std::cout << "CONSTRAINT JACOBIAN MATRIX"   << std::endl;
00445                 // now make the gradient calculations and fill in the sparse Jacobian matrix
00446                 sparseJac = osinstance->calculateAllConstraintFunctionGradients( x, NULL, NULL,  false, 1);
00447                 for(idx = 0; idx < osinstance->getConstraintNumber(); idx++){
00448                         for(k = *(sparseJac->starts + idx); k < *(sparseJac->starts + idx + 1); k++){
00449                                 std::cout << "row idx = " << idx <<  "  col idx = "<< *(sparseJac->indexes + k)
00450                                 << " value = " << *(sparseJac->values + k) << std::endl;
00451                         }
00452                 }
00453                 ok = CheckGradientValues( sparseJac, objGrad, x[ 0], x[1], x[2], x[3], z[0], z[1], w[0] );
00454                 if( ok == 0){
00455                         std::cout << "FAILED THE GRADIENT TEST" << std::endl;
00456                         return 0;
00457                 }
00458                 else{
00459                         std::cout << "PASSED THE GRADIENT TEST" << std::endl;
00460                 }                                         
00470                 //first iteration 
00471                 std::cout << "GET LAGRANGIAN HESSIAN FIRST TIME"   << std::endl;
00472                 sparseHessian = osinstance->calculateLagrangianHessian( x, w,  z,  false, 2);
00473                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00474                         std::cout << "row idx = " << *(sparseHessian->hessRowIdx + idx) <<  
00475                         "  col idx = "<< *(sparseHessian->hessColIdx + idx)
00476                         << " value = " << *(sparseHessian->hessValues + idx) << std::endl;
00477                 }
00478                 ok = CheckHessianUpper( sparseHessian, x[0],  x[1], x[2],  x[3],  z[0], z[1], w[0]);
00479                 if( ok == 0){
00480                         std::cout << "FAILED THE FIRST HESSIAN TEST" << std::endl;
00481                         return 0; 
00482                 }
00483                 else{
00484                         std::cout << "PASSED THE FIRST HESSIAN TEST" << std::endl;
00485                 }
00486                 // now change an x value, we don't rebuild the tree, however new_x 
00487                 // must be set to true
00488                 x[0] = 5;
00489
00490
00491
00492                 std::cout << "NOW GET LAGRANGIAN HESSIAN SECOND TIME FOR x[0] = 5"   << std::endl;
00493
// NOTE(review): this relies on calculateAllObjectiveFunctionValues with
// new_x = true and highestOrder = 2 refreshing, as a side effect, the values
// behind the sparseHessian pointer obtained above — confirm against the
// OSInstance API documentation.
00501                 osinstance->calculateAllObjectiveFunctionValues( x, w, z, true, 2);
00515                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00516                         std::cout << "row idx = " << *(sparseHessian->hessRowIdx + idx) <<  
00517                         "  col idx = "<< *(sparseHessian->hessColIdx + idx)
00518                         << " value = " << *(sparseHessian->hessValues + idx) << std::endl;
00519                 }
00520                 ok = CheckHessianUpper( sparseHessian , x[0],  x[1], x[2], x[3],  z[0], z[1], w[0] );
00521                 if( ok == 0){
00522                         std::cout << "FAILED THE SECOND HESSIAN TEST" << std::endl;
00523                         return 0;
00524                 }
00525                 else{
00526                         std::cout << "PASSED THE SECOND HESSIAN TEST" << std::endl  << std::endl ;
00527                 }
00531                 std::cout << "HERE IS ROW 1 OF JACOBIAN MATRIX" << std::endl; 
00532                 idx = 1;
00533                 for(k = *(sparseJac->starts + idx); k < *(sparseJac->starts + idx + 1); k++){
00534                         std::cout << "row idx = " << idx <<  "  col idx = "<< *(sparseJac->indexes + k)
00535                                 << " value = " << *(sparseJac->values + k) << std::endl;
00536                 }
00537                 std::cout << std::endl; 
// calculateHessian(x, 1, true): Hessian of constraint 1 alone (new_x = true).
00544                 sparseHessian = osinstance->calculateHessian(x, 1, true);
00545                 std::cout << "HERE IS ROW 1 HESSIAN MATRIX" << std::endl;
00546                 for(idx = 0; idx < sparseHessian->hessDimension; idx++){
00547                         std::cout << "row idx = " << *(sparseHessian->hessRowIdx + idx) <<  
00548                         "  col idx = "<< *(sparseHessian->hessColIdx + idx)
00549                         << " value = " << *(sparseHessian->hessValues + idx) << std::endl;
00550                 }
00551                 //
00552                 // adjust the Lagrange multipliers to correspond to finding Hessian of constraint 1
00553                 z[ 0] = 0;  // Lagrange multiplier on constraint 0
00554                 z[ 1] = 1;  // Lagrange multiplier on constraint 1
00555                 w[ 0] = 0;  // Lagrange multiplier on the objective function
00556                 ok = CheckHessianUpper( sparseHessian , x[0],  x[1], x[2], x[3], z[0], z[1], w[0] );
00557                 if( ok == 0){
00558                         std::cout << "FAILED THE THIRD HESSIAN TEST" << std::endl;
00559                         return 0;
00560                 }
00561                 else{
00562                         std::cout << "PASSED THE THIRD HESSIAN TEST" << std::endl  << std::endl ;
00563                 }
00564                 //set x[0] back to its original value of 1
00565                 x[ 0] = 1;
00566                 //return 0;     
00567                 //
00568                 //
00569                 // now work directly with the CppAD package instead of OSInstance API
00570                 //
00571                 n = 4;
00572                 m = 3;
00573                 CppADvector< AD<double> > X(n);
00574                 CppADvector< AD<double> > Y(m);
00575                 X[0] = 5;
00576                 X[1] = 5;
00577                 X[2] = 0;
00578                 X[3] = 1;
00579                 // declare independent variables and start tape recording
00580                 std::cout << "Start Taping" << std::endl;
00581                 CppAD::Independent( X);
00582                 // range space vector 
00583                 // we include the constant terms in the CppAD functions
00584                 Y[ 0] =  CppAD::pow(X[0], 2) + 9*X[1];
00585                 Y[ 1] =  33 - 105 + 1.37*X[1] + 2*X[3] + 5*X[1] ;
00586                 Y[ 2] =  log(X[0]*X[3]) + 7*X[2] ;
00587                 // create f: x -> y and stop tape recording
00588                 CppAD::ADFun<double> f(X, Y); 
00589                 std::cout << "Stop Taping" << std::endl;
00590                 // get function values
00591                 std::vector<double> x_vec( n);
00592                 x_vec[ 0] = x[ 0];
00593                 x_vec[ 1] = x[ 1];
00594                 x_vec[ 2] = x[ 2];
00595                 x_vec[ 3] = x[ 3];
00596                 funVals = f.Forward(0, x_vec);
// NOTE(review): conVals and objVals still point into arrays owned by
// osinstance (returned by the calculateAll* calls above); the assignments
// below write into that library-owned storage — confirm this is intended.
00597                 conVals[ 0] = funVals[ 1];
00598                 std::cout << "conVals[ 0] = " << conVals[ 0] << std::endl;
00599                 conVals[ 1] = funVals[ 2];
00600                 std::cout << "conVals[ 1] = " << conVals[ 1] << std::endl;
00601                 objVals[ 0] = funVals[ 0];
00602                 std::cout << "objVals[ 0] = " << objVals[ 0] << std::endl;
00603                 ok = CheckFunctionValues( conVals, funVals[ 0], x[ 0], x[1], x[2], x[3], z[0], z[1], w[0] );
00604                 if( ok == 0){
00605                         std::cout << "FAILED CHECKING FUNCTION VALUES TEST" << std::endl;
00606                         return 0;
00607                 }
00608                 else{
00609                         std::cout << "PASSED CHECKING FUNCTION VALUES TEST" << std::endl;
00610                 }
00611                 // now get gradient and Hessian
00612                 // first define and initialze unit vector vector
00613                 sparseJac = osinstance->getJacobianSparsityPattern();
00614                 std::vector<double> unit_col_vec( n);
00615                 std::vector<double> lagMultipliers( m); 
00616                 std::vector<double> gradVals( m);
00617                 lagMultipliers[ 0] = w[ 0];
00618                 lagMultipliers[ 1] = z[ 0];
00619                 lagMultipliers[ 2] = z[ 1];     
00620                 unsigned int index, kj;
00621                 //return 0;
00622                 for(index = 0; index < n; index++){
00623                         unit_col_vec[ index] = 0;
00624                 }       
00625                 for(index = 0; index < n; index++){
00626                         unit_col_vec[ index] = 1;
00627                         // calculate column i of the Jacobian matrix
00628                         gradVals = f.Forward(1, unit_col_vec);
00629                         unit_col_vec[ index] = 0;
00630                         // get the nonzero gradient values in constraint k
00631                         for(kj = 0; kj < m; kj++){
00632                                 std::cout << "variable " << index << "  row " << kj << "  gradient value" << std::endl;
00633                                 std::cout << "gradient value = " << gradVals[ kj] << std::endl; 
00634                         }
00635                         // get row i of the Lagrangian function!!!
00636                         std::cout << "CALL f.Reverse -------" << std::endl;
00637                         f.Reverse(2, lagMultipliers);
00638                         std::cout << "FINISH CALL f.Reverse -------" << std::endl;
00639                 }
00640                 // done with CppAD test 
00641                 // do garbage collection
00642                 delete osilreader;
00643                 osilreader = NULL;
00644                 std::cout << "OSILREADER DELETED" << std::endl; 
00645                 //delete[] conVals;
00646                 //delete[] objVals;             
00647                 delete[] x;
00648                 delete[] z;
00649                 delete[] w;
00650         }
00651         catch(const ErrorClass& eclass){
00652                 std::cout << eclass.errormsg << std::endl;
00653         } 
00654
00655         //
00656         {
00657                 //checking CppAD power, another illustration of CppAD
00658                 size_t n  = 2;
00659                 double x0 = 4;
00660                 double x1 = .5;
00661                 CppADvector< AD<double> > x(n);
00662             x[0]      = x0;
00663             x[1]      = x1;
00664              // declare independent variables and start tape recording
00665              CppAD::Independent(x);
00666              // range space vector 
00667              size_t m = 1;
00668              CppADvector< AD<double> > y(m);
// NOTE(review): std::pow(x0, x1) operates on the plain doubles, not on the
// AD variables x[0], x[1] — the tape therefore records y[0] as a constant
// and the derivative computed below differentiates a constant function.
// CppAD::pow(x[0], x[1]) was presumably intended; confirm (the resulting
// ok flag is not checked afterwards, so the discrepancy goes unnoticed).
00669              y[0] = std::pow(x0, x1);
00670              // create f: x -> y and stop tape recording
00671              CppAD::ADFun<double> f(x, y); 
00672              // check value 
00673              double check = std::pow(x0, x1);
00674              // forward computation of first partial w.r.t. x[0]
00675              std::vector<double> dx(n);
00676              std::vector<double> dy(m);
00677              dx[0] = 4.;
00678              dx[1] = 1/2.;
00679              dy    = f.Forward(1, dx);
00680              std::cout << "dy =  " <<  dy[ 0] << std::endl;
00681              check = x1 * std::pow(x0, x1-1.);
00682              //ok   &= NearEqual(dy[0], check, 1e-10, 1e-10);
00683              ok = ( fabs(check - dy[0])/(fabs( check) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00684         }
00685
00686         {
00687
00688                 //checking CppAD sparsity features
00689                 // domain space vector
00690                 size_t n = 3;
00691                 CPPAD_TEST_VECTOR< AD<double> > X(n);
00692                 X[0] = 0.;
00693                 X[1] = 1.;
00694                 X[2] = 2.;
00695                 // declare independent variables and start recording
00696                 CppAD::Independent(X);
00697                 // range space vector
00698                 size_t m = 2;
00699                 CPPAD_TEST_VECTOR< AD<double> > Y(m);
00700                 Y[0] = CppAD::pow(X[0], 2)  + CppAD::pow(X[2], 2);
00701                 Y[1] = -CppAD::pow(X[0], 2) + CppAD::pow(X[1], 2);
00702                 // create f: X -> Y and stop tape recording
00703                 CppAD::ADFun<double> f(X, Y);
00704
00705                 // sparsity pattern for the identity matrix
// NOTE(review): the indentation of the next loop is misleading but the code
// is correct — the inner for governs only the r[i*n+j] = false statement;
// the diagonal assignment then runs once per i, yielding the identity pattern.
00706                 std::vector<bool> r(n * n);
00707                 size_t i, j;
00708                 for(i = 0; i < n; i++) { 
00709                         for(j = 0; j < n; j++)
00710                                 r[ i * n + j ] = false;
00711                                 r[ i * n + i ] = true;
00712                 }
00713                 // compute sparsity pattern for J(x) = F^{(1)} (x)
00714                 f.ForSparseJac(n, r);
00718                 //std::vector<bool> s(m * m);
00719                 //for(i = 0; i < m; i++){    
00720                 //      for(j = 0; j < m; j++)
00721                 //              s[ i * m + j ] = false;
00722                 //      s[ i * m + i ] = true;
00723             // }
00724             // sparsity pattern for F'(x)
00725             // f.RevSparseJac(m, s);                    
00729                 // compute sparsity pattern for H(x) = F_0^{(2)} (x)
00730                 std::vector<bool> e( m);
00731                 //Vector s(m);
00732                 for(i = 0; i < m; i++)
00733                 e[i] = false;
00734                 e[ 0] = true;
00735                 e[ 1] = false;
00736                 std::vector<bool> h( n*n);
00737                 //Vector h(n * n);
00738                 std::cout << "Computing Sparse Hessian" << std::endl;
00739                 h = f.RevSparseHes(n, e);
00740                 for(i = 0; i < n; i++){
00741                         std::cout << "Row " << i << "  of Hessian " << std::endl;
00742                         for(j = 0; j < n; j++){
00743                                 std::cout << h[ i*n + j] <<  "  ";
00744                         }
00745                         std::cout << std::endl;
00746                 }
00747         }
00748         delete fileUtil;
00749         std::cout << "\nTEST OF ALGORITHMIC DIFFERENTIATION CONCLUDED SUCCESSFULLY\n";
00750         return 0;
00751 }// end main program
00752 
00753 bool CheckFunctionValues( double *conVals, double objValue,
00754         double x0, double x1, double x2, double x3, double z0, double z1, double w ){
00755         using CppAD::NearEqual;
00756         bool ok  = true;
00757         double checkObj = x0*x0 + 9*x1;
00758         std::cout  << "checkObj = " << checkObj << std::endl;
00759         std::cout  << "objValue = " << objValue << std::endl;
00760         //ok &= NearEqual(objValue, checkObj, 1e-10, 1e-10); 
00761         ok = ( fabs(checkObj - objValue )/(fabs( checkObj) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00762         double checkCon0 = 33. - 105. + 1.37*x1 + 2*x3 + 5*x1;
00763         std::cout  << "checkCon0 = " << checkCon0 << std::endl;
00764         std::cout  << "conVals = " << *(conVals + 0) << std::endl;
00765         //ok &= NearEqual(*(conVals + 0), checkCon0, 1e-10, 1e-10);
00766         ok = ( fabs(checkCon0 - *(conVals + 0) )/(fabs( checkCon0) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00767         double checkCon1 = log(x0*x3) + 7*x2;
00768         std::cout  << "checkCon1 = " << checkCon1 << std::endl;
00769         std::cout  << "conVals = " << *(conVals + 1) << std::endl;
00770         //ok &= NearEqual( *(conVals + 1), checkCon1, 1e-10, 1e-10);
00771         ok = ( fabs(checkCon1 - *(conVals + 1) )/(fabs( checkCon1) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00772         return ok;
00773 }//CheckFunctionValues
00774 //
00775 //
00776 bool CheckGradientValues( SparseJacobianMatrix *sparseJac, double *objGrad,
00777         double x0, double x1, double x2, double x3, double y0, double y1, double w ){
00778         using CppAD::NearEqual;
00779         bool ok  = true;
00780         // first the objective function gradient
00781         double checkObjPartial0 = 2*x0;
00782         //ok &= NearEqual( *(objGrad + 0), checkObjPartial0, 1e-10, 1e-10); 
00783         ok = ( fabs(checkObjPartial0 - *(objGrad + 0) )/(fabs( checkObjPartial0) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00784         double checkObjPartial1 = 9;
00785         //ok &= NearEqual( *(objGrad + 1), checkObjPartial1, 1e-10, 1e-10); 
00786         ok = ( fabs(checkObjPartial1 - *(objGrad + 1) )/(fabs( checkObjPartial1) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00787         double checkObjPartial2 = 0;
00788         //ok &= NearEqual( *(objGrad + 2), checkObjPartial2, 1e-10, 1e-10); 
00789         ok = ( fabs(checkObjPartial2 - *(objGrad + 2) )/(fabs( checkObjPartial2) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00790         // get the constrating gradient
00791         // row 0 gradient -- there are nonzero partials for variables 1 and 2
00792         double checkCon0Partial1 = 1.37 + 5.0;
00793         //ok &= NearEqual( *(sparseJac->values + 0), checkCon0Partial1, 1e-10, 1e-10); 
00794         ok = ( fabs(checkCon0Partial1 - *(sparseJac->values + 0) )/(fabs( checkCon0Partial1) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00795         double checkCon0Partial3 = 2.;
00796         //ok &= NearEqual( *(sparseJac->values + 1), checkCon0Partial3, 1e-10, 1e-10); 
00797         ok = ( fabs(checkCon0Partial3 - *(sparseJac->values + 1) )/(fabs( checkCon0Partial3) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00798         // row 1 gradient -- there are nonzero partials for variables 0 and 2
00799         double checkCon1Partial2 = 7;
00800         //ok &= NearEqual( *(sparseJac->values + 2), checkCon1Partial2, 1e-10, 1e-10);  
00801         ok = ( fabs(checkCon1Partial2 - *(sparseJac->values + 2) )/(fabs( checkCon1Partial2) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00802         double checkCon1Partial0 = 1./x0;
00803         //ok &= NearEqual( *(sparseJac->values + 3), checkCon1Partial0, 1e-10, 1e-10); 
00804         ok = ( fabs(checkCon1Partial0 - *(sparseJac->values + 3) )/(fabs( checkCon1Partial0) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00805         double checkCon1Partial3 = 1./x3;
00806         //ok &= NearEqual( *(sparseJac->values + 4), checkCon1Partial3, 1e-10, 1e-10); 
00807         ok = ( fabs(checkCon1Partial3 - *(sparseJac->values + 4) )/(fabs( checkCon1Partial3) + OS_NEAR_EQUAL) <= OS_NEAR_EQUAL) ? true : false;
00808         return ok;
00809 }//CheckGradientValues
00810 //
00811 bool CheckHessianUpper( SparseHessianMatrix *sparseHessian , 
00812         double x0, double x1, double x2, double x3, double z0, double z1, double w ){
00813         using CppAD::NearEqual;
00814         bool ok  = true;
00815         int hessValuesIdx = 0;
00816         //assert( sparseHessian->hessDimension = n * (n + 1) /2)
00817         // L_00 = 2 * w - z1 / ( x0 * x0 )
00818         double check = 2. * w - z1 / (x0 * x0);
00819         ok &= NearEqual(*(sparseHessian->hessValues + hessValuesIdx++), check, 1e-10, 1e-10); 
00820         if(ok == false) std::cout << "FAILED ONE" << std::endl;
00821         ok &= NearEqual(*(sparseHessian->hessValues + hessValuesIdx++), 0., 1e-10, 1e-10);
00822         if(ok == false) std::cout << "FAILED TWO" << std::endl;
00823         // L_22 = - z1 / (x3 * x3)
00824         check = - z1 / (x3 * x3);
00825         ok &= NearEqual(*(sparseHessian->hessValues + hessValuesIdx++), check, 1e-10, 1e-1);
00826         if(ok == false) std::cout << "FAILED THREE" << std::endl;
00827         return ok;
00828 }//CheckHessianUpper

Generated on Thu Nov 10 03:05:46 2011 by  doxygen 1.4.7