Quellcode durchsuchen

added unit test for Gradient Descent optimization

Johannes Ruehle vor 12 Jahren
Ursprung
Commit
8a2505b777

+ 5 - 5
DerivativeBasedOptimizer.cpp

@@ -65,8 +65,8 @@ const matrix_type  DerivativeBasedOptimizer::getNumericalGradient(const matrix_t
 	{
 	    for(int j = 0 ; j< 2 * static_cast<int>(m_numberOfParameters);j++)
 	    {
-		grid[i][j] = x[i][0] + (( j == i*2   )? +maskWidth[i][0] : 0.0)
-                                     + (( j == i*2+1 )? -maskWidth[i][0] : 0.0);
+            grid[i][j] = x[i][0] + (( j == i*2   )? +maskWidth[i][0] : 0.0)
+                                 + (( j == i*2+1 )? -maskWidth[i][0] : 0.0);
 	    }
 	}
 	
@@ -74,10 +74,10 @@ const matrix_type  DerivativeBasedOptimizer::getNumericalGradient(const matrix_t
 		
 	for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
 	{
-	    if(m_scales[i][0] == 0 )
+        if( fabs(m_scales[i][0]) < 1e-5 || fabs(maskWidth[i][0]) < 1e-5 )
 	    {
-		grad[i][0] = 0;
-		continue;
+            grad[i][0] = 0;
+            continue;
 	    }
         
 	    grad[i][0] = ( values[2*i][0] - values[2*i+1][0] )/( 2 * maskWidth[i][0]);

+ 34 - 0
tests/MyCostFunction.cpp

@@ -0,0 +1,34 @@
+#include "MyCostFunction.h"
+
+double MyCostFunction::evaluate(const optimization::matrix_type & x)
+{
+    double f;
+
+    if (m_bVerbose)
+        std::cerr << x.rows() << " x " << x.cols() << std::endl;
+    if ( x.rows() == 1 )
+    {
+        if (m_bVerbose)
+            std::cerr << "current position: " << x[0][0] << std::endl;
+
+        //our cost function is f(x) = (x-4.2)^2
+        f = pow(x[0][0] - 4.2, 2.0);
+
+        if (m_bVerbose)
+            std::cerr << "function value: " << f << std::endl;
+
+    }
+    //two-dimensional data
+    else {
+        if (m_bVerbose)
+            std::cerr << "current position: " << x[0][0] << " " << x[1][0] << std::endl;
+
+        //our cost function is f(x,y) = (x-4.7)^2 + (y-1.1)^2
+        f = pow(x[0][0] - 4.7, 2.0) + pow( x[1][0] - 1.1, 2.0 );
+
+        if (m_bVerbose)
+            std::cerr << "function value: " << f << std::endl;
+    }
+    return f;
+}
+

+ 22 - 0
tests/MyCostFunction.h

@@ -0,0 +1,22 @@
+#ifndef _MYCOSTFUNCTION_H
+#define _MYCOSTFUNCTION_H
+
+//#include <cppunit/extensions/HelperMacros.h>
+#include "optimization/CostFunction.h"
+
+//define a simple cost function for one-dimensional or two-dimensional data
+class MyCostFunction : public CostFunction
+{
+public:
+
+    MyCostFunction(const int & dim, bool verbose) : CostFunction(dim), m_bVerbose(verbose)
+    {}
+
+    virtual double evaluate(const optimization::matrix_type & x);
+
+private:
+    bool m_bVerbose;
+
+}; 
+
+#endif

+ 5 - 47
tests/TestDownhillSimplex.cpp

@@ -6,6 +6,8 @@
 
 #include "TestDownhillSimplex.h"
 
+#include "MyCostFunction.h"
+
 using namespace std;
 
 const bool verboseStartEnd = true;
@@ -20,50 +22,6 @@ void TestDownhillSimplex::setUp() {
 void TestDownhillSimplex::tearDown() {
 }
 
-//define a simple cost function for one-dimensional or two-dimensional data
-class MyCostFunction : public CostFunction
-{
-  public: 
-  
-   MyCostFunction(const int & dim) : CostFunction(dim)
-   {
-   }
-
-   virtual double evaluate(const optimization::matrix_type & x)
-   {
-     double f;
-     
-     if (verbose)
-      std::cerr << x.rows() << " x " << x.cols() << std::endl;
-     if ( x.rows() == 1 )
-     {
-       if (verbose)
-        std::cerr << "current position: " << x[0][0] << std::endl;
-       
-       //our cost function is f(x) = (x-5)^2
-       f = pow(x[0][0] - 4.2, 2.0);
-    
-       if (verbose)
-        std::cerr << "function value: " << f << std::endl;
-
-     } 
-     //two-dimensional data
-     else {
-       if (verbose)
-         std::cerr << "current position: " << x[0][0] << " " << x[1][0] << std::endl;
-       
-       //our cost function is f(x,y) = (x-4.7)^2 + (y-1.1)^2
-       f = pow(x[0][0] - 4.7, 2.0) + pow( x[1][0] - 1.1, 2.0 );
-       
-       if (verbose)
-         std::cerr << "function value: " << f << std::endl;
-     }
-     return f;
-   }
-
-
-};
-
 void TestDownhillSimplex::testDHS_1Dim ()
 {
   
@@ -72,7 +30,7 @@ void TestDownhillSimplex::testDHS_1Dim ()
   
   int dim (1);
   
-  CostFunction *func = new MyCostFunction(dim); 
+  CostFunction *func = new MyCostFunction(dim, verbose);
    
   //initial guess: 2.0
   optimization::matrix_type initialParams (dim, 1);
@@ -112,7 +70,7 @@ void TestDownhillSimplex::testDHS_2Dim()
   
   int dim (2);  
   
-  CostFunction *func = new MyCostFunction(dim); 
+  CostFunction *func = new MyCostFunction(dim, verbose);
    
   //initial guess: 2.0
   optimization::matrix_type initialParams (dim, 1);
@@ -149,4 +107,4 @@ void TestDownhillSimplex::testDHS_2Dim()
     std::cerr << "================== TestDownhillSimplex::testDHS_2Dim done ===================== " << std::endl;  
 }
 
-#endif
+#endif

+ 127 - 0
tests/TestGradientDescent.cpp

@@ -0,0 +1,127 @@
+#ifdef NICE_USELIB_CPPUNIT
+
+#include <string>
+#include <exception>
+#include <map>
+
+#include "TestGradientDescent.h"
+
+#include "MyCostFunction.h"
+
+using namespace std;
+
+const bool verboseStartEnd = true;
+const bool verbose = true;
+//const bool verbose = false;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestGradientDescent );
+
+void TestGradientDescent::setUp() {
+}
+
+void TestGradientDescent::tearDown() {
+}
+
+
+
+void TestGradientDescent::testGD_1Dim ()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGradientDescent::testGD_1Dim ===================== " << std::endl;
+  
+  int dim (1);
+  
+  CostFunction *func = new MyCostFunction(dim, verbose);
+   
+  //initial guess: 2.0
+  optimization::matrix_type initialParams (dim, 1);
+  initialParams.Set(2.0);
+
+  //we use a dimension scale of 1.0
+  optimization::matrix_type scales (dim, 1);
+  scales.Set(1.0);
+
+  //setup the optimization problem
+  SimpleOptProblem optProblem ( func, initialParams, scales );
+  optProblem.setMaximize(false);
+
+  GradientDescentOptimizer optimizer;
+  //we search with step-width of 1.0
+  optimization::matrix_type searchSteps (dim, 1);
+  searchSteps[0][0] = 1.0f;
+
+  //optimizer.setVerbose(true);
+  optimizer.setStepSize( searchSteps );
+  optimizer.setMaxNumIter(true, 1000);
+  optimizer.setFuncTol(true, 1e-8);
+  optimizer.optimizeProb ( optProblem );  
+  
+  optimization::matrix_type optimizedParams (optProblem.getAllCurrentParams());
+  
+  double goal(4.2);  
+  
+  if (verbose)
+    std::cerr << "1d optimization -- result " << optimizedParams[0][0] << " -- goal: " << goal << std::endl;
+
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams[0][0], goal, 1e-4 /* tolerance */);
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGradientDescent::testGD_1Dim done ===================== " << std::endl;
+
+}
+
+void TestGradientDescent::testGD_2Dim()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGradientDescent::testGD_2Dim ===================== " << std::endl;
+  
+  int dim (2);  
+  
+  CostFunction *func = new MyCostFunction(dim, verbose);
+   
+  //initial guess: 2.0
+  optimization::matrix_type initialParams (dim, 1);
+  initialParams.Set(2.0);
+
+  //we use a dimension scale of 1.0
+  optimization::matrix_type scales (dim, 1);
+  scales.Set(1.0);
+
+  //setup the optimization problem
+  SimpleOptProblem optProblem ( func, initialParams, scales );
+  optProblem.setMaximize(false);
+
+  GradientDescentOptimizer optimizer;
+  //we search with step-width of 1.0
+  optimization::matrix_type searchSteps (dim, 1);
+  searchSteps[0][0] = 1.0f;
+  searchSteps[1][0] = 1.0f;
+
+  //optimizer.setVerbose(true);
+  optimizer.setStepSize( searchSteps );
+  optimizer.setMaxNumIter(true, 1000);
+  optimizer.setFuncTol(true, 1e-8);
+  optimizer.optimizeProb ( optProblem );
+  
+  optimization::matrix_type optimizedParams (optProblem.getAllCurrentParams());
+
+  double goalFirstDim(4.7);
+  double goalSecondDim(1.1);
+  
+  if (verbose)
+  {
+    std::cerr << "2d optimization  1st dim-- result " << optimizedParams[0][0] << " -- goal: " << goalFirstDim << std::endl;
+    std::cerr << "2d optimization  2nd dim-- result " << optimizedParams[1][0] << " -- goal: " << goalSecondDim << std::endl;
+  }
+
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams[0][0], goalFirstDim, 1e-4 /* tolerance */);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams[1][0], goalSecondDim, 1e-4 /* tolerance */);
+  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGradientDescent::testGD_2Dim done ===================== " << std::endl;
+}
+
+#endif

+ 43 - 0
tests/TestGradientDescent.h

@@ -0,0 +1,43 @@
+#ifndef _TESTGRADIENTDESCENT_H
+#define _TESTGRADIENTDESCENT_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include "optimization/GradientDescentOptimizer.h"
+
+/**
+ * @brief CppUnit-Testcase for Gradient Descent Optimization
+ * @author Johannes Ruehle
+ * @date 12-10-2012
+ */
+class TestGradientDescent : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestGradientDescent );
+    
+    CPPUNIT_TEST(testGD_1Dim);
+    CPPUNIT_TEST(testGD_2Dim);
+    
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    /**
+    * @brief Test of 1D optimization with a simple convex cost function
+    * @author Johannes Ruehle
+    * @date 12-10-2012
+    */      
+    void testGD_1Dim();
+
+    /**
+    * @brief Test of 2D optimization with a simple convex cost function
+    * @author Johannes Ruehle
+    * @date 12-10-2012
+    */    
+    void testGD_2Dim();
+
+};
+
+#endif // _TESTGRADIENTDESCENT_H