// GradientDescentOptimizer.h
  1. //////////////////////////////////////////////////////////////////////
  2. //
  3. // GradientDescentOptimizer.h: interface of the optimizer GradientDescent.
  4. //
  5. // Written by: Matthias Wacker
  6. // edited by Johannes Ruehle, 2012-10-11
  7. //////////////////////////////////////////////////////////////////////
  8. #ifndef _GRADIENT_DESCENT_OPTIMIZER_
  9. #define _GRADIENT_DESCENT_OPTIMIZER_
  10. #include <cmath>
  11. #include "optimization/DerivativeBasedOptimizer.h"
  12. namespace OPTIMIZATION
  13. {
  14. ///
  15. /// Class GradientDescentOptimizer
  16. ///
  17. /// HowToUse:
  18. ///
  19. /// * use setStepSize to specify the initial stepsize to compute the numerical gradient
  20. /// * use setParameters() to set the start point
  21. /// * call init()
  22. /// * call optimize()
  23. ///
  24. ///
  25. ///
  26. /// Implemented Abort criteria:
  27. ///
  28. /// * maximum number of iterations
  29. /// * time limit
  30. /// * parameter bounds
  31. /// * function value tolerance
  32. /// * parameter tolerance
  33. /// * gradient tolerance
  34. ///
  35. /// Additional return reason:
  36. ///
  37. /// * ERROR_COMPUTATION_UNSTABLE
  38. ///
  39. /// GradientDescent supports the 'scales' feature
  40. class GradientDescentOptimizer : public DerivativeBasedOptimizer
  41. {
  42. public:
  43. typedef DerivativeBasedOptimizer SuperClass;
  44. ///
  45. /// Constructor.
  46. /// \param loger : OptLogBase * to existing log class
  47. ///
  48. GradientDescentOptimizer(OptLogBase *loger=NULL);
  49. ///
  50. /// Copy constructor
  51. /// \param opt .. optimizer to copy
  52. ///
  53. GradientDescentOptimizer( const GradientDescentOptimizer &opt);
  54. ///
  55. /// Destructor.
  56. ///
  57. ~GradientDescentOptimizer();
  58. ///
  59. /// \brief Set the initial step size
  60. /// The initial stepsize is used to give the optimizer an initial value for the order of
  61. /// magnitude to start with.
  62. /// (e.g. step 100000 in x direction or 0.01 ?)
  63. /// \param stepSize with the step size for the i-th dimension in the i-th position.
  64. ///
  65. void setStepSize(const OPTIMIZATION::matrix_type & stepSize);
  66. ///
  67. /// Get the actual step size
  68. /// \return vector<double> with the actual step sizes
  69. ///
  70. inline const OPTIMIZATION::matrix_type & getStepSize(){return m_stepSize;};
  71. ///
  72. /// do internal initializations
  73. ///
  74. void init();
  75. ///
  76. /// start the optimization
  77. /// \return the return status that can be found in the corresponding enumeration
  78. ///
  79. int optimize();
  80. inline void setStepLength(double stepLength){m_stepLength=stepLength;}
  81. inline void setMinimalGradientMagnitude(double minGradientMag){m_MinimalGradientMagnitude=minGradientMag;}
  82. private:
  83. ///
  84. /// step size vector
  85. ///
  86. OPTIMIZATION::matrix_type m_stepSize;
  87. ///
  88. /// .. steplength
  89. ///
  90. double m_stepLength;
  91. ///
  92. /// Minimal threshold for the L2-Norm of the gradient, so that the gradient descent
  93. /// is aborted.
  94. ///
  95. double m_MinimalGradientMagnitude;
  96. };//class
  97. }//namespace
  98. #endif