GradientDescentOptimizer.h

//////////////////////////////////////////////////////////////////////
//
// GradientDescentOptimizer.h: interface of the GradientDescent optimizer.
//
// Written by: Matthias Wacker
// Edited by: Johannes Ruehle, 2012-10-11
//////////////////////////////////////////////////////////////////////

#ifndef _GRADIENT_DESCENT_OPTIMIZER_
#define _GRADIENT_DESCENT_OPTIMIZER_

#include <cmath>

#include "optimization/DerivativeBasedOptimizer.h"
///
/// Class GradientDescentOptimizer
///
/// How to use:
///
/// * use setStepSize() to specify the initial step size for computing the numerical gradient
/// * use setParameters() to set the start point
/// * call init()
/// * call optimize()
///
/// (A usage sketch following these steps appears after the listing.)
///
/// Implemented abort criteria:
///
/// * maximum number of iterations
/// * time limit
/// * parameter bounds
/// * function value tolerance
/// * parameter tolerance
/// * gradient tolerance (see the norm-check sketch after the listing)
///
/// Additional return reason:
///
/// * ERROR_COMPUTATION_UNSTABLE
///
/// GradientDescent supports the 'scales' feature.
///
class GradientDescentOptimizer : public DerivativeBasedOptimizer
{
public:

    typedef DerivativeBasedOptimizer SuperClass;

    ///
    /// Constructor.
    /// \param loger pointer to an existing OptLogBase log class
    ///
    GradientDescentOptimizer(OptLogBase *loger = NULL);

    ///
    /// Copy constructor.
    /// \param opt optimizer to copy
    ///
    GradientDescentOptimizer(const GradientDescentOptimizer &opt);

    ///
    /// Destructor.
    ///
    ~GradientDescentOptimizer();

    ///
    /// \brief Set the initial step size.
    ///
    /// The initial step size gives the optimizer the order of magnitude
    /// to start with (e.g. a step of 100000 in x direction, or 0.01?).
    /// \param stepSize vector holding the step size for the i-th dimension in the i-th position
    ///
    void setStepSize(const optimization::matrix_type &stepSize);

    ///
    /// Get the current step size.
    /// \return matrix_type holding the current step sizes
    ///
    inline const optimization::matrix_type & getStepSize() { return m_stepSize; }

    ///
    /// Do internal initializations.
    ///
    void init();

    ///
    /// Start the optimization.
    /// \return the return status, as defined in the corresponding enumeration
    ///
    int optimize();
    ///
    /// Set the step length of the descent.
    ///
    inline void setStepLength(double stepLength) { m_stepLength = stepLength; }

    ///
    /// Set the minimal gradient magnitude (L2 norm) below which the descent is aborted.
    ///
    inline void setMinimalGradientMagnitude(double minGradientMag) { m_MinimalGradientMagnitude = minGradientMag; }
private:

    ///
    /// step size vector
    ///
    optimization::matrix_type m_stepSize;

    ///
    /// step length
    ///
    double m_stepLength;

    ///
    /// Minimal threshold for the L2 norm of the gradient; the gradient
    /// descent is aborted once the norm falls below it.
    ///
    double m_MinimalGradientMagnitude;
};

#endif // _GRADIENT_DESCENT_OPTIMIZER_
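
For illustration, here is a minimal usage sketch following the "How to use" steps above. Only setStepSize(), setParameters(), setStepLength(), setMinimalGradientMagnitude(), init() and optimize() are taken from this header and its documentation; the matrix_type construction and element access via operator() are assumptions about the surrounding library, and attaching the cost function is left out because its setter is not shown in this file.

#include "optimization/GradientDescentOptimizer.h"

int main()
{
    GradientDescentOptimizer optimizer;          // loger = NULL: no logging

    // NOTE: a cost function must also be attached through the SuperClass
    // interface; the setter's name is not visible in this header.

    // Initial step size per dimension (order of magnitude for the
    // numerical gradient); operator() access is an assumption here.
    optimization::matrix_type stepSize(2, 1);
    stepSize(0, 0) = 0.01;
    stepSize(1, 0) = 0.01;
    optimizer.setStepSize(stepSize);

    // Start point, set via setParameters() as described in "How to use".
    optimization::matrix_type start(2, 1);
    start(0, 0) = 1.0;
    start(1, 0) = -1.0;
    optimizer.setParameters(start);

    optimizer.setStepLength(0.1);                // descent step length
    optimizer.setMinimalGradientMagnitude(1e-6); // gradient tolerance abort

    optimizer.init();                            // internal initializations
    return optimizer.optimize();                 // value from the return-status enumeration
}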
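
The gradient-tolerance abort compares the L2 norm of the current gradient against m_MinimalGradientMagnitude. The actual implementation is not part of this header; the following standalone sketch only illustrates the check, with a plain double array standing in for optimization::matrix_type.

#include <cmath>
#include <cstddef>

// Returns true when the gradient's L2 norm has fallen below the
// threshold, i.e. when the gradient descent should be aborted.
bool gradientBelowThreshold(const double *gradient, std::size_t dim,
                            double minimalGradientMagnitude)
{
    double squaredNorm = 0.0;
    for (std::size_t i = 0; i < dim; ++i)
    {
        squaredNorm += gradient[i] * gradient[i];
    }
    return std::sqrt(squaredNorm) < minimalGradientMagnitude;
}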