//////////////////////////////////////////////////////////////////////
//
//  DerivativeBasedOptimizer.h: interface of DerivativeBasedOptimizer class.
//
//  Written by: Matthias Wacker
//
//////////////////////////////////////////////////////////////////////

#ifndef _DERIVATIVE_BASED_OPTIMIZER_H_
#define _DERIVATIVE_BASED_OPTIMIZER_H_

#include "optimization/SimpleOptimizer.h"

/*!
    class Abstract base class of all derivative-based optimizers.
*/
class DerivativeBasedOptimizer : public SimpleOptimizer
{
public:
    typedef SimpleOptimizer SuperClass;

    ///
    /// Constructor.
    ///
    /// \param loger : OptLogBase* to an existing log class
    ///
    DerivativeBasedOptimizer(OptLogBase *loger = NULL);

    ///
    /// Copy constructor.
    /// \param opt : optimizer to copy
    ///
    DerivativeBasedOptimizer(const DerivativeBasedOptimizer &opt);

    ///
    /// Destructor.
    ///
    virtual ~DerivativeBasedOptimizer();

    ///
    /// Enumeration of the return reasons of this optimizer;
    /// it contains all elements of the SuperClass optimizer and
    /// appends SUCCESS_GRADIENTTOL.
    ///
    enum
    {
        SUCCESS_GRADIENTTOL = _to_continue_,
        _to_continue_
    };

    ///
    /// \brief Set the gradient tolerance abort criterion.
    ///
    /// While iterating, if the gradient norm falls below the given threshold
    /// and the criterion is active, the optimization stops and returns
    /// SUCCESS_GRADIENTTOL.
    ///
    /// \param active : bool to activate the criterion (true == active)
    /// \param norm : double representing the threshold
    ///
    void setGradientTol(bool active, double norm);

    ///
    /// Get the gradient tolerance abort criterion.
    /// \return double representing the threshold
    ///
    inline double getGradientTol();

    ///
    /// Get the numerical gradient at position x, using central differences
    /// with a mask width of maskWidth:
    ///
    /// grad(f(x))_i \approx
    ///     [ f( x + (0, ... ,0, maskWidth(i,0), 0, ... ,0) )
    ///     - f( x - (0, ... ,0, maskWidth(i,0), 0, ... ,0) ) ]
    ///     / (2 * maskWidth(i,0))
    ///
    /// \forall i \in [1, ... , m_numberOfParameters]
    ///
    const optimization::matrix_type getNumericalGradient(const optimization::matrix_type &x, const optimization::matrix_type &maskWidth);

    ///
    /// Get the analytical gradient of the cost function (if available);
    /// the sign is already inverted for maximization.
    ///
    const optimization::matrix_type getAnalyticalGradient(const optimization::matrix_type &x);

    ///
    /// Use analytical gradients, if possible.
    ///
    void useAnalyticalGradients(bool useAnalyticalGradients);

    ///
    /// Get the analytical Hessian of the cost function (if available);
    /// the sign is already inverted for maximization.
    ///
    const optimization::matrix_type getAnalyticalHessian(const optimization::matrix_type &x);

protected:
    ///
    /// initialize
    ///
    void init();

    ///
    /// the gradient
    ///
    optimization::matrix_type m_gradient;

    ///
    /// gradient tolerance threshold
    ///
    double m_gradientTol;

    ///
    /// gradient tolerance active
    ///
    bool m_gradientTolActive;

    ///
    /// use numerical or analytical gradient computation
    ///
    bool m_analyticalGradients;
};

#endif
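
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): the central-difference rule
// documented above for getNumericalGradient(), written against plain
// std::vector / std::function types rather than optimization::matrix_type,
// since the concrete matrix interface is not shown here. The function name
// numericalGradientSketch is hypothetical and only demonstrates the formula.
//
// #include <cstddef>
// #include <functional>
// #include <vector>
//
// std::vector<double> numericalGradientSketch(
//     const std::function<double(const std::vector<double>&)> &f,
//     const std::vector<double> &x,
//     const std::vector<double> &maskWidth)
// {
//     std::vector<double> grad(x.size(), 0.0);
//     for (std::size_t i = 0; i < x.size(); ++i)
//     {
//         std::vector<double> xPlus(x);
//         std::vector<double> xMinus(x);
//         xPlus[i]  += maskWidth[i];   // x + (0, ... ,0, maskWidth(i), 0, ... ,0)
//         xMinus[i] -= maskWidth[i];   // x - (0, ... ,0, maskWidth(i), 0, ... ,0)
//
//         // central difference approximation of the i-th partial derivative
//         grad[i] = (f(xPlus) - f(xMinus)) / (2.0 * maskWidth[i]);
//     }
//     return grad;
// }
// ---------------------------------------------------------------------------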