//////////////////////////////////////////////////////////////////////
//
// DerivativeBasedOptimizer.cpp: Implementation of the DerivativeBased
//                               Optimizer class.
//
// Written by Matthias Wacker
// edited by Johannes Ruehle, 2012-10-11
//
//////////////////////////////////////////////////////////////////////

#include "optimization/DerivativeBasedOptimizer.h"

using namespace optimization;

DerivativeBasedOptimizer::DerivativeBasedOptimizer(OptLogBase *loger) : SuperClass(loger)
{
    m_analyticalGradients = true;
    m_gradientTolActive   = false;
    m_gradientTol         = 1.0e-3;
}


DerivativeBasedOptimizer::DerivativeBasedOptimizer(const DerivativeBasedOptimizer &opt) : SuperClass(opt)
{
    m_analyticalGradients = opt.m_analyticalGradients;
    m_gradientTolActive   = opt.m_gradientTolActive;
    m_gradientTol         = opt.m_gradientTol;
    m_gradient            = opt.m_gradient;
}


DerivativeBasedOptimizer::~DerivativeBasedOptimizer()
{
}


void DerivativeBasedOptimizer::setGradientTol(bool active, double norm)
{
    m_gradientTol       = norm;
    m_gradientTolActive = active;
}


inline double DerivativeBasedOptimizer::getGradientTol()
{
    return m_gradientTol;
}


void DerivativeBasedOptimizer::init()
{
    SuperClass::init();
    m_gradient = matrix_type(m_numberOfParameters, 1);
}


void DerivativeBasedOptimizer::useAnalyticalGradients(bool useAnalyticalGradients)
{
    m_analyticalGradients = useAnalyticalGradients;
}


const matrix_type DerivativeBasedOptimizer::getNumericalGradient(const matrix_type &x, const matrix_type &maskWidth)
{
    matrix_type grad(m_numberOfParameters, 1);

    // Build a grid of 2 * m_numberOfParameters evaluation points: for each
    // parameter i, column 2*i is x shifted by +maskWidth[i][0] in dimension i,
    // and column 2*i+1 is x shifted by -maskWidth[i][0].
    matrix_type grid(m_numberOfParameters, 2 * m_numberOfParameters);

    for (int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
    {
        for (int j = 0; j < 2 * static_cast<int>(m_numberOfParameters); j++)
        {
            grid[i][j] = x[i][0] + ((j == i * 2)     ? +maskWidth[i][0] : 0.0)
                                 + ((j == i * 2 + 1) ? -maskWidth[i][0] : 0.0);
        }
    }

    matrix_type values = evaluateSetCostFunction(grid);

    for (int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
    {
        // Skip dimensions with (near) zero scale or step width to avoid a
        // division by zero; their gradient entry is set to zero.
        if (fabs(m_scales[i][0]) < 1e-5 || fabs(maskWidth[i][0]) < 1e-5)
        {
            grad[i][0] = 0;
            continue;
        }

        // Central difference: (f(x + h_i e_i) - f(x - h_i e_i)) / (2 h_i).
        grad[i][0] = (values[2 * i][0] - values[2 * i + 1][0]) / (2 * maskWidth[i][0]);
    }

    return grad;
}


const matrix_type DerivativeBasedOptimizer::getAnalyticalGradient(const matrix_type &x)
{
    // When maximizing, negate the gradient so the problem can be treated as a minimization.
    return (m_maximize == true) ? (m_costFunction->getAnalyticGradient(x) * (-1.0))
                                : (m_costFunction->getAnalyticGradient(x));
}


const matrix_type DerivativeBasedOptimizer::getAnalyticalHessian(const matrix_type &x)
{
    // When maximizing, negate the Hessian as well.
    return (m_maximize == true) ? (m_costFunction->getAnalyticHessian(x) * (-1.0))
                                : (m_costFunction->getAnalyticHessian(x));
}
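

//////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of the original optimization library):
// getNumericalGradient() above approximates each partial derivative by a
// central difference, (f(x + h_i e_i) - f(x - h_i e_i)) / (2 h_i), where
// h_i is the mask width of parameter i. The self-contained example below
// reproduces that scheme for a toy cost function using std::vector; the
// names toyCost and centralDifferenceGradient are hypothetical and do not
// exist in the library. It is guarded by "#if 0" so it never affects the
// build of this file.
//
//////////////////////////////////////////////////////////////////////
#if 0
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical toy cost function: f(x) = x0^2 + 2 * x1^2.
static double toyCost(const std::vector<double> &x)
{
    return x[0] * x[0] + 2.0 * x[1] * x[1];
}

// Central-difference gradient, mirroring the scheme used in
// DerivativeBasedOptimizer::getNumericalGradient().
static std::vector<double> centralDifferenceGradient(const std::vector<double> &x,
                                                     const std::vector<double> &h)
{
    std::vector<double> grad(x.size(), 0.0);
    for (std::size_t i = 0; i < x.size(); ++i)
    {
        if (std::fabs(h[i]) < 1e-5)
        {
            continue; // step too small: leave this gradient entry at zero
        }
        std::vector<double> xPlus(x);
        std::vector<double> xMinus(x);
        xPlus[i]  += h[i];
        xMinus[i] -= h[i];
        grad[i] = (toyCost(xPlus) - toyCost(xMinus)) / (2.0 * h[i]);
    }
    return grad;
}

int main()
{
    std::vector<double> point(2);
    point[0] = 1.0;
    point[1] = 2.0;
    std::vector<double> step(2, 1e-4); // per-parameter step width h_i

    // The analytic gradient of toyCost at (1, 2) is (2, 8).
    std::vector<double> g = centralDifferenceGradient(point, step);
    std::printf("numerical gradient: (%f, %f)\n", g[0], g[1]);
    return 0;
}
#endif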