@@ -1,4 +1,4 @@
-/**
+/**
 * @file FastMinKernel.h
 * @brief Efficient GPs with HIK for classification by regression (Interface)
 * @author Alexander Freytag
@@ -14,8 +14,8 @@
 #include <core/basics/Config.h>
 #include <core/basics/Exception.h>
 #include <core/basics/Persistent.h>
-//
-//
+//
+//
 #include <core/vector/MatrixT.h>
 #include <core/vector/SparseVectorT.h>
 #include <core/vector/VectorT.h>
@@ -31,12 +31,12 @@
 namespace NICE {

-/**
+/**
 * @class FastMinKernel
 * @brief Efficient GPs with HIK for classification by regression
 * @author Alexander Freytag
- */
-
+ */
+

 /** interface to FastMinKernel implementation*/
 class FastMinKernel : public NICE::Persistent, public OnlineLearnable
 {
@@ -46,54 +46,38 @@ namespace NICE {
     uint ui_n;

     /** dimension of feature vectors */
-    uint ui_d;
+    uint ui_d;

     /** noise added to the diagonal of the kernel matrix */
     double d_noise;
-
+
     /** sorted matrix of features (sorted along each dimension) */
     NICE::FeatureMatrixT<double> X_sorted;
-
+
     //! verbose flag for output after calling the restore-function
     bool b_verbose;
     //! debug flag for output during debugging
-    bool b_debug;
+    bool b_debug;

-    /**
+    /**
     * @brief Set number of examples
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
     void set_n(const uint & _n){this->ui_n = _n;};
-
-    /**
+
+    /**
     * @brief Set number of dimensions
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-    void set_d(const uint & _d){this->ui_d = _d;};
+    void set_d(const uint & _d){this->ui_d = _d;};

-    /**
-    * @brief Prepare the efficient HIK-computations part 1: order the features in each dimension and save the permutation. Pay attention: X is of dim n x d, where as X_sorted is of dimensionality d x n!
-    * @author Alexander Freytag
-    * @date 07-12-2011 (dd-mm-yyyy)
-    */
-    void hik_prepare_kernel_multiplications(const std::vector<std::vector<double> > & _X,
-                                            NICE::FeatureMatrixT<double> & _X_sorted,
-                                            const uint & _dim = 0
-                                           );
-
-    void hik_prepare_kernel_multiplications ( const std::vector< const NICE::SparseVector * > & _X,
-                                              NICE::FeatureMatrixT<double> & _X_sorted,
-                                              const bool & _dimensionsOverExamples,
-                                              const uint & _dim = 0
-                                            );
-
-    void randomPermutation(NICE::Vector & _permutation,
-                           const std::vector<uint> & _oldIndices,
+    void randomPermutation(NICE::Vector & _permutation,
+                           const std::vector<uint> & _oldIndices,
                            const uint & _newSize
                           ) const;
-
+
     enum ApproximationScheme{ MEDIAN = 0, EXPECTATION=1};
     ApproximationScheme approxScheme;

@@ -102,36 +86,36 @@ namespace NICE {
     //------------------------------------------------------
     // several constructors and destructors
     //------------------------------------------------------
-
-    /**
+
+    /**
     * @brief default constructor
     * @author Alexander Freytag
     * @date 20-04-2012 (dd-mm-yyyy)
     */
-    FastMinKernel();
-
-    /**
+    FastMinKernel();
+
+    /**
     * @brief recommended constructor, initialize with some data
     * @author Alexander Freytag
     * @date 06-12-2011 (dd-mm-yyyy)
     */
-    FastMinKernel( const std::vector<std::vector<double> > & _X,
+    FastMinKernel( const std::vector<std::vector<double> > & _X,
                    const double _noise ,
-                   const bool _debug = false,
+                   const bool _debug = false,
                    const uint & _dim = 0
                  );

-
+
     /**
     * @brief recommended constructor, just another sparse data structure
     *
     * @param X vector of sparse vector pointers
     * @param noise GP noise
     */
-    FastMinKernel( const std::vector< const NICE::SparseVector * > & _X,
-                   const double _noise,
-                   const bool _debug = false,
-                   const bool & dimensionsOverExamples=false,
+    FastMinKernel( const std::vector< const NICE::SparseVector * > & _X,
+                   const double _noise,
+                   const bool _debug = false,
+                   const bool & dimensionsOverExamples=false,
                    const uint & _dim = 0
                  );

@@ -143,14 +127,14 @@ namespace NICE {
     * @param noise additional noise variance of the labels
     * @param examples set of indices to include
     */
-    FastMinKernel ( const sparse_t & _X,
-                    const double _noise,
-                    const std::map<uint, uint> & _examples,
-                    const bool _debug = false ,
+    FastMinKernel ( const sparse_t & _X,
+                    const double _noise,
+                    const std::map<uint, uint> & _examples,
+                    const bool _debug = false ,
                     const uint & _dim = 0);
 #endif

-    /**
+    /**
     * @brief Default destructor
     * @author Alexander Freytag
     * @date 06-12-2011 (dd-mm-yyyy)
@@ -160,73 +144,73 @@ namespace NICE {
     ///////////////////// ///////////////////// /////////////////////
     // GET / SET
     // INCLUDING ACCESS OPERATORS
-    ///////////////////// ///////////////////// /////////////////////
-
-
+    ///////////////////// ///////////////////// /////////////////////
+
+
     void setApproximationScheme(const ApproximationScheme & _approxScheme = MEDIAN) {approxScheme = _approxScheme;};
-
+
     virtual void setApproximationScheme(const int & _approxScheme = 0);
-
-    /**
+
+    /**
     * @brief Get number of examples
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
     uint get_n() const;
-
-    /**
+
+    /**
     * @brief Get number of dimensions
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
     uint get_d() const;

-    /**
+    /**
     * @brief Computes the ratio of sparsity across the matrix
     * @author Alexander Freytag
     * @date 11-01-2012 (dd-mm-yyyy)
     */
     double getSparsityRatio() const;
-
+
     /** set verbose flag used for restore-functionality*/
     void setVerbose( const bool & _verbose);
-    bool getVerbose( ) const;
-
+    bool getVerbose( ) const;
+
     /** set debug flag used for debug output*/
     void setDebug( const bool & _debug);
-    bool getDebug( ) const;
-
+    bool getDebug( ) const;
+
     //------------------------------------------------------
     // high level methods
     //------------------------------------------------------
-
+
     /**
     * @brief apply a parameterized function to the feature matrix
     * @author Alexander Freytag
     * @date 04-05-2012 (dd-mm-yyyy)
     *
     * @param pf the parameterized function (optional), if not given, nothing will be done
-    */
+    */
     void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *_pf = NULL );
-
-    /**
+
+    /**
     * @brief Prepare the efficient HIK-computations part 2: calculate the partial sum for each dimension. Explicitely exploiting sparsity!!! Pay attention: X_sorted is of dimensionality d x n!
     * @author Alexander Freytag
     * @date 17-01-2012 (dd-mm-yyyy)
     */
-    void hik_prepare_alpha_multiplications(const NICE::Vector & _alpha,
-                                           NICE::VVector & _A,
+    void hik_prepare_alpha_multiplications(const NICE::Vector & _alpha,
+                                           NICE::VVector & _A,
                                            NICE::VVector & _B
                                           ) const;
-
+
     /**
     * @brief Computing K*alpha with the minimum kernel trick, explicitely exploiting sparsity!!!
     * @author Alexander Freytag
     * @date 17-01-2012 (dd-mm-yyyy)
     */
-    void hik_kernel_multiply(const NICE::VVector & _A,
-                             const NICE::VVector & _B,
-                             const NICE::Vector & _alpha,
+    void hik_kernel_multiply(const NICE::VVector & _A,
+                             const NICE::VVector & _B,
+                             const NICE::Vector & _alpha,
                              NICE::Vector & _beta
                             ) const;
     void hik_kernel_multiply_fast(const double *_Tlookup,
@@ -240,38 +224,38 @@ namespace NICE {
     *
     * @author Alexander Freytag
     * @date 20-01-2012 (dd-mm-yyyy)
-    * @param A pre-computation matrix (VVector) (use the prepare method)
+    * @param A pre-computation matrix (VVector) (use the prepare method)
     * @param B pre-computation matrix (VVector)
     * @param xstar new feature vector (SparseVector)
     * @param beta result of the scalar product
     * @param pf optional feature transformation
     */
-    void hik_kernel_sum(const NICE::VVector & _A,
-                        const NICE::VVector & _B,
-                        const NICE::SparseVector & _xstar,
-                        double & _beta,
-                        const ParameterizedFunction *_pf = NULL
+    void hik_kernel_sum(const NICE::VVector & _A,
+                        const NICE::VVector & _B,
+                        const NICE::SparseVector & _xstar,
+                        double & _beta,
+                        const ParameterizedFunction *_pf = NULL
                        ) const;
-
+
     /**
     * @brief Computing k_{*}*alpha using the minimum kernel trick and exploiting sparsity of the feature vector given
     * NOTE: Whenever possible, you should use sparse features to obtain significantly smaller computation times!
     *
     * @author Alexander Freytag
     * @date 18-06-2013 (dd-mm-yyyy)
-    * @param A pre-computation matrix (VVector) (use the prepare method)
+    * @param A pre-computation matrix (VVector) (use the prepare method)
     * @param B pre-computation matrix (VVector)
     * @param xstar new feature vector (non-sparse Vector)
     * @param beta result of the scalar product
     * @param pf optional feature transformation
     */
-    void hik_kernel_sum(const NICE::VVector & _A,
-                        const NICE::VVector & _B,
-                        const NICE::Vector & _xstar,
-                        double & _beta,
-                        const ParameterizedFunction *_pf = NULL
-                       ) const;
-
+    void hik_kernel_sum(const NICE::VVector & _A,
+                        const NICE::VVector & _B,
+                        const NICE::Vector & _xstar,
+                        double & _beta,
+                        const ParameterizedFunction *_pf = NULL
+                       ) const;
+
     /**
     * @brief compute beta = k_*^T * alpha by using a large lookup table created by hik_prepare_alpha_multiplications_fast
     * NOTE: Whenever possible, you should use sparse features to obtain significantly smaller computation times!
@@ -297,7 +281,7 @@ namespace NICE {
     * @param q Quantization object
     * @param xstar feature vector (indirect k_*)
     * @param beta result of the calculation
-    */
+    */

     void hik_kernel_sum_fast(const double *_Tlookup,
                              const Quantization * _q,
@@ -306,7 +290,8 @@ namespace NICE {
                             ) const;

     /**
-    * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
+    * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations,
+    * whenever possible use hikPrepareLookupTable directly.
     * @author Erik Rodner, Alexander Freytag
     *
     * @param alpha coefficient vector
@@ -322,7 +307,7 @@ namespace NICE {
                                            const Quantization * _q,
                                            const ParameterizedFunction *_pf = NULL
                                          ) const;
-
+
     /**
     * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
     * @author Alexander Freytag
@@ -363,12 +348,12 @@ namespace NICE {
     */
     FeatureMatrix & featureMatrix(void) { return X_sorted; };
     const FeatureMatrix & featureMatrix(void) const { return X_sorted; };
-
+
     /**
     * @brief solve the linear system K*alpha = y with the minimum kernel trick based on the algorithm of Wu (Wu10_AFD)
     * @note method converges slowly for large scale problems and even for normal scale :(
     * @author Paul Bodesheim
-    *
+    *
     * @param y right hand side of linear system
     * @param alpha final solution of the linear system
     * @param q Quantization
@@ -377,7 +362,7 @@ namespace NICE {
     * @param maxIterations maximum number of iterations
     * @param sizeOfRandomSubset nr of Elements that should be randomly considered in each iteration (max: y.size())
     * @param minDelta minimum difference between two solutions alpha_t and alpha_{t+1} (convergence criterion)
-    *
+    *
     * @return C standard vector representing a q.size()*n double matrix and the lookup table T. Elements can be accessed with
     * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
     **/
@@ -398,22 +383,22 @@ namespace NICE {

     //! get the current noise parameter
     double getNoise (void) const { return this->d_noise; }
-
+
     double getFrobNormApprox();
-
-
-    /**
+
+
+    /**
     * @brief Prepare the efficient HIK-computations for the squared kernel vector |k_*|^2 : calculate the partial squared sums for each dimension.
     * @author Alexander Freytag
     * @date 10-04-2012 (dd-mm-yyyy)
     */
     void hikPrepareKVNApproximation(NICE::VVector & _A) const;
-
-    /**
+
+    /**
     * @brief Compute lookup table for HIK calculation of |k_*|^2 assuming quantized test samples. You have to run hikPrepareSquaredKernelVector before
     * @author Alexander Freytag
     * @date 10-04-2012 (dd-mm-yyyy)
-    *
+    *
     * @param A pre-calculation array computed by hikPrepareSquaredKernelVector
     * @param q Quantization
     * @param pf Parameterized Function to efficiently apply a function to the underlying data
@@ -438,20 +423,20 @@ namespace NICE {

     //////////////////////////////////////////
     // variance computation: sparse inputs
-    //////////////////////////////////////////
-
+    //////////////////////////////////////////
+
     /**
     * @brief Approximate norm = |k_*|^2 using the minimum kernel trick and exploiting sparsity of the given feature vector. Approximation does not considere mixed terms between dimensions.
    * @author Alexander Freytag
     * @date 10-04-2012 (dd-mm-yyyy)
-    *
-    * @param A pre-computation matrix (VVector) (use the prepare method)
+    *
+    * @param A pre-computation matrix (VVector) (use the prepare method)
     * @param xstar new feature vector (SparseVector)
     * @param norm result of the squared norm approximation
     * @param pf optional feature transformation
     */
     void hikComputeKVNApproximation(const NICE::VVector & _A, const NICE::SparseVector & _xstar, double & _norm, const ParameterizedFunction *_pf = NULL ) ;
-
+
     /**
     * @brief Approximate norm = |k_*|^2 using a large lookup table created by hikPrepareSquaredKernelVector and hikPrepareSquaredKernelVectorFast or directly using hikPrepareLookupTableForSquaredKernelVector. Approximation does not considere mixed terms between dimensions.
     * @author Alexander Freytag
@@ -471,25 +456,25 @@ namespace NICE {
     *
     * @param xstar feature vector
     * @param kstar kernel vector
-    */
+    */
     void hikComputeKernelVector( const NICE::SparseVector & _xstar, NICE::Vector & _kstar) const;
-
+
     //////////////////////////////////////////
     // variance computation: non-sparse inputs
-    //////////////////////////////////////////
-
+    //////////////////////////////////////////
+
     /**
     * @brief Approximate norm = |k_*|^2 using the minimum kernel trick and exploiting sparsity of the given feature vector. Approximation does not considere mixed terms between dimensions.
     * @author Alexander Freytag
     * @date 19-12-2013 (dd-mm-yyyy)
-    *
-    * @param A pre-computation matrix (VVector) (use the prepare method)
+    *
+    * @param A pre-computation matrix (VVector) (use the prepare method)
     * @param xstar new feature vector (Vector)
     * @param norm result of the squared norm approximation
     * @param pf optional feature transformation
     */
     void hikComputeKVNApproximation(const NICE::VVector & _A, const NICE::Vector & _xstar, double & _norm, const ParameterizedFunction *_pf = NULL ) ;
-
+
     /**
     * @brief Approximate norm = |k_*|^2 using a large lookup table created by hikPrepareSquaredKernelVector and hikPrepareSquaredKernelVectorFast or directly using hikPrepareLookupTableForSquaredKernelVector. Approximation does not considere mixed terms between dimensions.
     * @author Alexander Freytag
@@ -509,28 +494,28 @@ namespace NICE {
     *
     * @param xstar feature vector
     * @param kstar kernel vector
-    */
-    void hikComputeKernelVector( const NICE::Vector & _xstar, NICE::Vector & _kstar) const;
-
+    */
+    void hikComputeKernelVector( const NICE::Vector & _xstar, NICE::Vector & _kstar) const;
+
     /** Persistent interface */
     virtual void restore ( std::istream & _is, int _format = 0 );
-    virtual void store ( std::ostream & _os, int _format = 0 ) const;
+    virtual void store ( std::ostream & _os, int _format = 0 ) const;
     virtual void clear ();
-
+
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
     // interface specific methods for incremental extensions
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
-
-    virtual void addExample( const NICE::SparseVector * _example,
-                             const double & _label,
+
+    virtual void addExample( const NICE::SparseVector * _example,
+                             const double & _label,
                              const bool & _performOptimizationAfterIncrement = true
                            );

     virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
                                       const NICE::Vector & _newLabels,
                                       const bool & _performOptimizationAfterIncrement = true
-                                    );
-
+                                    );
+

     /**
     * @brief Add a new example to the feature-storage. You have to update the corresponding variables explicitely after that.
@@ -538,20 +523,20 @@ namespace NICE {
     * @date 02-01-2014 (dd-mm-yyyy)
     *
     * @param example new feature vector
-    */
+    */
     void addExample(const NICE::SparseVector * _example, const NICE::ParameterizedFunction *_pf = NULL);
-
+
     /**
     * @brief Add multiple new example to the feature-storage. You have to update the corresponding variables explicitely after that.
     * @author Alexander Freytag
     * @date 02-01-2014 (dd-mm-yyyy)
     *
     * @param newExamples new feature vectors
-    */
-    void addMultipleExamples(const std::vector<const NICE::SparseVector * > & _newExamples, const NICE::ParameterizedFunction *_pf = NULL);
-
-
-
+    */
+    void addMultipleExamples(const std::vector<const NICE::SparseVector * > & _newExamples, const NICE::ParameterizedFunction *_pf = NULL);
+
+
+

 };

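
For orientation, the sketch below shows how the interface touched by this whitespace cleanup is typically driven: construct a FastMinKernel from training data, run the "part 2" preparation to obtain the per-dimension sums A and B, and then evaluate K*alpha or k_*^T * alpha. It is a minimal sketch based only on the declarations visible in this diff; the gp-hik-core include path, the toy data, and the constant alpha vector are assumptions for illustration (alpha would normally come from solving K*alpha = y during GP training), and the snippet has not been verified against the implementation.

// Usage sketch only -- include paths follow the <core/...> convention used in
// the header above; the gp-hik-core path and the toy values are assumptions.
#include <vector>

#include <core/vector/VectorT.h>
#include <core/vector/VVector.h>
#include <gp-hik-core/FastMinKernel.h>   // assumed include path

int main()
{
  // toy data: 5 dense training examples with 3 dimensions each (example-wise layout)
  std::vector< std::vector<double> > X ( 5, std::vector<double> ( 3, 0.2 ) );

  const double noise = 0.1;
  NICE::FastMinKernel fmk ( X, noise );

  // placeholder coefficients (constant for illustration; normally from GP training)
  NICE::Vector alpha ( fmk.get_n(), 1.0 );

  // part 2 of the preparation: per-dimension partial sums A and B
  NICE::VVector A;
  NICE::VVector B;
  fmk.hik_prepare_alpha_multiplications ( alpha, A, B );

  // K*alpha over all training examples via the minimum kernel trick
  NICE::Vector beta;
  fmk.hik_kernel_multiply ( A, B, alpha, beta );

  // score a new (dense) test vector: scalar result of k_*^T * alpha
  NICE::Vector xstar ( fmk.get_d(), 0.4 );
  double score ( 0.0 );
  fmk.hik_kernel_sum ( A, B, xstar, score );

  // The *_fast variants instead read a lookup table T laid out as documented
  // above: one block of q.size() entries per dimension, accessed as
  // T[dim*q.size() + j], where j is the quantization bin of the test value.

  return 0;
}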