
more quantization flexibility, still unstable

Alexander Freytag committed 9 years ago
commit 9004cc3d46

+ 31 - 1
FMKGPHyperparameterOptimization.cpp

@@ -39,6 +39,10 @@
 #include "gp-hik-core/parameterizedFunctions/PFExp.h"
 #include "gp-hik-core/parameterizedFunctions/PFMKL.h"
 #include "gp-hik-core/parameterizedFunctions/PFWeightedDim.h"
+// quantization schemes
+#include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"
+#include "gp-hik-core/quantization/Quantization1DAequiDist0ToMax.h"
+#include "gp-hik-core/quantization/QuantizationNDAequiDist0ToMax.h"
 
 
 
@@ -446,7 +450,33 @@ void FMKGPHyperparameterOptimization::initFromConfig ( const Config *_conf,
     int numBins = _conf->gI ( _confSection, "num_bins", 100 );
     if ( this->b_verbose )
       std::cerr << "FMKGPHyperparameterOptimization: quantization initialized with " << numBins << " bins." << std::endl;
-    this->q = new Quantization ( numBins );
+    
+    
+    std::string s_quantType = _conf->gS( _confSection, "s_quantType", "1d-aequi-0-1" );
+    if ( s_quantType == "1d-aequi-0-1" )
+    {
+      this->q = new NICE::Quantization1DAequiDist0To1 ( numBins );
+    }
+    else if ( s_quantType == "1d-aequi-0-max" )
+    {
+      // FIXME this explicit setting is just one option. Alternatively, we could compute the largest value of all training data
+      double vmax = _conf->gD( _confSection, "vmax-Quantization1DAequiDist0ToMax", 1.0 );
+      NICE::Vector upperBound ( 1 );
+      upperBound ( 0 ) = vmax;
+      
+      this->q = new NICE::Quantization1DAequiDist0ToMax ( numBins, &upperBound );
+    }
+    else if ( s_quantType == "nd-aequi-0-max" )
+    {
+      // FIXME load the upper bounds from a separate file or compute them here...
+      this->q = new NICE::QuantizationNDAequiDist0ToMax ( numBins );
+    }
+    else
+    {
+      fthrow(Exception, "Quantization type is unknown " << s_quantType);
+    }
   }
   else
   {
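
A minimal usage sketch of the new switch (not taken from this commit): the keys mirror the gS/gI/gD lookups above; the section name "GPHIKClassifier", the "use_quantization" flag, and the sS/sI/sD/sB setter names are assumptions about the surrounding NICE code, not something this hunk shows.

    #include <core/basics/Config.h>

    int main()
    {
      NICE::Config conf;
      conf.sB( "GPHIKClassifier", "use_quantization", true );                  // flag name assumed
      conf.sS( "GPHIKClassifier", "s_quantType", "1d-aequi-0-max" );           // or "1d-aequi-0-1" / "nd-aequi-0-max"
      conf.sI( "GPHIKClassifier", "num_bins", 100 );
      conf.sD( "GPHIKClassifier", "vmax-Quantization1DAequiDist0ToMax", 2.5 ); // only read for "1d-aequi-0-max"
      // conf is then handed to FMKGPHyperparameterOptimization::initFromConfig
      return 0;
    }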

+ 49 - 49
FastMinKernel.cpp

@@ -250,14 +250,14 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
 
 double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVector & _A, 
                                                               const NICE::VVector & _B,
-                                                              const Quantization & _q,
+                                                              const Quantization * _q,
                                                               const ParameterizedFunction *_pf 
                                                              ) const
 {
   //NOTE keep in mind: for doing this, we already have precomputed A and B using hik_prepare_alpha_multiplications!
   
   // number of quantization bins
-  uint hmax = _q.size();
+  uint hmax = _q->size();
 
   // store (transformed) prototypes
   double *prototypes = new double [ hmax ];
@@ -265,9 +265,9 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
     if ( _pf != NULL ) {
       // FIXME: the transformed prototypes could change from dimension to another dimension
       // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = _pf->f ( 1, _q.getPrototype(i) );
+      prototypes[i] = _pf->f ( 1, _q->getPrototype(i) );
     } else {
-      prototypes[i] = _q.getPrototype(i);
+      prototypes[i] = _q->getPrototype(i);
     }
 
 
@@ -294,7 +294,7 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
     uint index = 0;
     // we use the quantization of the original features! the transformed feature were
     // already used to calculate A and B, this of course assumes monotonic functions!!!
-    uint qBin = _q.quantize ( i->first ); 
+    uint qBin = _q->quantize ( i->first, dim ); 
 
     // the next loop is linear in max(hmax, n)
     // REMARK: this could be changed to hmax*log(n), when
@@ -319,7 +319,7 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
           i++;
 
           if ( i->first !=  iPredecessor->first )
-            qBin = _q.quantize ( i->first );
+            qBin = _q->quantize ( i->first, dim );
         }
         // compute current element in the lookup table and keep in mind that
         // index is the next element and not the previous one
@@ -344,12 +344,12 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
 }
 
 double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha, 
-                                             const Quantization & _q, 
+                                             const Quantization * _q, 
                                              const ParameterizedFunction *_pf 
                                             ) const
 {
   // number of quantization bins
-  uint hmax = _q.size();
+  uint hmax = _q->size();
 
   // store (transformed) prototypes
   double *prototypes = new double [ hmax ];
@@ -357,9 +357,9 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
     if ( _pf != NULL ) {
       // FIXME: the transformed prototypes could change from dimension to another dimension
       // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = _pf->f ( 1, _q.getPrototype(i) );
+      prototypes[i] = _pf->f ( 1, _q->getPrototype(i) );
     } else {
-      prototypes[i] = _q.getPrototype(i);
+      prototypes[i] = _q->getPrototype(i);
     }
 
   // creating the lookup table as pure C, which might be beneficial
@@ -391,7 +391,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
     uint index = 0;
     
     // we use the quantization of the original features! Nevetheless, the resulting lookupTable is computed using the transformed ones
-    uint qBin = _q.quantize ( i->first ); 
+    uint qBin = _q->quantize ( i->first, dim ); 
     
     double alpha_sum(0.0);
     double alpha_times_x_sum(0.0);
@@ -423,7 +423,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
           i++;
 
           if ( i->first !=  iPredecessor->first )
-            qBin = _q.quantize ( i->first );
+            qBin = _q->quantize ( i->first, dim );
         }
         // compute current element in the lookup table and keep in mind that
         // index is the next element and not the previous one
@@ -454,7 +454,7 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
                                          const double & _alphaNew, 
                                          const double & _alphaOld, 
                                          const uint & _idx, 
-                                         const Quantization & _q, 
+                                         const Quantization * _q, 
                                          const ParameterizedFunction *_pf 
                                         ) const
 {
@@ -466,7 +466,7 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
   }
   
   // number of quantization bins
-  uint hmax = _q.size();
+  uint hmax = _q->size();
 
   // store (transformed) prototypes
   double *prototypes = new double [ hmax ];
@@ -474,9 +474,9 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
     if ( _pf != NULL ) {
       // FIXME: the transformed prototypes could change from dimension to another dimension
       // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = _pf->f ( 1, _q.getPrototype(i) );
+      prototypes[i] = _pf->f ( 1, _q->getPrototype(i) );
     } else {
-      prototypes[i] = _q.getPrototype(i);
+      prototypes[i] = _q->getPrototype(i);
     }
   
   double diffOfAlpha(_alphaNew - _alphaOld);
@@ -494,7 +494,7 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
     for (uint j = 0; j < hmax; j++)
     {
         double fval;
-        uint q_bin = _q.quantize(x_i);
+        uint q_bin = _q->quantize( x_i, dim );
         
         if ( q_bin > j )
           fval = prototypes[j];
@@ -575,7 +575,7 @@ void FastMinKernel::hik_kernel_multiply(const NICE::VVector & _A,
 }
 
 void FastMinKernel::hik_kernel_multiply_fast(const double *_Tlookup, 
-                                             const Quantization & _q, 
+                                             const Quantization * _q, 
                                              const NICE::Vector & _alpha, 
                                              NICE::Vector & _beta) const
 {
@@ -593,8 +593,8 @@ void FastMinKernel::hik_kernel_multiply_fast(const double *_Tlookup,
     {
       const SortedVectorSparse<double>::dataelement & de = i->second;
       uint feat = de.first;
-      uint qBin = _q.quantize(i->first);
-      _beta[feat] += _Tlookup[dim*_q.size() + qBin];
+      uint qBin = _q->quantize( i->first, dim );
+      _beta[feat] += _Tlookup[dim*_q->size() + qBin];
     }
   }
   
@@ -752,7 +752,7 @@ void FastMinKernel::hik_kernel_sum(const NICE::VVector & _A,
 }
 
 void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup, 
-                                        const Quantization & _q, 
+                                        const Quantization * _q, 
                                         const NICE::Vector & _xstar, 
                                         double & _beta
                                        ) const
@@ -768,14 +768,14 @@ void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
   for ( uint dim = 0; dim < this->ui_d; dim++)
   {
     double v = _xstar[dim];
-    uint qBin = _q.quantize(v);
+    uint qBin = _q->quantize( v, dim );
     
-    _beta += _Tlookup[dim*_q.size() + qBin];
+    _beta += _Tlookup[dim*_q->size() + qBin];
   }
 }
 
 void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup, 
-                                        const Quantization & _q, 
+                                        const Quantization * _q, 
                                         const NICE::SparseVector & _xstar, 
                                         double & _beta
                                        ) const
@@ -789,15 +789,15 @@ void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
   {
     uint dim = i->first;
     double v = i->second;
-    uint qBin = _q.quantize(v);
+    uint qBin = _q->quantize( v, dim );
     
-    _beta += _Tlookup[dim*_q.size() + qBin];
+    _beta += _Tlookup[dim*_q->size() + qBin];
   }
 }
 
 double *FastMinKernel::solveLin(const NICE::Vector & _y, 
                                 NICE::Vector & _alpha,
-                                const Quantization & _q, 
+                                const Quantization * _q, 
                                 const ParameterizedFunction *_pf, 
                                 const bool & _useRandomSubsets, 
                                 uint _maxIterations, 
@@ -811,7 +811,7 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
   bool verboseMinimal ( false );
   
   // number of quantization bins
-  uint hmax = _q.size();
+  uint hmax = _q->size();
   
   NICE::Vector diagonalElements(_y.size(),0.0);
   this->X_sorted.hikDiagonalElements(diagonalElements);
@@ -877,7 +877,7 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
         for (uint j = 0; j < this->ui_d; j++)
         {
           x_i = this->X_sorted(j,perm[i]);
-          pseudoResidual(perm[i]) += Tlookup[j*hmax + _q.quantize(x_i)];
+          pseudoResidual(perm[i]) += Tlookup[j*hmax + _q->quantize( x_i, j )];
         }
       
         //NOTE: this threshhold could also be a parameter of the function call
@@ -928,7 +928,7 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
         for (uint j = 0; j < this->ui_d; j++)
         {
           x_i = this->X_sorted(j,i);
-          pseudoResidual(i) += Tlookup[j*hmax + _q.quantize(x_i)];
+          pseudoResidual(i) += Tlookup[j*hmax + _q->quantize( x_i, j )];
         }
       
         //NOTE: this threshhold could also be a parameter of the function call
@@ -1132,13 +1132,13 @@ void FastMinKernel::hikPrepareKVNApproximation(NICE::VVector & _A) const
 }
 
 double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A, 
-                                                       const Quantization & _q, 
+                                                       const Quantization * _q, 
                                                        const ParameterizedFunction *_pf ) const
 {
   //NOTE keep in mind: for doing this, we already have precomputed A using hikPrepareSquaredKernelVector!
   
   // number of quantization bins
-  uint hmax = _q.size();
+  uint hmax = _q->size();
 
   // store (transformed) prototypes
   double *prototypes = new double [ hmax ];
@@ -1146,9 +1146,9 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
     if ( _pf != NULL ) {
       // FIXME: the transformed prototypes could change from dimension to another dimension
       // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = _pf->f ( 1, _q.getPrototype(i) );
+      prototypes[i] = _pf->f ( 1, _q->getPrototype(i) );
     } else {
-      prototypes[i] = _q.getPrototype(i);
+      prototypes[i] = _q->getPrototype(i);
     }
 
 
@@ -1172,7 +1172,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
     uint index = 0;
     // we use the quantization of the original features! the transformed feature were
     // already used to calculate A and B, this of course assumes monotonic functions!!!
-    uint qBin = _q.quantize ( i->first ); 
+    uint qBin = _q->quantize ( i->first, dim ); 
 
     // the next loop is linear in max(hmax, n)
     // REMARK: this could be changed to hmax*log(n), when
@@ -1198,7 +1198,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
           i++;
 
           if ( i->first !=  iPredecessor->first )
-            qBin = _q.quantize ( i->first );
+            qBin = _q->quantize ( i->first, dim );
         }
         // compute current element in the lookup table and keep in mind that
         // index is the next element and not the previous one
@@ -1224,12 +1224,12 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
   return Tlookup;  
 }
 
-double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantization & _q,
+double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantization * _q,
                                                                 const ParameterizedFunction *_pf 
                                                                ) const
 {
   // number of quantization bins
-  uint hmax = _q.size();
+  uint hmax = _q->size();
 
   // store (transformed) prototypes
   double *prototypes = new double [ hmax ];
@@ -1237,9 +1237,9 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
     if ( _pf != NULL ) {
       // FIXME: the transformed prototypes could change from dimension to another dimension
       // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = _pf->f ( 1, _q.getPrototype(i) );
+      prototypes[i] = _pf->f ( 1, _q->getPrototype(i) );
     } else {
-      prototypes[i] = _q.getPrototype(i);
+      prototypes[i] = _q->getPrototype(i);
     }
 
   // creating the lookup table as pure C, which might be beneficial
@@ -1262,7 +1262,7 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
     uint index = 0;
     
     // we use the quantization of the original features! Nevetheless, the resulting lookupTable is computed using the transformed ones
-    uint qBin = _q.quantize ( i->first ); 
+    uint qBin = _q->quantize ( i->first, dim ); 
     
     double sum(0.0);
     
@@ -1287,7 +1287,7 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
           i++;
 
           if ( i->first !=  iPredecessor->first )
-            qBin = _q.quantize ( i->first );
+            qBin = _q->quantize ( i->first, dim );
         }
         // compute current element in the lookup table and keep in mind that
         // index is the next element and not the previous one
@@ -1374,7 +1374,7 @@ void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & _A,
 }
 
 void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup, 
-                                                   const Quantization & _q, 
+                                                   const Quantization * _q, 
                                                    const NICE::SparseVector & _xstar, 
                                                    double & _norm
                                                   ) const
@@ -1387,9 +1387,9 @@ void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
     double v = i->second;
     // we do not need a parameterized function here, since the quantizer works on the original feature values. 
     // nonetheless, the lookup table was created using the parameterized function    
-    uint qBin = _q.quantize(v);
+    uint qBin = _q->quantize( v, dim );
     
-    _norm += _Tlookup[dim*_q.size() + qBin];
+    _norm += _Tlookup[dim*_q->size() + qBin];
   }  
 }
 
@@ -1517,7 +1517,7 @@ void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & _A,
 }
 
 void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup, 
-                                                   const Quantization & _q, 
+                                                   const Quantization * _q, 
                                                    const NICE::Vector & _xstar, 
                                                    double & _norm
                                                   ) const
@@ -1525,14 +1525,14 @@ void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
   _norm = 0.0;
   // runtime is O(d) if the quantizer is O(1)
   uint dim ( 0 );
-  for (Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++ )
+  for ( NICE::Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++ )
   {
     double v = *i;
     // we do not need a parameterized function here, since the quantizer works on the original feature values. 
     // nonetheless, the lookup table was created using the parameterized function    
-    uint qBin = _q.quantize(v);
+    uint qBin = _q->quantize( v, dim );
     
-    _norm += _Tlookup[dim*_q.size() + qBin];
+    _norm += _Tlookup[dim*_q->size() + qBin];
   }  
 }
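
A short caller-side sketch of the pointer-based interface (not taken from this commit): only signatures that appear in this diff are used; fmk, alpha, and xstar are assumed to come from a finished training run.

    #include "gp-hik-core/FastMinKernel.h"
    #include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"

    double quantizedKernelSum ( const NICE::FastMinKernel & fmk,
                                const NICE::Vector & alpha,
                                const NICE::Vector & xstar )
    {
      NICE::Quantization1DAequiDist0To1 q ( 100 );            // 100 bins on [0,1]
      double * T = fmk.hikPrepareLookupTable ( alpha, &q );   // pointer argument instead of a reference now

      double score ( 0.0 );
      fmk.hik_kernel_sum_fast ( T, &q, xstar, score );        // reads T[dim*q.size() + q.quantize(xstar[dim], dim)]

      delete [] T;
      return score;
    }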
 

+ 11 - 11
FastMinKernel.h

@@ -229,7 +229,7 @@ namespace NICE {
                                NICE::Vector & _beta
                               ) const;
       void hik_kernel_multiply_fast(const double *_Tlookup, 
-                                    const Quantization & _q, 
+                                    const Quantization * _q, 
                                     const NICE::Vector & _alpha, 
                                     NICE::Vector & _beta
                                    ) const;
@@ -283,7 +283,7 @@ namespace NICE {
       * @param beta result of the calculation
       */
       void hik_kernel_sum_fast(const double* _Tlookup, 
-                               const Quantization & _q, 
+                               const Quantization * _q, 
                                const NICE::Vector & _xstar, 
                                double & _beta
                               ) const;
@@ -299,7 +299,7 @@ namespace NICE {
       */      
 
       void hik_kernel_sum_fast(const double *_Tlookup, 
-                               const Quantization & _q, 
+                               const Quantization * _q, 
                                const NICE::SparseVector & _xstar, 
                                double & _beta
                               ) const;
@@ -318,7 +318,7 @@ namespace NICE {
       */
       double *hik_prepare_alpha_multiplications_fast(const NICE::VVector & _A, 
                                                      const NICE::VVector & _B, 
-                                                     const Quantization & _q, 
+                                                     const Quantization * _q, 
                                                      const ParameterizedFunction *_pf = NULL 
                                                     ) const;
       
@@ -334,7 +334,7 @@ namespace NICE {
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
       double* hikPrepareLookupTable(const NICE::Vector & _alpha, 
-                                    const Quantization & _q, 
+                                    const Quantization * _q, 
                                     const ParameterizedFunction *_pf = NULL
                                    ) const;
 
@@ -353,7 +353,7 @@ namespace NICE {
                                 const double & _alphaNew, 
                                 const double & _alphaOld, 
                                 const uint & _idx, 
-                                const Quantization & _q, 
+                                const Quantization * _q, 
                                 const ParameterizedFunction *pf 
                                ) const;
 
@@ -382,7 +382,7 @@ namespace NICE {
        **/
       double *solveLin(const NICE::Vector & _y, 
                        NICE::Vector & _alpha, 
-                       const Quantization & _q, 
+                       const Quantization * _q, 
                        const ParameterizedFunction *_pf = NULL, 
                        const bool & _useRandomSubsets = true, 
                        uint _maxIterations = 10000, 
@@ -420,7 +420,7 @@ namespace NICE {
       * @return C standard vector representing a q.size()*d double matrix and the lookup table T. Elements can be accessed with
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
-      double * hikPrepareKVNApproximationFast(NICE::VVector & _A, const Quantization & _q, const ParameterizedFunction *_pf = NULL ) const;
+      double * hikPrepareKVNApproximationFast(NICE::VVector & _A, const Quantization * _q, const ParameterizedFunction *_pf = NULL ) const;
       
       /**
       * @brief Compute lookup table for HIK calculation of |k_*|^2 assuming quantized test samples ( equals hikPrepareSquaredKernelVector + hikPrepareSquaredKernelVectorFast, but is faster). Approximation does not considere mixed terms between dimensions.
@@ -433,7 +433,7 @@ namespace NICE {
       * @return C standard vector representing a q.size()*d double matrix and the lookup table T. Elements can be accessed with
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
-      double* hikPrepareLookupTableForKVNApproximation(const Quantization & _q, const ParameterizedFunction *_pf = NULL) const;
+      double* hikPrepareLookupTableForKVNApproximation(const Quantization * _q, const ParameterizedFunction *_pf = NULL) const;
       
     //////////////////////////////////////////
     // variance computation: sparse inputs
@@ -461,7 +461,7 @@ namespace NICE {
       * @param xstar feature vector (indirect k_*)
       * @param norm result of the calculation
       */
-      void hikComputeKVNApproximationFast(const double *_Tlookup, const Quantization & _q, const NICE::SparseVector & _xstar, double & _norm ) const;
+      void hikComputeKVNApproximationFast(const double *_Tlookup, const Quantization * _q, const NICE::SparseVector & _xstar, double & _norm ) const;
 
       /**
       * @brief Compute the kernel vector k_* between training examples and test example. Runtime. O(n \times D). Exploiting sparsity
@@ -499,7 +499,7 @@ namespace NICE {
       * @param xstar feature vector (indirect k_*)
       * @param norm result of the calculation
       */
-      void hikComputeKVNApproximationFast(const double *_Tlookup, const Quantization & _q, const NICE::Vector & _xstar, double & _norm ) const;      
+      void hikComputeKVNApproximationFast(const double *_Tlookup, const Quantization * _q, const NICE::Vector & _xstar, double & _norm ) const;      
       
       /**
       * @brief Compute the kernel vector k_* between training examples and test example. Runtime. O(n \times D). Does not exploit sparsity - deprecated!

+ 1 - 1
Quantization.cpp

@@ -17,7 +17,7 @@ Quantization::Quantization( )
 }
 
 Quantization::Quantization( uint _numBins,
-                            NICE::Vector * v_upperBounds = NULL
+                            NICE::Vector * v_upperBounds
                           )
 {
 }

+ 6 - 4
Quantization.h

@@ -1,6 +1,6 @@
 /** 
 * @file Quantization.h
-* @brief Quantization of one-dimensional signals with a standard range of [0,1] (Interface)
+* @brief Quantization of signals (Interface)
 * @author Erik Rodner, Alexander Freytag
 * @date 01/09/2012
 */
@@ -17,7 +17,7 @@ namespace NICE {
   
  /** 
  * @class Quantization
- * @brief Quantization of one-dimensional signals with a standard range of [0,1]
+ * @brief Quantization of signals
  * @author Erik Rodner, Alexander Freytag
  */
  
@@ -75,7 +75,9 @@ class Quantization  : public NICE::Persistent
   *
   * @return value of the prototype
   */
-  virtual double getPrototype (uint _bin) const = 0;
+  virtual double getPrototype ( uint _bin, 
+                                const uint & _dim = 0    
+                              ) const = 0;
 
   /**
   * @brief Determine for a given signal value the bin in the vocabulary. This is not the corresponding prototype, which 
@@ -86,7 +88,7 @@ class Quantization  : public NICE::Persistent
   * @return index of the bin entry corresponding to the given signal value
   */
   virtual uint quantize ( double _value, 
-                          const uint & _dim
+                          const uint & _dim = 0
                         ) const = 0;
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
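
A small sketch of how the widened interface behaves (not taken from this commit): thanks to the default _dim = 0, existing one-dimensional callers keep working, while dimension-aware quantizers can be addressed per feature dimension; q is assumed to point at any concrete subclass.

    #include <iostream>
    #include "gp-hik-core/Quantization.h"

    void lookupBins ( const NICE::Quantization * q, double value, uint dim )
    {
      uint binDefault = q->quantize ( value );         // _dim defaults to 0, the old 1D behaviour
      uint binPerDim  = q->quantize ( value, dim );    // dimension-aware binning
      double proto    = q->getPrototype ( binPerDim, dim );
      std::cerr << binDefault << " " << binPerDim << " " << proto << std::endl;
    }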

+ 8 - 10
quantization/Quantization1DAequiDist0To1.cpp

@@ -30,13 +30,11 @@ Quantization1DAequiDist0To1::~Quantization1DAequiDist0To1()
 {
 }
 
-uint Quantization1DAequiDist0To1::size() const
-{
-  return this->ui_numBins;
-}
-  
-double Quantization1DAequiDist0To1::getPrototype (uint _bin) const
+double Quantization1DAequiDist0To1::getPrototype ( uint _bin, 
+                                                   const uint & _dim       
+                                                 ) const
 {
+  //  _dim will be ignored for this type of quantization. all dimensions are treated equally...  
   return _bin / (double)(this->ui_numBins-1);
 }
   
@@ -81,7 +79,7 @@ void Quantization1DAequiDist0To1::restore ( std::istream & _is,
       if ( tmp.compare("Quantization") == 0 )
       {
         // restore parent object
-        Quantization::restore(is);
+        Quantization::restore( _is );
       }       
       else
       {
@@ -100,14 +98,14 @@ void Quantization1DAequiDist0To1::restore ( std::istream & _is,
 }
 
 void Quantization1DAequiDist0To1::store ( std::ostream & _os, 
-                           int _format 
-                         ) const
+                                          int _format 
+                                        ) const
 {
   // show starting point
   _os << this->createStartTag( "Quantization1DAequiDist0To1" ) << std::endl;
   
   // store parent object
-  Quantization::store(os); 
+  Quantization::store( _os ); 
     
   // done
   _os << this->createEndTag( "Quantization1DAequiDist0To1" ) << std::endl;

+ 26 - 2
quantization/Quantization1DAequiDist0To1.h

@@ -4,8 +4,8 @@
 * @author Erik Rodner, Alexander Freytag
 * @date 01/09/2012
 */
-#ifndef _NICE_QUANTIZATIONINCLUDE
-#define _NICE_QUANTIZATIONINCLUDE
+#ifndef _NICE_QUANTIZATION1DAEQUIDIST0TO1INCLUDE
+#define _NICE_QUANTIZATION1DAEQUIDIST0TO1INCLUDE
 
 // NICE-core includes
 #include <core/basics/types.h>
@@ -54,6 +54,30 @@ class Quantization1DAequiDist0To1  : public NICE::Quantization
   /** simple destructor */
   virtual ~Quantization1DAequiDist0To1();
   
+  /**
+  * @brief get specific word or prototype element of the quantization
+  *
+  * @param bin the index of the bin
+  *
+  * @return value of the prototype
+  */
+  virtual double getPrototype ( uint _bin, 
+                                const uint & _dim = 0    
+                              ) const;    
+  
+  /**
+  * @brief Determine for a given signal value the bin in the vocabulary. This is not the corresponding prototype, which 
+  * has to be requested with getPrototype afterwards
+  *
+  * @param value signal function value
+  *
+  * @return index of the bin entry corresponding to the given signal value
+  */
+  virtual uint quantize ( double _value, 
+                          const uint & _dim = 0
+                        ) const;
+                                            
+  
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore
   ///////////////////// INTERFACE PERSISTENT /////////////////////

+ 17 - 18
quantization/Quantization1DAequiDist0ToMax.cpp

@@ -1,8 +1,8 @@
 /** 
 * @file Quantization1DAequiDist0ToMax.cpp
-* @brief Quantization1DAequiDist0ToMax of one-dimensional signals with a standard range of [0,1] (Implementation)
+* @brief Quantization1DAequiDist0ToMax of one-dimensional signals with selectable interval [0, vMax] (Implementation)
 * @author Erik Rodner, Alexander Freytag
-* @date 01/09/2012
+* @date 13-10-2015 ( dd-mm-yyyy )
 
 */
 #include <iostream>
@@ -24,37 +24,36 @@ Quantization1DAequiDist0ToMax::Quantization1DAequiDist0ToMax(
   this->ui_numBins = _numBins;
   this->v_upperBounds.resize ( 1 );
   if ( (_upperBounds != NULL)  && (_upperBounds->size() > 0) )
-    this->v_upperBounds(0) = *_upperBounds(0);
+    this->v_upperBounds[0] = (*_upperBounds)[0];
   else
-    this->v_upperBounds(0) = 1.0;
+    this->v_upperBounds[0] = 1.0;
 }
 
 Quantization1DAequiDist0ToMax::~Quantization1DAequiDist0ToMax()
 {
 }
 
-uint Quantization1DAequiDist0ToMax::size() const
-{
-  return this->ui_numBins;
-}
   
-double Quantization1DAequiDist0ToMax::getPrototype (uint _bin) const
+double Quantization1DAequiDist0ToMax::getPrototype ( uint _bin, 
+                                                     const uint & _dim       
+                                                   ) const
 {
-  return _bin / (double)(this->ui_numBins-1);
+  //  _dim will be ignored for this type of quantization. all dimensions are treated equally...  
+  return (this->v_upperBounds[0]*_bin) / (double)(this->ui_numBins-1);
 }
   
 uint Quantization1DAequiDist0ToMax::quantize ( double _value,
-                                             const uint & _dim
-                                           ) const
+                                               const uint & _dim
+                                             ) const
 {
   //  _dim will be ignored for this type of quantization. all dimensions are treated equally...
   
   if ( _value <= 0.0 ) 
     return 0;
-  else if ( _value >= 1.0 ) 
+  else if ( _value >= this->v_upperBounds[0] ) 
     return this->ui_numBins-1;
   else 
-    return (uint)( _value * (this->ui_numBins-1) + 0.5 );
+    return (uint)( _value/this->v_upperBounds[0]  * (this->ui_numBins-1) + 0.5 );
 }
 
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
@@ -84,7 +83,7 @@ void Quantization1DAequiDist0ToMax::restore ( std::istream & _is,
       if ( tmp.compare("Quantization") == 0 )
       {
         // restore parent object
-        Quantization::restore(is);
+        Quantization::restore(  _is );
       }       
       else
       {
@@ -103,14 +102,14 @@ void Quantization1DAequiDist0ToMax::restore ( std::istream & _is,
 }
 
 void Quantization1DAequiDist0ToMax::store ( std::ostream & _os, 
-                           int _format 
-                         ) const
+                                            int _format 
+                                          ) const
 {
   // show starting point
   _os << this->createStartTag( "Quantization1DAequiDist0ToMax" ) << std::endl;
   
   // store parent object
-  Quantization::store(os); 
+  Quantization::store( _os ); 
     
   // done
   _os << this->createEndTag( "Quantization1DAequiDist0ToMax" ) << std::endl;
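
A quick numerical check of the formulas above (not taken from this commit): with numBins = 100 and an upper bound of 2.0, the value 0.5 lands in bin (uint)(0.5/2.0 * 99 + 0.5) = 25, the prototype of that bin is 2.0 * 25 / 99 ≈ 0.505, and anything at or above the bound saturates in bin 99.

    #include <iostream>
    #include "gp-hik-core/quantization/Quantization1DAequiDist0ToMax.h"

    int main()
    {
      NICE::Vector upperBound ( 1 );
      upperBound[0] = 2.0;
      NICE::Quantization1DAequiDist0ToMax q ( 100, &upperBound );

      std::cerr << q.quantize ( 0.5 )    << std::endl;   // 25
      std::cerr << q.getPrototype ( 25 ) << std::endl;   // ~0.505
      std::cerr << q.quantize ( 3.7 )    << std::endl;   // 99, clamped to the upper bound
      return 0;
    }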

+ 31 - 8
quantization/Quantization1DAequiDist0ToMax.h

@@ -1,11 +1,11 @@
 /** 
 * @file Quantization1DAequiDist0ToMax.h
-* @brief Quantization of one-dimensional signals with a standard range of [0,1] (Interface)
-* @author Erik Rodner, Alexander Freytag
-* @date 01/09/2012
+* @brief Quantization of one-dimensional signals with selectable interval [0, vMax] (Interface)
+* @author Alexander Freytag
+* @date 13-10-2015 ( dd-mm-yyyy )
 */
-#ifndef _NICE_QUANTIZATIONINCLUDE
-#define _NICE_QUANTIZATIONINCLUDE
+#ifndef _NICE_QUANTIZATION1DAEQUIDIST0TOMAXINCLUDE
+#define _NICE_QUANTIZATION1DAEQUIDIST0TOMAXINCLUDE
 
 // NICE-core includes
 #include <core/basics/types.h>
@@ -15,10 +15,10 @@
 
 namespace NICE {
   
- /** 
+ /** Quantization1DAequiDist0ToMax
  * @class Quantization
- * @brief Quantization of one-dimensional signals with a standard range of [0,1]
- * @author Erik Rodner, Alexander Freytag
+ * @brief Quantization of one-dimensional signals with selectable interval [0, vMax]
+ * @author Alexander Freytag
  */
  
 class Quantization1DAequiDist0ToMax  : public NICE::Quantization
@@ -54,6 +54,29 @@ class Quantization1DAequiDist0ToMax  : public NICE::Quantization
   /** simple destructor */
   virtual ~Quantization1DAequiDist0ToMax();
   
+  /**
+  * @brief get specific word or prototype element of the quantization
+  *
+  * @param bin the index of the bin
+  *
+  * @return value of the prototype
+  */
+  virtual double getPrototype ( uint _bin, 
+                                const uint & _dim = 0    
+                              ) const;    
+  
+  /**
+  * @brief Determine for a given signal value the bin in the vocabulary. This is not the corresponding prototype, which 
+  * has to be requested with getPrototype afterwards
+  *
+  * @param value signal function value
+  *
+  * @return index of the bin entry corresponding to the given signal value
+  */
+  virtual uint quantize ( double _value, 
+                          const uint & _dim = 0
+                        ) const;
+  
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore
   ///////////////////// INTERFACE PERSISTENT /////////////////////

+ 110 - 0
quantization/QuantizationNDAequiDist0ToMax.cpp

@@ -0,0 +1,110 @@
+/** 
+* @file QuantizationNDAequiDist0ToMax.cpp
+* @brief QuantizationNDAequiDist0ToMax of one-dimensional signals with selectable interval [0, vMax] (Implementation)
+* @author Alexander Freytag
+* @date 13-10-2015 ( dd-mm-yyyy )
+
+*/
+#include <iostream>
+
+#include "QuantizationNDAequiDist0ToMax.h"
+
+using namespace NICE;
+
+QuantizationNDAequiDist0ToMax::QuantizationNDAequiDist0ToMax( ) 
+{
+  this->ui_numBins = 1;
+}
+
+QuantizationNDAequiDist0ToMax::QuantizationNDAequiDist0ToMax( 
+                               uint _numBins, 
+                               NICE::Vector * _upperBounds
+                             )
+{
+  this->ui_numBins    = _numBins;
+  // guard against the default NULL argument declared in the header
+  if ( ( _upperBounds != NULL ) && ( _upperBounds->size() > 0 ) )
+    this->v_upperBounds = *_upperBounds;
+  else
+    this->v_upperBounds = NICE::Vector ( 1, 1.0 );
+}
+
+QuantizationNDAequiDist0ToMax::~QuantizationNDAequiDist0ToMax()
+{
+}
+
+  
+double QuantizationNDAequiDist0ToMax::getPrototype ( uint _bin, 
+                                                     const uint & _dim       
+                                                   ) const
+{
+  return (this->v_upperBounds[_dim]*_bin) / (double)(this->ui_numBins-1);
+}
+  
+uint QuantizationNDAequiDist0ToMax::quantize ( double _value,
+                                               const uint & _dim
+                                             ) const
+{
+  if ( _value <= 0.0 ) 
+    return 0;
+  else if ( _value >= this->v_upperBounds[_dim] ) 
+    return this->ui_numBins-1;
+  else 
+    return (uint)( _value/this->v_upperBounds[_dim]  * (this->ui_numBins-1) + 0.5 );
+}
+
+// ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
+
+void QuantizationNDAequiDist0ToMax::restore ( std::istream & _is, 
+                                              int _format 
+                                            )
+{
+  if ( _is.good() )
+  {    
+    std::string tmp;    
+
+    bool b_endOfBlock ( false ) ;
+    
+    while ( !b_endOfBlock )
+    {
+      _is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "QuantizationNDAequiDist0ToMax" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }                  
+      
+      tmp = this->removeStartTag ( tmp );
+      
+      if ( tmp.compare("Quantization") == 0 )
+      {
+        // restore parent object
+        Quantization::restore( _is );
+      }       
+      else
+      {
+        std::cerr << "WARNING -- unexpected QuantizationNDAequiDist0ToMax object -- " << tmp << " -- for restoration... aborting" << std::endl;
+        throw;  
+      }
+      //FIXME also store and restore the upper bounds
+      
+      _is >> tmp; // end of block 
+      tmp = this->removeEndTag ( tmp );      
+    }
+   }
+  else
+  {
+    std::cerr << "QuantizationNDAequiDist0ToMax::restore -- InStream not initialized - restoring not possible!" << std::endl;
+  }
+}
+
+void QuantizationNDAequiDist0ToMax::store ( std::ostream & _os, 
+                                            int _format 
+                                          ) const
+{
+  // show starting point
+  _os << this->createStartTag( "QuantizationNDAequiDist0ToMax" ) << std::endl;
+  
+  // store parent object
+  Quantization::store( _os ); 
+    
+  // done
+  _os << this->createEndTag( "QuantizationNDAequiDist0ToMax" ) << std::endl;
+}
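
A minimal usage sketch of the dimension-specific quantizer (not taken from this commit): with per-dimension upper bounds of 1.0 and 4.0, the same raw value 0.5 falls into different bins depending on the dimension it comes from.

    #include <iostream>
    #include "gp-hik-core/quantization/QuantizationNDAequiDist0ToMax.h"

    int main()
    {
      NICE::Vector upperBounds ( 2 );
      upperBounds[0] = 1.0;   // dimension 0 covers [0, 1.0]
      upperBounds[1] = 4.0;   // dimension 1 covers [0, 4.0]

      NICE::QuantizationNDAequiDist0ToMax q ( 100, &upperBounds );

      std::cerr << q.quantize ( 0.5, 0 )    << std::endl;   // (uint)(0.5/1.0 * 99 + 0.5) = 50
      std::cerr << q.quantize ( 0.5, 1 )    << std::endl;   // (uint)(0.5/4.0 * 99 + 0.5) = 12
      std::cerr << q.getPrototype ( 12, 1 ) << std::endl;   // 4.0 * 12 / 99 ≈ 0.485
      return 0;
    }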

+ 96 - 0
quantization/QuantizationNDAequiDist0ToMax.h

@@ -0,0 +1,96 @@
+/** 
+* @file QuantizationNDAequiDist0ToMax.h
+* @brief Dimension-specific quantization of one-dimensional signals with selectable interval [0, vMax] (Interface)
+* @author Alexander Freytag
+* @date 13-10-2015 ( dd-mm-yyyy )
+*/
+#ifndef _NICE_QUANTIZATIONNDAEQUIDIST0TOMAXINCLUDE
+#define _NICE_QUANTIZATIONNDAEQUIDIST0TOMAXINCLUDE
+
+// NICE-core includes
+#include <core/basics/types.h>
+#include <core/basics/Persistent.h>
+
+#include "gp-hik-core/Quantization.h"
+
+namespace NICE {
+  
+ /** 
+ * @class QuantizationNDAequiDist0ToMax
+ * @brief Dimension-specific quantization of one-dimensional signals with selectable interval [0, vMax]
+ * @author Alexander Freytag
+ */
+ 
+class QuantizationNDAequiDist0ToMax  : public NICE::Quantization
+{
+
+  /** TODO
+   * The current implementation only provides uniform quantization. We could extend this
+   * by giving a ParameterizedFunction object to the constructor, which would allow us to inverse transform function values
+   * before performing the binning.
+   */
+
+  protected:
+
+  public:
+
+  /** 
+   * @brief default constructor
+   * @author Alexander Freytag
+   * @date 06-02-2014
+   */
+  
+  QuantizationNDAequiDist0ToMax( );
+  
+  /**
+   * @brief simple constructor
+   * @author Erik Rodner
+   * @date 
+   */
+  QuantizationNDAequiDist0ToMax( uint _numBins, 
+                               NICE::Vector * v_upperBounds = NULL
+                              );
+    
+  /** simple destructor */
+  virtual ~QuantizationNDAequiDist0ToMax();
+  
+  /**
+  * @brief get specific word or prototype element of the quantization
+  *
+  * @param bin the index of the bin
+  *
+  * @return value of the prototype
+  */
+  virtual double getPrototype ( uint _bin, 
+                                const uint & _dim = 0    
+                              ) const;    
+  
+  /**
+  * @brief Determine for a given signal value the bin in the vocabulary. This is not the corresponding prototype, which 
+  * has to be requested with getPrototype afterwards
+  *
+  * @param value signal function value
+  *
+  * @return index of the bin entry corresponding to the given signal value
+  */
+  virtual uint quantize ( double _value, 
+                          const uint & _dim = 0
+                        ) const;
+  
+  ///////////////////// INTERFACE PERSISTENT /////////////////////
+  // interface specific methods for store and restore
+  ///////////////////// INTERFACE PERSISTENT /////////////////////
+  virtual void restore ( std::istream & _is, 
+                         int _format = 0 
+                       );
+  virtual void store ( std::ostream & _os, 
+                       int _format = 0 
+                     ) const; 
+  virtual void clear () {};    
+
+ 
+};
+
+}
+
+#endif