
unstable, towards extraction of quantization parameters from training data, bug fixes

Alexander Freytag 9 years ago
parent
commit
9266de620b

+ 59 - 37
FMKGPHyperparameterOptimization.cpp

@@ -255,8 +255,8 @@ FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization( )
   this->b_debug = false;
   
   //stupid unneeded default values
-  this->i_binaryLabelPositive = -1;
-  this->i_binaryLabelNegative = -2;
+  this->i_binaryLabelPositive = 0;
+  this->i_binaryLabelNegative = 1;
   this->knownClasses.clear();  
   
   this->b_usePreviousAlphas = false;
@@ -284,8 +284,8 @@ FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization( const bool & _
   this->b_debug = false;
   
   //stupid unneeded default values
-  this->i_binaryLabelPositive = -1;
-  this->i_binaryLabelNegative = -2;
+  this->i_binaryLabelPositive = 0;
+  this->i_binaryLabelNegative = 1;
   this->knownClasses.clear();   
   
   this->b_usePreviousAlphas = false;
@@ -320,8 +320,8 @@ FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization ( const Config
   this->b_debug = false;
   
   //stupid unneeded default values
-  this->i_binaryLabelPositive = -1;
-  this->i_binaryLabelNegative = -2;
+  this->i_binaryLabelPositive = 0;
+  this->i_binaryLabelNegative = 1;
   this->knownClasses.clear();  
   
   this->b_usePreviousAlphas = false;
@@ -357,8 +357,8 @@ FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization ( const Config
   this->b_debug = false;
   
   //stupid unneeded default values
-  this->i_binaryLabelPositive = -1;
-  this->i_binaryLabelNegative = -2;
+  this->i_binaryLabelPositive = 0;
+  this->i_binaryLabelNegative = 1;
   this->knownClasses.clear();    
   
   this->b_usePreviousAlphas = false;
@@ -453,27 +453,22 @@ void FMKGPHyperparameterOptimization::initFromConfig ( const Config *_conf,
     
     
     std::string s_quantType = _conf->gS( _confSection, "s_quantType", "1d-aequi-0-1" );
+
     if ( s_quantType == "1d-aequi-0-1" )
     {
       this->q = new NICE::Quantization1DAequiDist0To1 ( numBins );
     }
     else if ( s_quantType == "1d-aequi-0-max" )
-    {
-      // FIXME this explicite setting is just one option. alternatively, we could compute the largest value of all training data
-      double vmax = _conf->gD( _confSection, "vmax-Quantization1DAequiDist0ToMax", 1.0 );
-      NICE::Vector upperBound ( 1 );
-      upperBound ( 0 ) = vmax;
-      
-      this->q = new NICE::Quantization1DAequiDist0ToMax ( numBins, &upperBound );
+    {     
+      this->q = new NICE::Quantization1DAequiDist0ToMax ( numBins );
     }
     else if ( s_quantType == "nd-aequi-0-1" )
     {
-      // FIXME load the upper bounds from a separate file or compute them here...
       this->q = new NICE::QuantizationNDAequiDist0ToMax ( numBins );
     }
     else
     {
-      fthrow(Exception, "Quantization type is unknown " << transform);
+      fthrow(Exception, "Quantization type is unknown " << s_quantType);
     }      
       
     
@@ -672,7 +667,10 @@ void FMKGPHyperparameterOptimization::setFastMinKernel ( FastMinKernel * _fmk )
       this->fmk = NULL;
     }    
     this->fmk = _fmk;
-  }  
+  }
+  
+  // if quantization is used, derive its parameters (e.g., upper bounds)
+  // from the training data we just received
+  if ( this->q != NULL )
+    this->q->computeParametersFromData ( &( this->fmk->featureMatrix() ) );
 }
 
 void FMKGPHyperparameterOptimization::setNrOfEigenvaluesToConsiderForVarApprox ( const int & _nrOfEigenvaluesToConsiderForVarApprox )
@@ -833,9 +831,9 @@ void FMKGPHyperparameterOptimization::computeMatricesAndLUTs ( const GPLikelihoo
     this->precomputedA[ i->first ] = A;
     this->precomputedB[ i->first ] = B;
 
-    if ( q != NULL )
+    if ( this->q != NULL )
     {
-      double *T = fmk->hik_prepare_alpha_multiplications_fast ( A, B, *q, pf );
+      double *T = fmk->hik_prepare_alpha_multiplications_fast ( A, B, this->q, this->pf );
       //just to be sure that we do not waste space here
       if ( precomputedT[ i->first ] != NULL )
         delete precomputedT[ i->first ];
@@ -1122,9 +1120,9 @@ void FMKGPHyperparameterOptimization::prepareVarianceApproximationRough()
   this->precomputedAForVarEst = AVar;
   this->precomputedAForVarEst.setIoUntilEndOfFile ( false );
 
-  if ( q != NULL )
+  if ( this->q != NULL )
   {   
-    double *T = this->fmk->hikPrepareLookupTableForKVNApproximation ( *q, pf );
+    double *T = this->fmk->hikPrepareLookupTableForKVNApproximation ( this->q, this->pf );
     this->precomputedTForVarEst = T;
   }
 }
@@ -1159,7 +1157,7 @@ uint FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & _xst
     if ( this->q != NULL ) {
       std::map<uint, double *>::const_iterator j = this->precomputedT.find ( classno );
       double *T = j->second;
-      this->fmk->hik_kernel_sum_fast ( T, *q, _xstar, beta );
+      this->fmk->hik_kernel_sum_fast ( T, this->q, _xstar, beta );
     } else {
       const PrecomputedType & A = i->second;
       std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
@@ -1220,7 +1218,7 @@ uint FMKGPHyperparameterOptimization::classify ( const NICE::Vector & _xstar,
     {
       std::map<uint, double *>::const_iterator j = this->precomputedT.find ( classno );
       double *T = j->second;
-      this->fmk->hik_kernel_sum_fast ( T, *q, _xstar, beta );
+      this->fmk->hik_kernel_sum_fast ( T, this->q, _xstar, beta );
     }
     else
     {
@@ -1283,13 +1281,13 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateRough
   // ---------------- compute the approximation of the second term --------------------
   double normKStar;
 
-  if ( q != NULL )
+  if ( this->q != NULL )
   {
     if ( precomputedTForVarEst == NULL )
     {
       fthrow ( Exception, "The precomputed LUT for uncertainty prediction is NULL...have you prepared the uncertainty prediction? Aborting..." );
     }
-    fmk->hikComputeKVNApproximationFast ( precomputedTForVarEst, *q, _x, normKStar );
+    fmk->hikComputeKVNApproximationFast ( precomputedTForVarEst, this->q, _x, normKStar );
   }
   else
   {
@@ -1498,13 +1496,13 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateRough
   // ---------------- compute the approximation of the second term --------------------
   double normKStar;
 
-  if ( q != NULL )
+  if ( this->q != NULL )
   {
     if ( precomputedTForVarEst == NULL )
     {
       fthrow ( Exception, "The precomputed LUT for uncertainty prediction is NULL...have you prepared the uncertainty prediction? Aborting..." );
     }
-    fmk->hikComputeKVNApproximationFast ( precomputedTForVarEst, *q, x, normKStar );
+    fmk->hikComputeKVNApproximationFast ( precomputedTForVarEst, this->q, x, normKStar );
   }
   else
   {
@@ -1512,7 +1510,7 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateRough
     {
       fthrow ( Exception, "The precomputedAForVarEst is empty...have you trained this classifer? Aborting..." );
     }    
-    fmk->hikComputeKVNApproximation ( precomputedAForVarEst, x, normKStar, pf );
+    fmk->hikComputeKVNApproximation ( precomputedAForVarEst, x, normKStar, this->pf );
   }
 
   predVariance = kSelf - ( 1.0 / eigenMax[0] )* normKStar;
@@ -1759,7 +1757,28 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
         {   
           if ( this->q != NULL )
             delete this->q;
-          this->q = new Quantization();
+          
+          std::string s_quantType;
+          _is >> s_quantType;
+          s_quantType = this->removeStartTag ( s_quantType );
+          
+          if ( s_quantType == "Quantization1DAequiDist0To1" )
+          {
+            this->q = new NICE::Quantization1DAequiDist0To1();
+          }
+          else if ( s_quantType == "Quantization1DAequiDist0ToMax" )
+          {           
+            this->q = new NICE::Quantization1DAequiDist0ToMax ( );
+          }
+          else if ( s_quantType == "QuantizationNDAequiDist0ToMax" )
+          {
+            this->q = new NICE::QuantizationNDAequiDist0ToMax ( );
+          }
+          else
+          {
+            fthrow(Exception, "Quantization type is unknown " << s_quantType);
+          }
+               
           this->q->restore ( _is, _format );
         }
         else
@@ -1793,14 +1812,17 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
           throw;
         } 
         
-        std::string transform = this->removeStartTag ( tmp );
-        
+        std::string transform ( this->removeStartTag( tmp ) );
 
         if ( transform == "PFAbsExp" )
         {
-          this->pf = new PFAbsExp ();
+          this->pf = new NICE::PFAbsExp ();
         } else if ( transform == "PFExp" ) {
-          this->pf = new PFExp ();
+          this->pf = new NICE::PFExp ();
+        }
+        else if ( transform == "PFIdentity" )
+        {
+          this->pf = new NICE::PFIdentity( );          
         } else {
           fthrow(Exception, "Transformation type is unknown " << transform);
         }
@@ -1826,7 +1848,7 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
           PrecomputedType pct;
           pct.setIoUntilEndOfFile ( false );
           pct.restore ( _is, _format );
-          precomputedA.insert ( std::pair<uint, PrecomputedType> ( nr, pct ) );
+          this->precomputedA.insert ( std::pair<uint, PrecomputedType> ( nr, pct ) );
         }
         
         _is >> tmp; // end of block 
@@ -1848,7 +1870,7 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
           PrecomputedType pct;
           pct.setIoUntilEndOfFile ( false );
           pct.restore ( _is, _format );
-          precomputedB.insert ( std::pair<uint, PrecomputedType> ( nr, pct ) );
+          this->precomputedB.insert ( std::pair<uint, PrecomputedType> ( nr, pct ) );
         }    
         
         _is >> tmp; // end of block 
@@ -2250,7 +2272,7 @@ void FMKGPHyperparameterOptimization::store ( std::ostream & _os,
     if ( q != NULL )
     {
       _os << "NOTNULL" << std::endl;
-      q->store ( _os, _format );
+      this->q->store ( _os, _format );
     }
     else
     {

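For reference, a minimal sketch of how the new config-driven quantization selection above is exercised; the section name "GPHIKClassifier" and the setter calls follow tests/TestGPHIKPersistent.cpp below, the rest is an assumption:

#include <core/basics/Config.h>
#include <gp-hik-core/GPHIKClassifier.h>

int main ()
{
  NICE::Config conf;
  conf.sB ( "GPHIKClassifier", "use_quantization", true );
  // pick one of "1d-aequi-0-1", "1d-aequi-0-max", "nd-aequi-0-max"
  conf.sS ( "GPHIKClassifier", "s_quantType", "1d-aequi-0-max" );

  // initFromConfig instantiates the matching NICE::Quantization subclass
  NICE::GPHIKClassifier classifier ( &conf );
  return 0;
}
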
+ 2 - 2
FMKGPHyperparameterOptimization.h

@@ -103,9 +103,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     NICE::Vector labels; 
     
     //! store the class number of the positive class (i.e., larger class no), only used in binary settings
-    int i_binaryLabelPositive;
+    uint i_binaryLabelPositive;
     //! store the class number of the negative class (i.e., smaller class no), only used in binary settings
-    int i_binaryLabelNegative;
+    uint i_binaryLabelNegative;
     
     //! contains all class numbers of the currently known classes
     std::set<uint> knownClasses;

+ 11 - 13
GMHIKernel.cpp

@@ -34,7 +34,7 @@ GMHIKernel::~GMHIKernel()
 void GMHIKernel::multiply (NICE::Vector & y, const NICE::Vector & x) const
 {
   //do we want to use any quantization at all?
-  if (q != NULL)
+  if ( this->q != NULL )
   {
     double *T;
     if (useOldPreparation)
@@ -43,13 +43,13 @@ void GMHIKernel::multiply (NICE::Vector & y, const NICE::Vector & x) const
       NICE::VVector B; 
       // prepare to calculate sum_i x_i K(x,x_i)
       fmk->hik_prepare_alpha_multiplications(x, A, B);
-      T = fmk->hik_prepare_alpha_multiplications_fast(A, B, *q, pf);
+      T = fmk->hik_prepare_alpha_multiplications_fast(A, B, this->q, pf);
     }
     else
     {
-      T = fmk->hikPrepareLookupTable(x, *q, pf );
+      T = fmk->hikPrepareLookupTable(x, this->q, pf );
     }
-    fmk->hik_kernel_multiply_fast ( T, *q, x, y ); 
+    fmk->hik_kernel_multiply_fast ( T, this->q, x, y ); 
     delete [] T;
   }
   else //no quantization
@@ -111,23 +111,23 @@ void GMHIKernel::setUseOldPreparation( const bool & _useOldPreparation)
 
 uint GMHIKernel::getNumParameters() const 
 {
-  if ( pf == NULL )
+  if ( this->pf == NULL )
     return 0;
   else
-    return pf->parameters().size();
+    return this->pf->parameters().size();
 }
 
-void GMHIKernel::getParameters(Vector & parameters) const
+void GMHIKernel::getParameters( NICE::Vector & parameters ) const
 {
-  if ( pf == NULL )
+  if ( this->pf == NULL )
     parameters.clear();
   else {
-    parameters.resize( pf->parameters().size() );
-    parameters = pf->parameters();
+    parameters.resize( this->pf->parameters().size() );
+    parameters = this->pf->parameters();
   }
 }
 
-void GMHIKernel::setParameters(const Vector & parameters)
+void GMHIKernel::setParameters( const NICE::Vector & parameters )
 {
   if ( pf == NULL && parameters.size() > 0 )
     fthrow(Exception, "Unable to set parameters of a non-parameterized GMHIKernel object");
@@ -135,8 +135,6 @@ void GMHIKernel::setParameters(const Vector & parameters)
   pf->parameters() = parameters;
   
   fmk->applyFunctionToFeatureMatrix( pf );
-
-  // only for debugging with small matrices: fmk->featureMatrix().print();
 }
 
 void GMHIKernel::getDiagonalElements ( Vector & diagonalElements ) const
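
A short usage sketch for the parameter interface tidied up above, assuming a GMHIKernel gmkFast built with a parameterized transformation as in tests/TestFastHIK.cpp (the concrete parameter value is illustrative):

NICE::Vector params;
gmkFast.getParameters ( params );   // current parameters of the underlying pf
params[0] = 2.0;                    // e.g., a new exponent for PFAbsExp
gmkFast.setParameters ( params );   // re-applies pf to the feature matrix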

+ 8 - 0
Quantization.h

@@ -13,6 +13,9 @@
 // 
 #include <core/vector/VectorT.h>
 
+// gp-hik-core includes
+#include "gp-hik-core/FeatureMatrixT.h"
+
 namespace NICE {
   
  /** 
@@ -90,6 +93,11 @@ class Quantization  : public NICE::Persistent
   virtual uint quantize ( double _value, 
                           const uint & _dim = 0
                         ) const = 0;
+
+  /** derive quantization parameters (e.g., upper bounds) from the training data */
+  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) = 0;
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore
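
A minimal sketch of a concrete override of the new pure virtual method; MyQuantization is hypothetical, but the 0-to-max variants further down implement exactly this pattern:

void MyQuantization::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
{
  // derive the quantization range from the training features
  this->v_upperBounds.resize ( 1 );
  this->v_upperBounds ( 0 ) = _fm->getLargestValue();
}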

+ 33 - 33
parameterizedFunctions/PFAbsExp.h

@@ -70,39 +70,39 @@ class PFAbsExp : public ParameterizedFunction
       
       while ( !b_endOfBlock )
       {
-	is >> tmp; // start of block 
-	
-	if ( this->isEndTag( tmp, "PFAbsExp" ) )
-	{
-	  b_endOfBlock = true;
-	  continue;
-	}
-		    
-	
-	tmp = this->removeStartTag ( tmp );
-	
-	if ( tmp.compare("upperBound") == 0 )
-	{
-	  is >> upperBound;
-	  is >> tmp; // end of block 
-	  tmp = this->removeEndTag ( tmp );
-	}
-	else if ( tmp.compare("lowerBound") == 0 )
-	{
-	  is >> lowerBound;
-	  is >> tmp; // end of block 
-	  tmp = this->removeEndTag ( tmp );	    
-	}
-	else if ( tmp.compare("ParameterizedFunction") == 0 )
-	{
-	  // restore parent object
-	  ParameterizedFunction::restore(is);
-	}	
-	else
-	{
-	  std::cerr << "WARNING -- unexpected PFAbsExp object -- " << tmp << " -- for restoration... aborting" << std::endl;
-	  throw;	
-	}      
+        is >> tmp; // start of block 
+        
+        if ( this->isEndTag( tmp, "PFAbsExp" ) )
+        {
+          b_endOfBlock = true;
+          continue;
+        }
+              
+        
+        tmp = this->removeStartTag ( tmp );
+        
+        if ( tmp.compare("upperBound") == 0 )
+        {
+          is >> upperBound;
+          is >> tmp; // end of block 
+          tmp = this->removeEndTag ( tmp );
+        }
+        else if ( tmp.compare("lowerBound") == 0 )
+        {
+          is >> lowerBound;
+          is >> tmp; // end of block 
+          tmp = this->removeEndTag ( tmp );
+        }
+        else if ( tmp.compare("ParameterizedFunction") == 0 )
+        {
+          // restore parent object
+          ParameterizedFunction::restore(is);
+        }
+        else
+        {
+          std::cerr << "WARNING -- unexpected PFAbsExp object -- " << tmp << " -- for restoration... aborting" << std::endl;
+          throw;
+        }
       }
       
 

+ 8 - 6
quantization/Quantization1DAequiDist0To1.cpp

@@ -52,6 +52,10 @@ uint Quantization1DAequiDist0To1::quantize ( double _value,
     return (uint)( _value * (this->ui_numBins-1) + 0.5 );
 }
 
+void Quantization1DAequiDist0To1::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
+{
+  // nothing to do here...
+}
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
 void Quantization1DAequiDist0To1::restore ( std::istream & _is, 
@@ -60,8 +64,9 @@ void Quantization1DAequiDist0To1::restore ( std::istream & _is,
 {
   if ( _is.good() )
   {    
-    std::string tmp;    
-
+    
+    std::string tmp;  
+    
     bool b_endOfBlock ( false ) ;
     
     while ( !b_endOfBlock )
@@ -85,10 +90,7 @@ void Quantization1DAequiDist0To1::restore ( std::istream & _is,
       {
         std::cerr << "WARNING -- unexpected Quantization1DAequiDist0To1 object -- " << tmp << " -- for restoration... aborting" << std::endl;
         throw;  
-      }
-      
-      _is >> tmp; // end of block 
-      tmp = this->removeEndTag ( tmp );      
+      } 
     }
    }
   else

+ 2 - 1
quantization/Quantization1DAequiDist0To1.h

@@ -76,7 +76,8 @@ class Quantization1DAequiDist0To1  : public NICE::Quantization
   virtual uint quantize ( double _value, 
                           const uint & _dim = 0
                         ) const;
-                                            
+                        
+  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) ;                        
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore

+ 9 - 3
quantization/Quantization1DAequiDist0ToMax.cpp

@@ -56,6 +56,14 @@ uint Quantization1DAequiDist0ToMax::quantize ( double _value,
     return (uint)( _value/this->v_upperBounds[0]  * (this->ui_numBins-1) + 0.5 );
 }
 
+
+
+void Quantization1DAequiDist0ToMax::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
+{
+  // upper bound = largest feature value observed in the training data
+  this->v_upperBounds.resize ( 1 );
+  this->v_upperBounds ( 0 ) = _fm->getLargestValue();
+}
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
 void Quantization1DAequiDist0ToMax::restore ( std::istream & _is, 
@@ -64,6 +72,7 @@ void Quantization1DAequiDist0ToMax::restore ( std::istream & _is,
 {
   if ( _is.good() )
   {    
+    
     std::string tmp;    
 
     bool b_endOfBlock ( false ) ;
@@ -90,9 +99,6 @@ void Quantization1DAequiDist0ToMax::restore ( std::istream & _is,
         std::cerr << "WARNING -- unexpected Quantization1DAequiDist0ToMax object -- " << tmp << " -- for restoration... aborting" << std::endl;
         throw;  
       }
-      
-      _is >> tmp; // end of block 
-      tmp = this->removeEndTag ( tmp );      
     }
    }
   else

+ 3 - 0
quantization/Quantization1DAequiDist0ToMax.h

@@ -76,6 +76,9 @@ class Quantization1DAequiDist0ToMax  : public NICE::Quantization
   virtual uint quantize ( double _value, 
                           const uint & _dim = 0
                         ) const;
+                        
+                        
+  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) ;
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore

+ 22 - 5
quantization/QuantizationNDAequiDist0ToMax.cpp

@@ -49,6 +49,17 @@ uint QuantizationNDAequiDist0ToMax::quantize ( double _value,
     return (uint)( _value/this->v_upperBounds[_dim]  * (this->ui_numBins-1) + 0.5 );
 }
 
+
+void QuantizationNDAequiDist0ToMax::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
+{
+  // derive per-dimension upper bounds from the training data; using a 0.99
+  // quantile instead of the hard maximum makes the bounds robust to outliers
+  double d_quantile ( 0.99 );
+  this->v_upperBounds = _fm->getLargestValuePerDimension ( d_quantile );
+}
+
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
 void QuantizationNDAequiDist0ToMax::restore ( std::istream & _is, 
@@ -57,7 +68,14 @@ void QuantizationNDAequiDist0ToMax::restore ( std::istream & _is,
 {
   if ( _is.good() )
   {    
-    std::string tmp;    
+    std::string tmp;
+    _is >> tmp; //class name 
+    
+    if ( ! this->isStartTag( tmp, "QuantizationNDAequiDist0ToMax" ) )
+    {
+        std::cerr << " WARNING - attempt to restore QuantizationNDAequiDist0ToMax, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+        throw;
+    } 
 
     bool b_endOfBlock ( false ) ;
     
@@ -83,10 +101,9 @@ void QuantizationNDAequiDist0ToMax::restore ( std::istream & _is,
         std::cerr << "WARNING -- unexpected QuantizationNDAequiDist0ToMax object -- " << tmp << " -- for restoration... aborting" << std::endl;
         throw;  
       }
-      //FIXME also store and restore the upper bounds
-      
-      _is >> tmp; // end of block 
-      tmp = this->removeEndTag ( tmp );      
+      //FIXME also store and restore the upper bounds
     }
    }
   else
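
Taken together with setFastMinKernel above, the intended flow for the ND variant is roughly the following sketch; featureMatrix() returning a reference as well as numBins, value, and dim are assumptions:

NICE::QuantizationNDAequiDist0ToMax qnd ( numBins );
qnd.computeParametersFromData ( &( fmk.featureMatrix() ) ); // 0.99-quantile bound per dimension
uint bin = qnd.quantize ( value, dim );                     // scales by v_upperBounds[dim]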

+ 4 - 1
quantization/QuantizationNDAequiDist0ToMax.h

@@ -76,7 +76,10 @@ class QuantizationNDAequiDist0ToMax  : public NICE::Quantization
   virtual uint quantize ( double _value, 
                           const uint & _dim = 0
                         ) const;
-  
+                        
+                        
+  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) ;
+                          
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore
   ///////////////////// INTERFACE PERSISTENT /////////////////////

+ 26 - 16
tests/TestFastHIK.cpp

@@ -12,6 +12,10 @@
 #include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>
 #include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
 #include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+// 
+// 
+#include "gp-hik-core/Quantization.h"
+#include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"
 
 #include "TestFastHIK.h"
 
@@ -199,8 +203,11 @@ void TestFastHIK::testKernelMultiplicationFast()
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast ===================== " << std::endl;
   
-  Quantization q_gen ( numBins );
-  Quantization q ( 2*numBins -1);
+  NICE::Quantization * q_gen;
+  q_gen = new Quantization1DAequiDist0To1 ( numBins );  
+  
+  NICE::Quantization * q;
+  q = new Quantization1DAequiDist0To1 ( 2*numBins -1 );   
 
   // data is generated, such that there is no approximation error
   vector< vector<double> > dataMatrix;
@@ -212,7 +219,7 @@ void TestFastHIK::testKernelMultiplicationFast()
       if ( drand48() < sparse_prob ) {
         v[k] = 0;
       } else {
-        v[k] = q_gen.getPrototype( (rand() % numBins) );
+        v[k] = q_gen->getPrototype( (rand() % numBins) );
       }
     }
 
@@ -239,7 +246,7 @@ void TestFastHIK::testKernelMultiplicationFast()
     y[i] = sin(i);
    
   ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
-  GMHIKernel gmkFast ( &fmk, pf, &q );
+  GMHIKernel gmkFast ( &fmk, pf, q );
 
 //   pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
     
@@ -377,7 +384,8 @@ void TestFastHIK::testKernelSumFast()
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSumFast ===================== " << std::endl;
   
-  Quantization q ( numBins );
+  NICE::Quantization * q;
+  q = new Quantization1DAequiDist0To1 ( numBins );
 
   // data is generated, such that there is no approximation error
   vector< vector<double> > dataMatrix;
@@ -389,7 +397,7 @@ void TestFastHIK::testKernelSumFast()
       if ( drand48() < sparse_prob ) {
         v[k] = 0;
       } else {
-        v[k] = q.getPrototype( (rand() % numBins) );
+        v[k] = q->getPrototype( (rand() % numBins) );
       }
     }
 
@@ -414,7 +422,7 @@ void TestFastHIK::testKernelSumFast()
     if ( drand48() < sparse_prob ) {
       xstar[i] = 0;
     } else {
-      xstar[i] = q.getPrototype( (rand() % numBins) );
+      xstar[i] = q->getPrototype( (rand() % numBins) );
     }
 
   // convert to STL vector
@@ -510,7 +518,8 @@ void TestFastHIK::testLUTUpdate()
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testLUTUpdate ===================== " << std::endl;
 
-  Quantization q ( numBins );
+  NICE::Quantization * q;
+  q = new Quantization1DAequiDist0To1 ( numBins );
 
   // data is generated, such that there is no approximation error
   vector< vector<double> > dataMatrix;
@@ -522,7 +531,7 @@ void TestFastHIK::testLUTUpdate()
       if ( drand48() < sparse_prob ) {
         v[k] = 0;
       } else {
-        v[k] = q.getPrototype( (rand() % numBins) );
+        v[k] = q->getPrototype( (rand() % numBins) );
       }
     }
 
@@ -600,7 +609,7 @@ void TestFastHIK::testLUTUpdate()
   }
   
   
-  bool equal = compareLUTs(T, TNew, q.size()*d, 10e-8);
+  bool equal = compareLUTs(T, TNew, q->size()*d, 10e-8);
   
   if (verbose)
   {
@@ -609,16 +618,16 @@ void TestFastHIK::testLUTUpdate()
     else
     {
       std::cerr << "T are not equal :( " << std::endl;
-      for (uint i = 0; i < q.size()*d; i++)
+      for (uint i = 0; i < q->size()*d; i++)
       {
-        if ( (i % q.size()) == 0)
+        if ( (i % q->size()) == 0)
           std::cerr << std::endl;
         std::cerr << T[i] << " ";
       }
       std::cerr << "TNew: "<< std::endl;
-      for (uint i = 0; i < q.size()*d; i++)
+      for (uint i = 0; i < q->size()*d; i++)
       {
-        if ( (i % q.size()) == 0)
+        if ( (i % q->size()) == 0)
           std::cerr << std::endl;
         std::cerr << TNew[i] << " ";
       }     
@@ -641,7 +650,8 @@ void TestFastHIK::testLinSolve()
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testLinSolve ===================== " << std::endl;
 
-  NICE::Quantization q ( numBins );
+  NICE::Quantization * q;
+  q = new Quantization1DAequiDist0To1 ( numBins );
 
   // data is generated, such that there is no approximation error
   std::vector< std::vector<double> > dataMatrix;
@@ -653,7 +663,7 @@ void TestFastHIK::testLinSolve()
       if ( drand48() < sparse_prob ) {
         v[k] = 0;
       } else {
-        v[k] = q.getPrototype( (rand() % numBins) );
+        v[k] = q->getPrototype( (rand() % numBins) );
       }
     }
 

+ 15 - 5
tests/TestGPHIKPersistent.cpp

@@ -76,13 +76,13 @@ void TestGPHIKPersistent::testPersistentMethods()
   }  
   
   // TRAIN CLASSIFIER FROM SCRATCH
+  std::string confsection ( "GPHIKClassifier" );  
+  conf.sB ( confsection, "use_quantization", true );
+  conf.sS ( confsection, "s_quantType", "1d-aequi-0-1" );
+  conf.sS ( confsection, "transform", "identity");  
   
   classifier = new GPHIKClassifier ( &conf );  
-  
-  yBinTrain *= 2;
-  yBinTrain -= 1;
-  yBinTrain *= -1;
-  
+   
   if ( verbose )
   {
     std::cerr << yBinTrain << std::endl;
@@ -106,6 +106,11 @@ void TestGPHIKPersistent::testPersistentMethods()
   //   
   fbOut.close(); 
   
+  if ( verbose )
+  {
+    std::cerr << "store done successfully" << std::endl;    
+  }  
+  
   
   // TEST RESTORING ABILITIES
     
@@ -121,6 +126,11 @@ void TestGPHIKPersistent::testPersistentMethods()
   //   
   fbIn.close();   
   
+  if ( verbose )
+  {
+    std::cerr << "restore done successfully" << std::endl;    
+  }    
+  
   
   // TEST both classifiers to produce equal results
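
For context, the store/restore round trip exercised above in minimal form; the plain file streams and the default-constructed classifier are assumptions, the test itself goes through std::filebuf:

std::ofstream ofs ( "classifier.data" );
classifier->store ( ofs );
ofs.close();

NICE::GPHIKClassifier * restored = new NICE::GPHIKClassifier ();
std::ifstream ifs ( "classifier.data" );
restored->restore ( ifs );
ifs.close();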