Johannes Ruehle 9 years ago
parent
commit
eeafe8785b
7 changed files with 1145 additions and 238 deletions
  1. + 70 - 77
      FastMinKernel.cpp
  2. + 260 - 0
      GMHIKernelRaw.cpp
  3. + 78 - 0
      GMHIKernelRaw.h
  4. + 326 - 0
      GPHIKRawClassifier.cpp
  5. + 177 - 0
      GPHIKRawClassifier.h
  6. + 192 - 158
      tests/TestFastHIK.cpp
  7. + 42 - 3
      tests/TestGPHIKOnlineLearnable.cpp

+ 70 - 77
FastMinKernel.cpp

@@ -147,8 +147,6 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
                                                       NICE::VVector & _A,
                                                       NICE::VVector & _B) const
 {
-//   std::cerr << "FastMinKernel::hik_prepare_alpha_multiplications" << std::endl;
-//   std::cerr << "alpha: " << alpha << std::endl;
   _A.resize( this->ui_d );
   _B.resize( this->ui_d );
 
@@ -189,14 +187,10 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
   for (uint i = 0; i < this->ui_d; i++)
   {
     uint numNonZero = this->X_sorted.getNumberOfNonZeroElementsPerDimension(i);
-    //DEBUG
-    //std::cerr << "number of non-zero elements in dimension " << i << " / " << d << ": " << numNonZero << std::endl;
     _A[i].resize( numNonZero );
     _B[i].resize( numNonZero  );
   }
 
-  //  for more information see hik_prepare_alpha_multiplications
-
   for (uint dim = 0; dim < this->ui_d; dim++)
   {
     double alpha_sum(0.0);
@@ -230,7 +224,7 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
 double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVector & _A,
                                                               const NICE::VVector & _B,
                                                               const Quantization * _q,
-                                                              const ParameterizedFunction *_pf 
+                                                              const ParameterizedFunction *_pf
                                                              ) const
 {
   //NOTE keep in mind: for doing this, we already have precomputed A and B using hik_prepare_alpha_multiplications!
@@ -242,7 +236,7 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
   double * prototypes   = new double [ hmax * this->ui_d ];
   double * p_prototypes = prototypes;
 
-  for (uint dim = 0; dim < this->ui_d; dim++)  
+  for (uint dim = 0; dim < this->ui_d; dim++)
   {
     for ( uint i = 0 ; i < hmax ; i++ )
     {
@@ -253,16 +247,14 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
       {
         *p_prototypes = _q->getPrototype( i, dim );
       }
-      
+
       p_prototypes++;
     }
-  } 
-  
+  }
+
   // creating the lookup table as pure C, which might be beneficial
   // for fast evaluation
   double *Tlookup = new double [ hmax * this->ui_d ];
-//     std::cerr << "size of LUT: " << hmax * this->ui_d << std::endl;
-//   sizeOfLUT = hmax * this->d;
 
 
   // loop through all dimensions
@@ -281,7 +273,7 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
     uint index = 0;
    // we use the quantization of the original features! the transformed features were
    // already used to calculate A and B; this of course assumes monotonic functions!
-    uint qBin = _q->quantize ( i->first, dim ); 
+    uint qBin = _q->quantize ( i->first, dim );
 
     // the next loop is linear in max(hmax, n)
     // REMARK: this could be changed to hmax*log(n), when
@@ -331,9 +323,9 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
   return Tlookup;
 }
 
-double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha, 
-                                             const Quantization * _q, 
-                                             const ParameterizedFunction *_pf 
+double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
+                                             const Quantization * _q,
+                                             const ParameterizedFunction *_pf
                                             ) const
 {
   // number of quantization bins
@@ -343,7 +335,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
   double * prototypes   = new double [ hmax * this->ui_d ];
   double * p_prototypes = prototypes;
 
-  for (uint dim = 0; dim < this->ui_d; dim++)  
+  for (uint dim = 0; dim < this->ui_d; dim++)
   {
     for ( uint i = 0 ; i < hmax ; i++ )
     {
@@ -354,7 +346,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
       {
         *p_prototypes = _q->getPrototype( i, dim );
       }
-      
+
       p_prototypes++;
     }
   }
@@ -388,8 +380,8 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
     uint index = 0;
 
    // we use the quantization of the original features! Nevertheless, the resulting lookupTable is computed using the transformed ones
-    uint qBin = _q->quantize ( i->first, dim ); 
-    
+    uint qBin = _q->quantize ( i->first, dim );
+
     double alpha_sum(0.0);
     double alpha_times_x_sum(0.0);
     double alpha_sum_prev(0.0);
@@ -447,12 +439,12 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
 }
 
 
-void FastMinKernel::hikUpdateLookupTable(double * _T, 
-                                         const double & _alphaNew, 
-                                         const double & _alphaOld, 
-                                         const uint & _idx, 
-                                         const Quantization * _q, 
-                                         const ParameterizedFunction *_pf 
+void FastMinKernel::hikUpdateLookupTable(double * _T,
+                                         const double & _alphaNew,
+                                         const double & _alphaOld,
+                                         const uint & _idx,
+                                         const Quantization * _q,
+                                         const ParameterizedFunction *_pf
                                         ) const
 {
 
@@ -469,7 +461,7 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
   double * prototypes   = new double [ hmax * this->ui_d ];
   double * p_prototypes = prototypes;
 
-  for (uint dim = 0; dim < this->ui_d; dim++)  
+  for (uint dim = 0; dim < this->ui_d; dim++)
   {
     for ( uint i = 0 ; i < hmax ; i++ )
     {
@@ -480,11 +472,11 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
       {
         *p_prototypes = _q->getPrototype( i, dim );
       }
-      
+
       p_prototypes++;
     }
   }
-  
+
   double diffOfAlpha(_alphaNew - _alphaOld);
 
   // loop through all dimensions
@@ -501,7 +493,7 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
     {
         double fval;
         uint q_bin = _q->quantize( x_i, dim );
-        
+
         if ( q_bin > j )
           fval = prototypes[ dim*hmax + j ];
         else
@@ -583,9 +575,9 @@ void FastMinKernel::hik_kernel_multiply(const NICE::VVector & _A,
   }
 }
 
-void FastMinKernel::hik_kernel_multiply_fast(const double *_Tlookup, 
-                                             const Quantization * _q, 
-                                             const NICE::Vector & _alpha, 
+void FastMinKernel::hik_kernel_multiply_fast(const double *_Tlookup,
+                                             const Quantization * _q,
+                                             const NICE::Vector & _alpha,
                                              NICE::Vector & _beta) const
 {
   _beta.resize( this->ui_n );
@@ -760,9 +752,9 @@ void FastMinKernel::hik_kernel_sum(const NICE::VVector & _A,
   }
 }
 
-void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup, 
-                                        const Quantization * _q, 
-                                        const NICE::Vector & _xstar, 
+void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
+                                        const Quantization * _q,
+                                        const NICE::Vector & _xstar,
                                         double & _beta
                                        ) const
 {
@@ -778,14 +770,14 @@ void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
   {
     double v = _xstar[dim];
     uint qBin = _q->quantize( v, dim );
-    
+
     _beta += _Tlookup[dim*_q->getNumberOfBins() + qBin];
   }
 }
 
-void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup, 
-                                        const Quantization * _q, 
-                                        const NICE::SparseVector & _xstar, 
+void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
+                                        const Quantization * _q,
+                                        const NICE::SparseVector & _xstar,
                                         double & _beta
                                        ) const
 {
@@ -799,19 +791,19 @@ void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
     uint dim = i->first;
     double v = i->second;
     uint qBin = _q->quantize( v, dim );
-    
+
     _beta += _Tlookup[dim*_q->getNumberOfBins() + qBin];
   }
 }
 
 double *FastMinKernel::solveLin(const NICE::Vector & _y,
                                 NICE::Vector & _alpha,
-                                const Quantization * _q, 
-                                const ParameterizedFunction *_pf, 
-                                const bool & _useRandomSubsets, 
-                                uint _maxIterations, 
-                                const uint & _sizeOfRandomSubset, 
-                                double _minDelta, 
+                                const Quantization * _q,
+                                const ParameterizedFunction *_pf,
+                                const bool & _useRandomSubsets,
+                                uint _maxIterations,
+                                const uint & _sizeOfRandomSubset,
+                                double _minDelta,
                                 bool _timeAnalysis
                                ) const
 {
@@ -826,7 +818,7 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
 
   // number of quantization bins
   uint hmax = _q->getNumberOfBins();
-  
+
   NICE::Vector diagonalElements(_y.size(),0.0);
   this->X_sorted.hikDiagonalElements(diagonalElements);
   diagonalElements += this->d_noise;
@@ -867,11 +859,13 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
 
     if (sizeOfRandomSubset <= 0)
       sizeOfRandomSubset = _y.size();
+    if (sizeOfRandomSubset > _y.size())
+      sizeOfRandomSubset = _y.size();
 
     for ( iter = 1; iter <= _maxIterations; iter++ )
     {
       NICE::Vector perm;
-      this->randomPermutation( perm, indices, _sizeOfRandomSubset );
+      this->randomPermutation( perm, indices, sizeOfRandomSubset );
 
       if ( _timeAnalysis )
       {
@@ -890,7 +884,6 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
 
       for ( uint i = 0; i < sizeOfRandomSubset; i++)
       {
-
         pseudoResidual(perm[i]) = -_y(perm[i]) + (this->d_noise * _alpha(perm[i]));
         for (uint j = 0; j < this->ui_d; j++)
         {
@@ -1151,8 +1144,8 @@ void FastMinKernel::hikPrepareKVNApproximation(NICE::VVector & _A) const
   }
 }
 
-double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A, 
-                                                       const Quantization * _q, 
+double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
+                                                       const Quantization * _q,
                                                        const ParameterizedFunction *_pf ) const
 {
   //NOTE keep in mind: for doing this, we already have precomputed A using hikPrepareSquaredKernelVector!
@@ -1163,8 +1156,8 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
   // store (transformed) prototypes
   double *prototypes = new double [ hmax * this->ui_d ];
   double * p_prototypes = prototypes;
-  
-  for (uint dim = 0; dim < this->ui_d; dim++)  
+
+  for (uint dim = 0; dim < this->ui_d; dim++)
   {
     for ( uint i = 0 ; i < hmax ; i++ )
     {
@@ -1175,7 +1168,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
       {
         *p_prototypes = _q->getPrototype( i, dim );
       }
-      
+
       p_prototypes++;
     }
   }
@@ -1201,7 +1194,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
     uint index = 0;
    // we use the quantization of the original features! the transformed features were
    // already used to calculate A and B; this of course assumes monotonic functions!
-    uint qBin = _q->quantize ( i->first, dim ); 
+    uint qBin = _q->quantize ( i->first, dim );
 
     // the next loop is linear in max(hmax, n)
     // REMARK: this could be changed to hmax*log(n), when
@@ -1238,7 +1231,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
           t = _A[dim][index];
         } else {
           // standard case
-          t =  _A[dim][index-1] + pow( fval, 2 ) * (this->ui_n-nrZeroIndices-(index) );          
+          t =  _A[dim][index-1] + pow( fval, 2 ) * (this->ui_n-nrZeroIndices-(index) );
         }
       }
 
@@ -1252,7 +1245,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
 }
 
 double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantization * _q,
-                                                                const ParameterizedFunction *_pf 
+                                                                const ParameterizedFunction *_pf
                                                                ) const
 {
   // number of quantization bins
@@ -1262,7 +1255,7 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
   double *prototypes = new double [ hmax * this->ui_d ];
   double * p_prototypes = prototypes;
 
-  for (uint dim = 0; dim < this->ui_d; dim++)  
+  for (uint dim = 0; dim < this->ui_d; dim++)
   {
     for ( uint i = 0 ; i < hmax ; i++ )
     {
@@ -1273,10 +1266,10 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
       {
         *p_prototypes = _q->getPrototype( i, dim );
       }
-      
+
       p_prototypes++;
     }
-  }    
+  }
 
   // creating the lookup table as pure C, which might be beneficial
   // for fast evaluation
@@ -1298,7 +1291,7 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
     uint index = 0;
 
    // we use the quantization of the original features! Nevertheless, the resulting lookupTable is computed using the transformed ones
-    uint qBin = _q->quantize ( i->first, dim ); 
+    uint qBin = _q->quantize ( i->first, dim );
 
     double sum(0.0);
 
@@ -1409,9 +1402,9 @@ void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & _A,
   }
 }
 
-void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup, 
-                                                   const Quantization * _q, 
-                                                   const NICE::SparseVector & _xstar, 
+void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
+                                                   const Quantization * _q,
+                                                   const NICE::SparseVector & _xstar,
                                                    double & _norm
                                                   ) const
 {
@@ -1421,12 +1414,12 @@ void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
   {
     uint dim = i->first;
     double v = i->second;
-    // we do not need a parameterized function here, since the quantizer works on the original feature values. 
-    // nonetheless, the lookup table was created using the parameterized function    
+    // we do not need a parameterized function here, since the quantizer works on the original feature values.
+    // nonetheless, the lookup table was created using the parameterized function
     uint qBin = _q->quantize( v, dim );
-    
+
     _norm += _Tlookup[dim*_q->getNumberOfBins() + qBin];
-  }  
+  }
 }
 
 void FastMinKernel::hikComputeKernelVector ( const NICE::SparseVector& _xstar,
@@ -1552,9 +1545,9 @@ void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & _A,
   }
 }
 
-void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup, 
-                                                   const Quantization * _q, 
-                                                   const NICE::Vector & _xstar, 
+void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
+                                                   const Quantization * _q,
+                                                   const NICE::Vector & _xstar,
                                                    double & _norm
                                                   ) const
 {
@@ -1564,12 +1557,12 @@ void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
   for ( NICE::Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++ )
   {
     double v = *i;
-    // we do not need a parameterized function here, since the quantizer works on the original feature values. 
-    // nonetheless, the lookup table was created using the parameterized function    
+    // we do not need a parameterized function here, since the quantizer works on the original feature values.
+    // nonetheless, the lookup table was created using the parameterized function
     uint qBin = _q->quantize( v, dim );
-    
+
     _norm += _Tlookup[dim*_q->getNumberOfBins() + qBin];
-  }  
+  }
 }
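
Aside: the recurring pattern in this file is to expand the quantizer into a per-dimension prototype table, fold A and B into a flat lookup table of size d * numBins, and then score test points in O(d). As a reading aid, here is a minimal sketch of how such a LUT is consumed; it mirrors hik_kernel_sum_fast above (dimension-major layout, quantization of the raw, untransformed feature values). The helper name is hypothetical and not part of this commit:

    // hypothetical helper: evaluate the LUT for a dense test vector
    double evalWithLUT ( const double * _Tlookup,
                         const NICE::Quantization * _q,
                         const NICE::Vector & _xstar )
    {
      double beta = 0.0;
      for ( uint dim = 0; dim < _xstar.size(); dim++ )
      {
        // quantize the raw value; the LUT entries already contain the transformed sums
        uint qBin = _q->quantize ( _xstar[dim], dim );
        beta += _Tlookup[ dim * _q->getNumberOfBins() + qBin ];
      }
      return beta;
    }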
 
 

+ 260 - 0
GMHIKernelRaw.cpp

@@ -0,0 +1,260 @@
+/**
+* @file GMHIKernelRaw.cpp
+* @brief Fast multiplication with histogram intersection kernel matrices (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 01/02/2012
+
+*/
+#include <iostream>
+
+#include <core/vector/VVector.h>
+#include <core/basics/Timer.h>
+
+#include "GMHIKernelRaw.h"
+
+using namespace NICE;
+using namespace std;
+
+
+GMHIKernelRaw::GMHIKernelRaw( const std::vector< const NICE::SparseVector *> &_examples, const double _d_noise )
+{
+    this->examples_raw = NULL;
+    this->nnz_per_dimension = NULL;
+    this->table_A = NULL;
+    this->table_B = NULL;
+
+    initData(_examples);
+    this->d_noise = _d_noise;
+}
+
+GMHIKernelRaw::~GMHIKernelRaw()
+{
+    cleanupData();
+}
+
+void GMHIKernelRaw::cleanupData()
+{
+    if ( this->examples_raw != NULL ) {
+        for ( uint d = 0; d < this->num_dimension; d++ )
+            if (examples_raw[d] != NULL)
+                delete [] examples_raw[d];
+        delete [] this->examples_raw;
+        this->examples_raw = NULL;
+    }
+    if ( this->nnz_per_dimension != NULL ) {
+        delete [] this->nnz_per_dimension;
+        this->nnz_per_dimension = NULL;
+    }
+    if ( this->table_A != NULL ) {
+        for ( uint d = 0; d < this->num_dimension; d++ )
+            if (table_A[d] != NULL)
+                delete [] table_A[d];
+        delete [] this->table_A;
+        this->table_A = NULL;
+    }
+    if ( this->table_B != NULL ) {
+        for ( uint d = 0; d < this->num_dimension; d++ )
+            if (table_B[d] != NULL)
+                delete [] table_B[d];
+        delete [] this->table_B;
+        this->table_B = NULL;
+    }
+
+}
+
+void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_examples )
+{
+    if (_examples.size() == 0 )
+        fthrow(Exception, "No examples given for learning");
+
+    cleanupData();
+
+    this->num_dimension = _examples[0]->getDim();
+    this->examples_raw = new sparseVectorElement *[num_dimension];
+    this->nnz_per_dimension = new uint [num_dimension];
+    this->num_examples = _examples.size();
+
+    // waste memory and allocate a non-sparse data block
+    sparseVectorElement **examples_raw_increment = new sparseVectorElement *[num_dimension];
+    for (uint d = 0; d < this->num_dimension; d++)
+    {
+        this->examples_raw[d] = new sparseVectorElement [ this->num_examples ];
+        examples_raw_increment[d] = this->examples_raw[d];
+        this->nnz_per_dimension[d] = 0;
+    }
+
+    uint example_index = 0;
+    for (std::vector< const NICE::SparseVector * >::const_iterator i = _examples.begin();
+            i != _examples.end(); i++, example_index++)
+    {
+        const NICE::SparseVector *x = *i;
+        for ( NICE::SparseVector::const_iterator j = x->begin(); j != x->end(); j++ )
+        {
+            uint index = j->first;
+            double value = j->second;
+            examples_raw_increment[index]->value = value;
+            examples_raw_increment[index]->example_index = example_index;
+            // move to the next element
+            examples_raw_increment[index]++;
+            this->nnz_per_dimension[index]++;
+        }
+    }
+
+    delete [] examples_raw_increment;
+
+    // sort along each dimension
+    for (uint d = 0; d < this->num_dimension; d++)
+    {
+        uint nnz = this->nnz_per_dimension[d];
+        if ( nnz > 1 )
+            std::sort( this->examples_raw[d], this->examples_raw[d] + nnz );
+    }
+
+    // pre-allocate the A and B matrices
+    this->table_A = new double *[this->num_dimension];
+    this->table_B = new double *[this->num_dimension];
+    for (uint i = 0; i < this->num_dimension; i++)
+    {
+        uint nnz = this->nnz_per_dimension[i];
+        if (nnz>0) {
+            this->table_A[i] = new double [ nnz ];
+            this->table_B[i] = new double [ nnz ];
+        } else {
+            this->table_A[i] = NULL;
+            this->table_B[i] = NULL;
+        }
+    }
+}
+
+double **GMHIKernelRaw::allocateTable() const
+{
+    double **table;
+    table = new double *[this->num_dimension];
+    for (uint i = 0; i < this->num_dimension; i++)
+    {
+        uint nnz = this->nnz_per_dimension[i];
+        if (nnz>0) {
+            table[i] = new double [ nnz ];
+        } else {
+            table[i] = NULL;
+        }
+    }
+    return table;
+}
+
+void GMHIKernelRaw::copyTable(double **src, double **dst) const
+{
+    for (uint i = 0; i < this->num_dimension; i++)
+    {
+        uint nnz = this->nnz_per_dimension[i];
+        if (nnz>0) {
+            for (uint j = 0; j < nnz; j++)
+                dst[i][j] = src[i][j];
+        } else {
+            dst[i] = NULL;
+        }
+    }
+}
+
+void GMHIKernelRaw::updateTables ( const NICE::Vector & _x ) const
+{
+    for (uint dim = 0; dim < this->num_dimension; dim++)
+    {
+      double alpha_sum = 0.0;
+      double alpha_times_x_sum = 0.0;
+      uint nnz = nnz_per_dimension[dim];
+
+      // loop through all elements in sorted order
+      sparseVectorElement *training_values_in_dim = examples_raw[dim];
+      for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
+      {
+        // index of the feature
+        int index = training_values_in_dim->example_index;
+        // element of the feature
+        double elem = training_values_in_dim->value;
+
+        alpha_times_x_sum += _x[index] * elem;
+        this->table_A[dim][cntNonzeroFeat] = alpha_times_x_sum;
+
+        alpha_sum += _x[index];
+        this->table_B[dim][cntNonzeroFeat] = alpha_sum;
+      }
+    }
+
+}
+
+/** multiply with a vector: A*x = y */
+void GMHIKernelRaw::multiply (NICE::Vector & _y, const NICE::Vector & _x) const
+{
+  // STEP 1: initialize tables A and B
+  updateTables(_x);
+
+  _y.resize( this->num_examples );
+  _y.set(0.0);
+
+  for (uint dim = 0; dim < this->num_dimension; dim++)
+  {
+    uint nnz = this->nnz_per_dimension[dim];
+
+    if ( nnz == 0 ) {
+      // all values are zero in this dimension :) and we can simply ignore the feature
+      continue;
+    }
+
+    sparseVectorElement *training_values_in_dim = examples_raw[dim];
+    for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
+    {
+      uint feat = training_values_in_dim->example_index;
+      uint inversePosition = cntNonzeroFeat;
+      double fval = training_values_in_dim->value;
+
+      double firstPart = this->table_A[dim][inversePosition];
+      double secondPart = this->table_B[dim][nnz-1] - this->table_B[dim][inversePosition];
+
+      _y[feat] += firstPart + fval * secondPart;
+    }
+  }
+
+  for (uint feat = 0; feat < this->num_examples; feat++)
+    _y[feat] += this->d_noise * _x[feat];
+
+
+}
+
+/** get the number of rows in A */
+uint GMHIKernelRaw::rows () const
+{
+  // return the number of examples
+  return num_examples;
+}
+
+/** get the number of columns in A */
+uint GMHIKernelRaw::cols () const
+{
+  // return the number of examples
+  return num_examples;
+}
+
+double **GMHIKernelRaw::getTableA() const
+{
+    double **t = allocateTable();
+    copyTable(this->table_A, t);
+    return t;
+}
+
+double **GMHIKernelRaw::getTableB() const
+{
+    double **t = allocateTable();
+    copyTable(this->table_B, t);
+    return t;
+}
+
+uint *GMHIKernelRaw::getNNZPerDimension() const
+{
+    uint *v = new uint[this->num_dimension];
+    for (uint i = 0; i < this->num_dimension; i++)
+        v[i] = this->nnz_per_dimension[i];
+    return v;
+}
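
For reference, multiply() uses the standard sorted-HIK decomposition. Let x_(1) <= ... <= x_(m) be the sorted nonzero values of dimension d and \alpha_{(r)} the coefficient of the example at sorted position r. updateTables() fills the running sums

    A_d[r] = \sum_{r' \le r} \alpha_{(r')} x_{(r')} ,        B_d[r] = \sum_{r' \le r} \alpha_{(r')} ,

so the contribution of dimension d to output entry i, whose value fval sits at sorted position r_i, is

    \sum_j \min( x_{j,d}, x_{i,d} ) \, \alpha_j = A_d[r_i] + fval \cdot ( B_d[m] - B_d[r_i] ) ,

which is exactly the firstPart + fval * secondPart line above. Zero entries contribute nothing to the minimum, and the final loop adds the d_noise * x regularization term.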

+ 78 - 0
GMHIKernelRaw.h

@@ -0,0 +1,78 @@
+/**
+* @file GMHIKernelRaw.h
+* @author Erik Rodner, Alexander Freytag
+* @brief Fast multiplication with histogram intersection kernel matrices (Interface)
+
+*/
+#ifndef _NICE_GMHIKERNELRAWINCLUDE
+#define _NICE_GMHIKERNELRAWINCLUDE
+
+#include <vector>
+
+#include <core/algebra/GenericMatrix.h>
+
+namespace NICE {
+
+ /**
+ * @class GMHIKernelRaw
+ * @brief Fast multiplication with histogram intersection kernel matrices
+ * @author Erik Rodner, Alexander Freytag
+ */
+
+class GMHIKernelRaw : public GenericMatrix
+{
+  public:
+    typedef struct sparseVectorElement {
+        uint example_index;
+        double value;
+
+        bool operator< (const sparseVectorElement & a) const
+        {
+            return value < a.value;
+        }
+
+    } sparseVectorElement;
+
+  protected:
+
+    sparseVectorElement **examples_raw;
+    double **table_A;
+    double **table_B;
+
+    uint *nnz_per_dimension;
+    uint num_dimension;
+    uint num_examples;
+    double d_noise;
+
+    void initData ( const std::vector< const NICE::SparseVector *> & examples );
+    void cleanupData ();
+    double **allocateTable() const;
+    void copyTable(double **src, double **dst) const;
+
+  public:
+
+    /** simple constructor */
+    GMHIKernelRaw( const std::vector< const NICE::SparseVector *> & examples, const double d_noise = 0.1 );
+
+    /** multiply with a vector: A*x = y; this is not really const anymore!! */
+    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
+
+    /** get the number of rows in A */
+    virtual uint rows () const;
+
+    /** get the number of columns in A */
+    virtual uint cols () const;
+
+    double **getTableA() const;
+    double **getTableB() const;
+    uint *getNNZPerDimension() const;
+
+    /** simple destructor */
+    virtual ~GMHIKernelRaw();
+
+    sparseVectorElement **getDataMatrix() const { return examples_raw; };
+    void updateTables ( const NICE::Vector & _x ) const;
+};
+
+}
+#endif
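
A minimal usage sketch of this interface (the include path is taken from the test changes below; everything else follows the declarations above):

    #include <vector>
    #include <core/vector/SparseVectorT.h>
    #include <gp-hik-core/GMHIKernelRaw.h>

    void demoMultiply ( const std::vector< const NICE::SparseVector * > & examples )
    {
      // represents K + 0.1*I without ever storing the n x n kernel matrix
      NICE::GMHIKernelRaw gm ( examples, 0.1 );

      NICE::Vector x ( gm.cols(), 1.0 ); // arbitrary input vector
      NICE::Vector y;
      gm.multiply ( y, x );              // y = (K + 0.1*I) * x
    }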

+ 326 - 0
GPHIKRawClassifier.cpp

@@ -0,0 +1,326 @@
+/**
+* @file GPHIKRawClassifier.cpp
+* @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning) (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/01/2012
+
+*/
+
+// STL includes
+#include <iostream>
+
+// NICE-core includes
+#include <core/basics/numerictools.h>
+#include <core/basics/Timer.h>
+
+#include <core/algebra/ILSConjugateGradients.h>
+
+// gp-hik-core includes
+#include "GPHIKRawClassifier.h"
+#include "GMHIKernelRaw.h"
+
+using namespace std;
+using namespace NICE;
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PROTECTED METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+
+
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PUBLIC METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+GPHIKRawClassifier::GPHIKRawClassifier( )
+{
+  this->b_isTrained = false;
+  this->confSection = "";
+  this->nnz_per_dimension = NULL;
+  this->q = NULL;
+  this->gm = NULL;
+
+  // in order to be sure about all necessary variables be setup with default values, we
+  // run initFromConfig with an empty config
+  NICE::Config tmpConfEmpty ;
+  this->initFromConfig ( &tmpConfEmpty, this->confSection );
+
+}
+
+GPHIKRawClassifier::GPHIKRawClassifier( const Config *_conf,
+                                  const string & _confSection
+                                )
+{
+  ///////////
+  // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
+  ///////////
+
+  this->b_isTrained = false;
+  this->confSection = "";
+  this->q = NULL;
+  this->nnz_per_dimension = NULL;
+  this->gm = NULL;
+
+  ///////////
+  // here comes the new code part different from the empty constructor
+  ///////////
+
+  this->confSection = _confSection;
+
+  // if no config file was given, we either restore the classifier from an external file, or run ::init with
+  // an empty config (thereby using default values) when calling the train-method
+  if ( _conf != NULL )
+  {
+    this->initFromConfig( _conf, _confSection );
+  }
+  else
+  {
+    // if no config was given, we create an empty one
+    NICE::Config tmpConfEmpty ;
+    this->initFromConfig ( &tmpConfEmpty, this->confSection );
+  }
+
+}
+
+GPHIKRawClassifier::~GPHIKRawClassifier()
+{
+  delete solver;
+  if ( this->gm != NULL )
+    delete this->gm;
+}
+
+void GPHIKRawClassifier::initFromConfig(const Config *_conf,
+                                     const string & _confSection
+                                    )
+{
+  this->d_noise     = _conf->gD( _confSection, "noise", 0.01);
+
+  this->confSection = _confSection;
+  this->b_verbose   = _conf->gB( _confSection, "verbose", false);
+  this->b_debug     = _conf->gB( _confSection, "debug", false);
+  this->f_tolerance = _conf->gD( _confSection, "f_tolerance", 1e-10);
+
+  string ilssection = "FMKGPHyperparameterOptimization";
+  uint ils_max_iterations = _conf->gI( ilssection, "ils_max_iterations", 1000 );
+  double ils_min_delta = _conf->gD( ilssection, "ils_min_delta", 1e-7 );
+  double ils_min_residual = _conf->gD( ilssection, "ils_min_residual", 1e-7 );
+  bool ils_verbose = _conf->gB( ilssection, "ils_verbose", false );
+  this->solver = new ILSConjugateGradients( ils_verbose, ils_max_iterations, ils_min_delta, ils_min_residual );
+}
+
+///////////////////// ///////////////////// /////////////////////
+//                         GET / SET
+///////////////////// ///////////////////// /////////////////////
+
+std::set<uint> GPHIKRawClassifier::getKnownClassNumbers ( ) const
+{
+  if ( ! this->b_isTrained )
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );
+
+  fthrow(Exception, "GPHIKRawClassifier::getKnownClassNumbers() not yet implemented");
+}
+
+
+///////////////////// ///////////////////// /////////////////////
+//                      CLASSIFIER STUFF
+///////////////////// ///////////////////// /////////////////////
+
+
+
+void GPHIKRawClassifier::classify ( const NICE::SparseVector * _xstar,
+                                 uint & _result,
+                                 SparseVector & _scores
+                               ) const
+{
+  if ( ! this->b_isTrained )
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );
+  _scores.clear();
+
+  GMHIKernelRaw::sparseVectorElement **dataMatrix = gm->getDataMatrix();
+
+  uint maxClassNo = 0;
+  for ( std::map<uint, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
+  {
+    uint classno = i->first;
+    maxClassNo = std::max ( maxClassNo, classno );
+    double beta = 0;
+
+    if ( this->q != NULL ) {
+      std::map<uint, double *>::const_iterator j = this->precomputedT.find ( classno );
+      double *T = j->second;
+      for (SparseVector::const_iterator xIt = _xstar->begin(); xIt != _xstar->end(); xIt++ )
+      {
+        uint dim = xIt->first;
+        double v = xIt->second;
+        uint qBin = q->quantize( v, dim );
+
+        beta += T[dim * q->getNumberOfBins() + qBin];
+      }
+    } else {
+      const PrecomputedType & A = i->second;
+      std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
+      const PrecomputedType & B = j->second;
+
+      for (SparseVector::const_iterator xIt = _xstar->begin(); xIt != _xstar->end(); xIt++)
+      {
+        uint dim = xIt->first;
+        double fval = xIt->second;
+
+        uint nnz = this->nnz_per_dimension[dim];
+        uint nz = this->num_examples - nnz;
+
+        if ( nnz == 0 ) continue;
+        if ( fval < this->f_tolerance ) continue;
+
+        uint position = 0;
+
+        //this->X_sorted.findFirstLargerInDimension(dim, fval, position);
+        GMHIKernelRaw::sparseVectorElement fval_element;
+        fval_element.value = fval;
+        GMHIKernelRaw::sparseVectorElement *it = upper_bound ( dataMatrix[dim], dataMatrix[dim] + nnz, fval_element );
+        position = distance ( dataMatrix[dim], it );
+
+        bool posIsZero ( position == 0 );
+        if ( !posIsZero )
+            position--;
+
+
+        double firstPart = 0.0;
+        if ( !posIsZero && ((position-nz) < this->num_examples) )
+          firstPart = (A[dim][position-nz]);
+
+        double secondPart( B[dim][this->num_examples-1-nz]);
+        if ( !posIsZero && (position >= nz) )
+            secondPart -= B[dim][position-nz];
+
+        // accumulate this dimension's contribution to the classification score
+        beta += firstPart + secondPart * fval;
+      }
+    }
+
+    _scores[ classno ] = beta;
+  }
+  _scores.setDim ( *this->knownClasses.rbegin() + 1 );
+
+
+  if ( this->knownClasses.size() > 2 )
+  { // multi-class classification
+    _result = _scores.maxElement();
+  }
+  else if ( this->knownClasses.size() == 2 ) // binary setting
+  {
+    uint class1 = *(this->knownClasses.begin());
+    uint class2 = *(this->knownClasses.rbegin());
+    uint class_for_which_we_have_a_score = _scores.begin()->first;
+    uint class_for_which_we_dont_have_a_score = (class1 == class_for_which_we_have_a_score ? class2 : class1);
+
+    _scores[class_for_which_we_dont_have_a_score] = - _scores[class_for_which_we_have_a_score];
+
+    _result = _scores[class_for_which_we_have_a_score] > 0.0 ? class_for_which_we_have_a_score : class_for_which_we_dont_have_a_score;
+  }
+
+}
+
+
+/** training process */
+void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples,
+                              const NICE::Vector & _labels
+                            )
+{
+  // security-check: examples and labels have to be of same size
+  if ( _examples.size() != _labels.size() )
+  {
+    fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );
+  }
+  this->num_examples = _examples.size();
+
+  this->knownClasses.clear();
+  for ( uint i = 0; i < _labels.size(); i++ )
+    this->knownClasses.insert((uint)_labels[i]);
+
+  std::map<uint, NICE::Vector> binLabels;
+  for ( set<uint>::const_iterator j = knownClasses.begin(); j != knownClasses.end(); j++ )
+  {
+    uint current_class = *j;
+    Vector labels_binary ( _labels.size() );
+    for ( uint i = 0; i < _labels.size(); i++ )
+        labels_binary[i] = ( _labels[i] == current_class ) ? 1.0 : -1.0;
+
+    binLabels.insert ( pair<uint, NICE::Vector>( current_class, labels_binary) );
+  }
+
+  // handle special binary case
+  if ( knownClasses.size() == 2 )
+  {
+    std::map<uint, NICE::Vector>::iterator it = binLabels.begin();
+    it++;
+    binLabels.erase( binLabels.begin(), it );
+  }
+
+  train ( _examples, binLabels );
+}
+
+void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples,
+                              std::map<uint, NICE::Vector> & _binLabels
+                            )
+{
+  // security-check: examples and labels have to be of same size
+  for ( std::map< uint, NICE::Vector >::const_iterator binLabIt = _binLabels.begin();
+        binLabIt != _binLabels.end();
+        binLabIt++
+      )
+  {
+    if ( _examples.size() != binLabIt->second.size() )
+    {
+      fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );
+    }
+  }
+
+  if ( this->b_verbose )
+    std::cerr << "GPHIKRawClassifier::train" << std::endl;
+
+  Timer t;
+  t.start();
+
+  precomputedA.clear();
+  precomputedB.clear();
+  precomputedT.clear();
+
+  // sort examples in each dimension and "transpose" the feature matrix
+  // set up the GenericMatrix interface
+  gm = new GMHIKernelRaw ( _examples, this->d_noise );
+  nnz_per_dimension = gm->getNNZPerDimension();
+
+  // solve linear equations for each class
+  // be careful when parallelizing this!
+  for ( map<uint, NICE::Vector>::const_iterator i = _binLabels.begin();
+          i != _binLabels.end(); i++ )
+  {
+    uint classno = i->first;
+    if (b_verbose)
+        std::cerr << "Training for class " << classno << endl;
+    const Vector & y = i->second;
+    Vector alpha;
+    solver->solveLin( *gm, y, alpha );
+    // TODO: get lookup tables, A, B, etc. and store them
+    gm->updateTables(alpha);
+    double **A = gm->getTableA();
+    double **B = gm->getTableB();
+    precomputedA.insert ( pair<uint, PrecomputedType> ( classno, A ) );
+    precomputedB.insert ( pair<uint, PrecomputedType> ( classno, B ) );
+  }
+
+
+  t.stop();
+  if ( this->b_verbose )
+    std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;
+
+
+  //indicate that we finished training successfully
+  this->b_isTrained = true;
+
+  // clean up all examples ??
+  if ( this->b_verbose )
+    std::cerr << "Learning finished" << std::endl;
+
+
+}
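
A hypothetical end-to-end sketch of the new classifier (an empty Config picks up the defaults set in initFromConfig; the include path is assumed analogous to the other gp-hik-core headers):

    #include <gp-hik-core/GPHIKRawClassifier.h>

    void demoTrainAndClassify ( const std::vector< const NICE::SparseVector * > & examples,
                                const NICE::Vector & labels,
                                const NICE::SparseVector * testExample )
    {
      NICE::Config conf; // empty config -> noise = 0.01, f_tolerance = 1e-10, ILS defaults
      NICE::GPHIKRawClassifier classifier ( &conf, "GPHIKClassifier" );

      classifier.train ( examples, labels ); // one ILS solve per class

      uint result;
      NICE::SparseVector scores;             // one entry per known class
      classifier.classify ( testExample, result, scores );
    }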
+
+

+ 177 - 0
GPHIKRawClassifier.h

@@ -0,0 +1,177 @@
+/**
+* @file GPHIKRawClassifier.h
+* @brief GP-HIK classifier working directly on raw sparse data structures (Interface)
+* @author Erik Rodner
+* @date 16-09-2015 (dd-mm-yyyy)
+*/
+#ifndef _NICE_GPHIKRAWCLASSIFIERINCLUDE
+#define _NICE_GPHIKRAWCLASSIFIERINCLUDE
+
+// STL includes
+#include <string>
+#include <limits>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Persistent.h>
+#include <core/vector/SparseVectorT.h>
+#include <core/algebra/IterativeLinearSolver.h>
+//
+#include <set>
+#include "quantization/Quantization.h"
+#include "GMHIKernelRaw.h"
+
+namespace NICE {
+
+ /**
+ * @class GPHIKRawClassifier
+ * @brief GP-HIK classifier working directly on raw sparse data structures
+ * @author Erik Rodner
+ */
+
+class GPHIKRawClassifier //: public NICE::Persistent
+{
+
+  protected:
+
+    /////////////////////////
+    /////////////////////////
+    // PROTECTED VARIABLES //
+    /////////////////////////
+    /////////////////////////
+
+    ///////////////////////////////////
+    // output/debug related settings //
+    ///////////////////////////////////
+
+    /** verbose flag for useful output*/
+    bool b_verbose;
+    /** debug flag for several outputs useful for debugging*/
+    bool b_debug;
+
+    //////////////////////////////////////
+    //      general specifications      //
+    //////////////////////////////////////
+
+    /** Header in configfile where variable settings are stored */
+    std::string confSection;
+
+    //////////////////////////////////////
+    // classification related variables //
+    //////////////////////////////////////
+    /** memorize whether the classifier was already trained*/
+    bool b_isTrained;
+
+
+    /** Gaussian label noise for model regularization */
+    double d_noise;
+
+    IterativeLinearSolver *solver;
+    /** object performing feature quantization */
+    NICE::Quantization *q;
+
+    typedef double ** PrecomputedType;
+
+    /** precomputed arrays A (1 per class) needed for classification without quantization  */
+    std::map< uint, PrecomputedType > precomputedA;
+    /** precomputed arrays B (1 per class) needed for classification without quantization  */
+    std::map< uint, PrecomputedType > precomputedB;
+
+    /** precomputed LUTs (1 per class) needed for classification with quantization  */
+    std::map< uint, double * > precomputedT;
+
+    uint *nnz_per_dimension;
+    uint num_examples;
+
+    double f_tolerance;
+
+    GMHIKernelRaw *gm;
+    std::set<uint> knownClasses;
+
+    /////////////////////////
+    /////////////////////////
+    //  PROTECTED METHODS  //
+    /////////////////////////
+    /////////////////////////
+
+
+  public:
+
+    /**
+     * @brief default constructor
+     */
+    GPHIKRawClassifier( );
+
+
+    /**
+     * @brief standard constructor
+     */
+    GPHIKRawClassifier( const NICE::Config *_conf ,
+                     const std::string & s_confSection = "GPHIKClassifier"
+                   );
+
+    /**
+     * @brief simple destructor
+     */
+    ~GPHIKRawClassifier();
+
+    /**
+    * @brief Setup internal variables and objects used
+    * @param conf Config file to specify variable settings
+    * @param s_confSection
+    */
+    void initFromConfig(const NICE::Config *_conf,
+                        const std::string & s_confSection
+                       );
+
+    ///////////////////// ///////////////////// /////////////////////
+    //                         GET / SET
+    ///////////////////// ///////////////////// /////////////////////
+
+    /**
+     * @brief Return currently known class numbers
+     */
+    std::set<uint> getKnownClassNumbers ( ) const;
+
+    ///////////////////// ///////////////////// /////////////////////
+    //                      CLASSIFIER STUFF
+    ///////////////////// ///////////////////// /////////////////////
+
+    /**
+     * @brief classify a given example with the previously learned model
+     * @author Alexander Freytag, Erik Rodner
+     * @param example (SparseVector) to be classified given in a sparse representation
+     * @param result (uint) class number of most likely class
+     * @param scores (SparseVector) classification scores for known classes
+     */
+    void classify ( const NICE::SparseVector * _example,
+                    uint & _result,
+                    NICE::SparseVector & _scores
+                  ) const;
+
+    /**
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors
+     * @date 18-10-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag, Erik Rodner
+     * @param examples (std::vector< NICE::SparseVector *>) training data given in a sparse representation
+     * @param labels (Vector) class labels (multi-class)
+     */
+    void train ( const std::vector< const NICE::SparseVector *> & _examples,
+                 const NICE::Vector & _labels
+               );
+
+    /**
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors
+     * @author Alexander Freytag, Erik Rodner
+     * @param examples examples to use given in a sparse data structure
+     * @param binLabels corresponding binary labels (1,-1), keyed by class number. Every example need not have exactly one positive entry across this set
+     */
+    void train ( const std::vector< const NICE::SparseVector *> & _examples,
+                 std::map<uint, NICE::Vector> & _binLabels
+               );
+
+};
+
+}
+
+#endif
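
For convenience, the configuration keys read by initFromConfig (see the .cpp above), with their defaults:

    section passed to the constructor (default "GPHIKClassifier"):
      noise       = 0.01     Gaussian label noise on the kernel diagonal
      verbose     = false
      debug       = false
      f_tolerance = 1e-10    feature values below this are skipped at test time

    section "FMKGPHyperparameterOptimization" (ILS settings):
      ils_max_iterations = 1000
      ils_min_delta      = 1e-7
      ils_min_residual   = 1e-7
      ils_verbose        = false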

+ 192 - 158
tests/TestFastHIK.cpp

@@ -16,8 +16,9 @@
 #include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>
 #include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
 #include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
-// 
-// 
+#include <gp-hik-core/GMHIKernelRaw.h>
+//
+//
 #include "gp-hik-core/quantization/Quantization.h"
 #include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"
 
@@ -26,11 +27,11 @@
 #include <gtest/gtest.h>
 
 const bool b_debug = false;
-const bool verbose = false;
+const bool verbose = true;
 const bool verboseStartEnd = true;
 const bool solveLinWithoutRand = false;
-const uint n = 30;//1500;//1500;//10;
-const uint d = 5;//200;//2;
+const uint n = 1500;
+const uint d = 100;
 const uint numBins = 11;//1001;//1001;
 const uint solveLinMaxIterations = 1000;
 const double sparse_prob = 0.6;
@@ -39,42 +40,42 @@ const bool smallTest = false;
 bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
 {
   bool result(true);
-  
+
 //   std::cerr << "A.size(): " << A.size() << " B.size(): " << B.size() << std::endl;
-  
+
   NICE::VVector::const_iterator itA = A.begin();
   NICE::VVector::const_iterator itB = B.begin();
-  
+
   while ( (itA != A.end()) && ( itB != B.end()) )
   {
     if (itA->size() != itB->size())
     {
       result = false;
       break;
-    } 
-    
+    }
+
     for(uint i = 0; (i < itA->size()) && (i < itB->size()); i++)
     {
       if (fabs((*itA)[i] - (*itB)[i]) > tolerance)
       {
         result = false;
-        break;        
+        break;
       }
     }
 
     if (result == false)
-          break;        
+          break;
     itA++;
     itB++;
   }
-  
+
   return result;
 }
 
 bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const double & tolerance = 10e-8)
 {
   bool result = true;
-  
+
   for (int i = 0; i < size; i++)
   {
     if ( fabs(LUT1[i] - LUT2[i]) > tolerance)
@@ -84,7 +85,7 @@ bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const
       break;
     }
   }
-  
+
   return result;
 }
 
@@ -105,54 +106,92 @@ TEST(TestFastHIK,testKernelMultiplication)
   for ( uint i = 0 ; i < d; i++ )
   {
     for ( uint k = 0; k < n; k++ )
-      if ( drand48() < sparse_prob ) 
+      if ( drand48() < sparse_prob )
       {
         dataMatrix[i][k] = 0.0;
         nrZeros++;
       }
   }
 
-  if ( verbose ) {
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     cerr << endl;
   }
 
   double noise = 1.0;
+  NICE::Timer t;
+  t.start();
   FastMinKernel fmk ( dataMatrix, noise );
-    
+  t.stop();
+  if (verbose)
+    std::cerr << "Time for FastMinKernel setup: " << t.getLast() << endl;
+
   if ( (n*d)>0)
   {
     ASSERT_NEAR(fmk.getSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
     if (verbose)
       std::cerr << "fmk.getSparsityRatio(): " << fmk.getSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
   }
-  
+
   GMHIKernel gmk ( &fmk );
   if (verbose)
-    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution
   else
-    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution
 
   Vector y ( n );
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
- 
+
+
+  // Test the GMHIKernel interface
   Vector alpha;
-  
+
+  t.start();
   gmk.multiply ( alpha, y );
-  
+  t.stop();
+  if (verbose)
+      std::cerr << "Time for kernel multiplication with GMHIKernel: " << t.getLast() << std::endl;
+
+
+  // convert data structures to test the GMHIKernelRaw interface
+  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+  transposeVectorOfVectors(dataMatrix_transposed);
+  std::vector< const NICE::SparseVector * > dataMatrix_sparse;
+  for ( std::vector< std::vector<double> >::const_iterator i = dataMatrix_transposed.begin(); i != dataMatrix_transposed.end(); i++ )
+  {
+    Vector w ( *i );
+    SparseVector *v = new SparseVector ( w );
+    dataMatrix_sparse.push_back(v);
+  }
+
+  t.start();
+  GMHIKernelRaw gmk_raw ( dataMatrix_sparse, noise );
+  t.stop();
+  if (verbose)
+    std::cerr << "Time for GMHIKernelRaw setup: " << t.getLast() << std::endl;
+
+  Vector alpha_raw;
+  t.start();
+  gmk_raw.multiply ( alpha_raw, y );
+  t.stop();
+  if (verbose)
+      std::cerr << "Time for kernel multiplication with GMHIKernelRaw: " << t.getLast() << std::endl;
+
+
+
+
+  // compute the kernel matrix multiplication exactly
   NICE::IntersectionKernelFunction<double> hikSlow;
-  
+
   // tic
   time_t  slow_start = clock();
-  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
-  transposeVectorOfVectors(dataMatrix_transposed);
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
   //toc
   float time_slowComputation = (float) (clock() - slow_start);
   if (verbose)
-    std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;  
+    std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;
 
   // tic
   time_t  slow_sparse_start = clock();
@@ -161,10 +200,7 @@ TEST(TestFastHIK,testKernelMultiplication)
   //toc
   float time_slowComputation_usingSparsity = (float) (clock() - slow_sparse_start);
   if (verbose)
-    std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;    
-
-  if ( verbose ) 
-    cerr << "K = " << K << endl;
+    std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;
 
   // check the trace calculation
   //ASSERT_NEAR( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
@@ -173,10 +209,11 @@ TEST(TestFastHIK,testKernelMultiplication)
   // let us compute the kernel multiplication with the slow version
   Vector alpha_slow = K*y;
 
-  if (verbose)
-    std::cerr << "Sparse multiplication [alpha, alpha_slow]: " << std::endl <<  alpha << std::endl << alpha_slow << std::endl << std::endl;
-  
+  if (b_debug)
+    std::cerr << "Sparse multiplication [alpha, alpha_slow, alpha_raw]: " << std::endl <<  alpha << std::endl << alpha_slow << std::endl << alpha_raw << std::endl << std::endl;
+
   ASSERT_NEAR((alpha-alpha_slow).normL1(), 0.0, 1e-8);
+  ASSERT_NEAR((alpha_raw-alpha_slow).normL1(), 0.0, 1e-8);
 
   // test the case, where we first transform and then use the multiply stuff
   NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
@@ -184,6 +221,7 @@ TEST(TestFastHIK,testKernelMultiplication)
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   ParameterizedFunction *pf = new PFAbsExp( 1.2 );
   fmk.applyFunctionToFeatureMatrix( pf );
+
 //   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 
   Vector galpha;
@@ -194,18 +232,20 @@ TEST(TestFastHIK,testKernelMultiplication)
   ASSERT_NEAR((galpha-galpha_slow).normL1(), 0.0, 1e-8);
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplication done ===================== " << std::endl;
+
+  delete pf;
+  for ( std::vector< const NICE::SparseVector * >::const_iterator i = dataMatrix_sparse.begin(); i != dataMatrix_sparse.end(); i++ )
+    delete *i;
 }
 
 TEST(TestFastHIK, testKernelMultiplicationFast)
 {
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast ===================== " << std::endl;
-  
+
   NICE::Quantization * q_gen;
-  q_gen = new Quantization1DAequiDist0To1 ( numBins );  
-  
+  q_gen = new Quantization1DAequiDist0To1 ( numBins );
+
   NICE::Quantization * q;
-  q = new Quantization1DAequiDist0To1 ( 2*numBins -1 );   
+  q = new Quantization1DAequiDist0To1 ( 2*numBins -1 );
 
   // data is generated, such that there is no approximation error
   vector< vector<double> > dataMatrix;
@@ -223,49 +263,41 @@ TEST(TestFastHIK, testKernelMultiplicationFast)
 
     dataMatrix.push_back(v);
   }
-  
-  if ( verbose ) {
-    cerr << "data matrix: " << endl;
-    printMatrix ( dataMatrix );
-    cerr << endl;
-  }
 
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise );
-  
+
   GMHIKernel gmk ( &fmk );
   if (verbose)
-    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution
   else
-    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution
 
   Vector y ( n );
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
-   
+
   ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
   GMHIKernel gmkFast ( &fmk, pf, q );
 
+
 //   pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
-    
+
   Vector alpha;
-  
+
   gmk.multiply ( alpha, y );
-  
+
   Vector alphaFast;
-  
+
   gmkFast.multiply ( alphaFast, y );
-  
+
   NICE::IntersectionKernelFunction<double> hikSlow;
-  
+
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   transposeVectorOfVectors(dataMatrix_transposed);
 
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
 
-  if ( verbose ) 
-    cerr << "K = " << K << endl;
-
   // check the trace calculation
   //ASSERT_NEAR( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
   ASSERT_NEAR( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
@@ -273,9 +305,9 @@ TEST(TestFastHIK, testKernelMultiplicationFast)
   // let us compute the kernel multiplication with the slow version
   Vector alpha_slow = K*y;
 
-  if ( verbose )
+  if ( b_debug )
     std::cerr << "Sparse multiplication [alpha, alphaFast, alpha_slow]: " << std::endl <<  alpha << std::endl << alphaFast << std::endl << alpha_slow << std::endl << std::endl;
- 
+
   ASSERT_NEAR(0.0, (alphaFast-alpha_slow).normL1(), 1e-8);
 
   // test the case, where we first transform and then use the multiply stuff
@@ -284,28 +316,29 @@ TEST(TestFastHIK, testKernelMultiplicationFast)
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   pf->parameters()[0] = 1.2;
   fmk.applyFunctionToFeatureMatrix( pf );
-//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 
   Vector galphaFast;
   gmkFast.multiply ( galphaFast, y );
-  
+
   Vector galpha;
-  
+
   gmk.multiply ( galpha, y );
 
   Vector galpha_slow = gK * y;
-  
-  if (verbose)
+
+  if ( b_debug )
     std::cerr << "Sparse multiplication [galpha, galphaFast, galpha_slow]: " << std::endl <<  galpha << std::endl << galphaFast << std::endl << galpha_slow << std::endl << std::endl;
 
   // clean-up
   delete q_gen;
   delete q;
-  
+
   // final assertion
   ASSERT_NEAR((galphaFast-galpha_slow).normL1(), 0.0, 1e-8);
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast done ===================== " << std::endl;
+
+  delete pf;
 }
 
 
@@ -313,7 +346,7 @@ TEST(TestFastHIK, testKernelSum)
 {
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSum ===================== " << std::endl;
-  
+
   vector< vector<double> > dataMatrix;
   generateRandomFeatures ( d, n, dataMatrix );
 
@@ -321,14 +354,14 @@ TEST(TestFastHIK, testKernelSum)
   for ( uint i = 0 ; i < d; i++ )
   {
     for ( uint k = 0; k < n; k++ )
-      if ( drand48() < sparse_prob ) 
+      if ( drand48() < sparse_prob )
       {
         dataMatrix[i][k] = 0.0;
         nrZeros++;
       }
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     cerr << endl;
@@ -336,13 +369,13 @@ TEST(TestFastHIK, testKernelSum)
 
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise );
-  
+
   Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
 
   NICE::VVector ASparse;
   NICE::VVector BSparse;
-  fmk.hik_prepare_alpha_multiplications ( alpha, ASparse, BSparse ); 
-  
+  fmk.hik_prepare_alpha_multiplications ( alpha, ASparse, BSparse );
+
   Vector xstar (d);
   for ( uint i = 0 ; i < d ; i++ )
     if ( drand48() < sparse_prob ) {
@@ -351,14 +384,14 @@ TEST(TestFastHIK, testKernelSum)
       xstar[i] = rand();
     }
   SparseVector xstarSparse ( xstar );
-    
+
   double betaSparse;
   fmk.hik_kernel_sum ( ASparse, BSparse, xstarSparse, betaSparse );
-  
+
   if (verbose)
     std::cerr << "kernelSumSparse done, now do the thing without exploiting sparsity" << std::endl;
 
-  
+
   // checking the result
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   transposeVectorOfVectors(dataMatrix_transposed);
@@ -386,7 +419,7 @@ TEST(TestFastHIK, testKernelSumFast)
 {
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSumFast ===================== " << std::endl;
-  
+
   NICE::Quantization * q;
   q = new Quantization1DAequiDist0To1 ( numBins );
 
@@ -406,8 +439,8 @@ TEST(TestFastHIK, testKernelSumFast)
 
     dataMatrix.push_back(v);
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     cerr << endl;
@@ -416,7 +449,7 @@ TEST(TestFastHIK, testKernelSumFast)
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise );
   Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
-  if ( verbose )
+  if ( b_debug )
     std::cerr << "alpha = " << alpha << endl;
 
   // generate xstar
@@ -434,10 +467,10 @@ TEST(TestFastHIK, testKernelSumFast)
   for ( uint i = 0 ; i < d; i++ )
     xstar_stl[i] = xstar[i];
 
-  if ( verbose ) 
+  if ( b_debug )
     cerr << "xstar = " << xstar << endl;
- 
-  for ( double gamma = 1.0 ; gamma < 2.0; gamma += 0.5 ) 
+
+  for ( double gamma = 1.0 ; gamma < 2.0; gamma += 0.5 )
   {
     if (verbose)
       std::cerr << "testing hik_kernel_sum_fast with ghik parameter: " << gamma << endl;
@@ -451,17 +484,18 @@ TEST(TestFastHIK, testKernelSumFast)
     NICE::VVector B;
     if (verbose)
       std::cerr << "fmk.hik_prepare_alpha_multiplications ( alpha, A, B ) " << std::endl;
-    fmk.hik_prepare_alpha_multiplications ( alpha, A, B ); 
+    fmk.hik_prepare_alpha_multiplications ( alpha, A, B );
 
-    if (verbose)
+    if (b_debug)
       //std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast( A, B, q )" << std::endl;
       std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast_alltogether( alpha, q, &pf )" << std::endl;
-    double *TlookupOld = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, &pf ); 
-    double *TlookupNew = fmk.hikPrepareLookupTable( alpha, q, &pf ); 
-    
+
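+    // both routes fill a lookup table with numBins*d entries: the old one from
+    // the precomputed vectors A and B, the new one directly from alpha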
+    double *TlookupOld = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, &pf );
+    double *TlookupNew = fmk.hikPrepareLookupTable( alpha, q, &pf );
+
     int maxAcces(numBins*d);
-    
-    if (verbose)
+
+    if (b_debug)
     {
       std::cerr << "TlookupOld:  " << std::endl;
       for (int i = 0; i < maxAcces; i++)
@@ -476,20 +510,20 @@ TEST(TestFastHIK, testKernelSumFast)
         std::cerr << TlookupNew[i] << " ";
         if ( (i%numBins) == (numBins-1))
           std::cerr << std::endl;
-      }    
+      }
     }
-    
+
     if (verbose)
       std::cerr << "fmk.hik_kernel_sum_fast ( Tlookup, q, xstar, beta_fast )" << std::endl;
-    
+
     double beta_fast;
     fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar, beta_fast );
-    
+
     NICE::SparseVector xstar_sparse(xstar);
-    
+
     double beta_fast_sparse;
     fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar_sparse, beta_fast_sparse );
-    
+
     double betaSparse;
     fmk.hik_kernel_sum ( A, B, xstar_sparse, betaSparse, &pf );
 
@@ -503,10 +537,10 @@ TEST(TestFastHIK, testKernelSumFast)
     for ( uint i = 0 ; i < n; i++ )
       beta_slow += kstar_stl[i] * alpha[i];
 
-    if (verbose)
+    if (b_debug)
       std::cerr << "beta_slow: " << beta_slow << std::endl << "beta_fast: " << beta_fast << std::endl << "beta_fast_sparse: " << beta_fast_sparse << std::endl << "betaSparse: " << betaSparse<< std::endl;
-    
-    // clean-up 
+
+    // clean-up
     delete [] TlookupNew;
     delete [] TlookupOld;    
     
@@ -515,10 +549,10 @@ TEST(TestFastHIK, testKernelSumFast)
   
 
   } // for-loop
-  
+
   // clean-up
   delete q;
-  
+
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSumFast done ===================== " << std::endl;
 
@@ -548,8 +582,8 @@ TEST(TestFastHIK, testLUTUpdate)
 
     dataMatrix.push_back(v);
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     cerr << endl;
@@ -557,19 +591,19 @@ TEST(TestFastHIK, testLUTUpdate)
 
   double noise = 1.0;
   NICE::FastMinKernel fmk ( dataMatrix, noise );
-  
+
   NICE::ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
 
   NICE::Vector alpha ( n );
   for ( uint i = 0; i < alpha.size(); i++ )
     alpha[i] = sin(i);
-  
+
   if (verbose)
     std::cerr << "prepare LUT" << std::endl;
   double * T = fmk.hikPrepareLookupTable(alpha, q, pf);
   if (verbose)
     std::cerr << "preparation done -- printing T" << std::endl;
-  
+
   int maxAcces(numBins*d);
   if (verbose)
   {
@@ -578,21 +612,21 @@ TEST(TestFastHIK, testLUTUpdate)
       std::cerr << T[i] << " ";
       if ( (i%numBins) == (numBins-1))
         std::cerr << std::endl;
-    }    
+    }
   }
 
  // let's change the alpha value at index 2
   int idx(2);
   double valAlphaOld(alpha[idx]);
   double valAlphaNew(1.2); //this value is definitely different from the previous one
-      
+
   Vector alphaNew(alpha);
   alphaNew[idx] = valAlphaNew;
-  
+
   double * TNew = fmk.hikPrepareLookupTable(alphaNew, q, pf);
   if (verbose)
     std::cerr << "calculated the new LUT, no print it: " << std::endl;
-  
+
   if (verbose)
   {
     for (int i = 0; i < maxAcces; i++)
@@ -600,7 +634,7 @@ TEST(TestFastHIK, testLUTUpdate)
       std::cerr << TNew[i] << " ";
       if ( (i%numBins) == (numBins-1))
         std::cerr << std::endl;
-    } 
+    }
   }
 
   if (verbose)
@@ -608,7 +642,7 @@ TEST(TestFastHIK, testLUTUpdate)
   fmk.hikUpdateLookupTable(T, valAlphaNew, valAlphaOld, idx, q, pf );
   if (verbose)
     std::cerr << "update is done, now print the updated version: " << std::endl;
-  
+
   if (verbose)
   {
     for (int i = 0; i < maxAcces; i++)
@@ -616,12 +650,12 @@ TEST(TestFastHIK, testLUTUpdate)
       std::cerr << T[i] << " ";
       if ( (i%numBins) == (numBins-1))
         std::cerr << std::endl;
-    } 
+    }
   }
-  
-  
+
+
   bool equal = compareLUTs(T, TNew, q->getNumberOfBins()*d, 10e-8);
-  
+
   if (verbose)
   {
     if (equal)
@@ -641,16 +675,16 @@ TEST(TestFastHIK, testLUTUpdate)
         if ( (i % q->getNumberOfBins()) == 0)
           std::cerr << std::endl;
         std::cerr << TNew[i] << " ";
-      }     
-    
-    }    
+      }
+
+    }
   }
 
-  
-  
+
+
   // clean-up
-  delete q;  
-  delete pf;    
+  delete q;
+  delete pf;
   delete [] T;
   delete [] TNew;  
     
@@ -687,8 +721,8 @@ TEST(TestFastHIK, testLinSolve)
 
     dataMatrix.push_back(v);
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     std::cerr << "data matrix: " << std::endl;
     printMatrix ( dataMatrix );
     std::cerr << std::endl;
@@ -696,14 +730,14 @@ TEST(TestFastHIK, testLinSolve)
 
   double noise = 1.0;
   NICE::FastMinKernel fmk ( dataMatrix, noise );
-  
+
   NICE::ParameterizedFunction *pf = new NICE::PFAbsExp ( 1.0 );
   fmk.applyFunctionToFeatureMatrix( pf );
 
-  NICE::Vector y ( n );  
+  NICE::Vector y ( n );
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
-  
+
   NICE::Vector alpha;
   NICE::Vector alphaRandomized;
 
@@ -716,20 +750,20 @@ TEST(TestFastHIK, testLinSolve)
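  // approximate solution of K*alpha = y using random subsets; the trailing
  // arguments are presumably the iteration cap and the size of each random subset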
   fmk.solveLin(y,alphaRandomized,q,pf,true,solveLinMaxIterations,30);
   //toc
   t.stop();
-  float time_randomizedSolving = t.getLast();  
+  float time_randomizedSolving = t.getLast();
   if ( verbose )
-    std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;  
-  
+    std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;
+
   // test the case, where we first transform and then use the multiply stuff
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   transposeVectorOfVectors(dataMatrix_transposed);
-  
+
   NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.0 );
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
-  
+
   NICE::Vector K_alphaRandomized;
   K_alphaRandomized.multiply(gK, alphaRandomized);
-  
+
   if (solveLinWithoutRand)
   {
     if ( verbose )
@@ -737,35 +771,35 @@ TEST(TestFastHIK, testLinSolve)
     fmk.solveLin(y,alpha,q,pf,false,1000);
     Vector K_alpha;
     K_alpha.multiply(gK, alpha);
-    
+
     if ( verbose )
     {
       std::cerr << "now assert that K_alpha == y" << std::endl;
       std::cerr << "(K_alpha-y).normL1(): " << (K_alpha-y).normL1() << std::endl;
     }
   }
-   
+
 //   std::cerr << "alpha: " << alpha << std::endl;
 //   std::cerr << "K_times_alpha: " << K_alpha << std::endl;
 //   std::cerr << "y: " << y << std::endl;
-//   
+//
 //   Vector test_alpha;
 //   ILSConjugateGradients cgm;
 //   cgm.solveLin( GMStandard(gK),y,test_alpha);
-//   
+//
 //   K_alpha.multiply( gK, test_alpha);
-//   
+//
 //   std::cerr << "test_alpha (CGM): " << test_alpha << std::endl;
 //   std::cerr << "K_times_alpha (CGM): " << K_alpha << std::endl;
-  
+
   if ( verbose )
   {
     std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
-    std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl; 
+    std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl;
   }
-  
+
   // clean-up
-  delete q;  
+  delete q;
   delete pf;
     
   // final assertion        
@@ -778,15 +812,15 @@ TEST(TestFastHIK, testLinSolve)
 TEST(TestFastHIK, testKernelVector)
 {
   if (verboseStartEnd)
-    std::cerr << "================== TestFastHIK::testKernelVector ===================== " << std::endl;  
-  
+    std::cerr << "================== TestFastHIK::testKernelVector ===================== " << std::endl;
+
   std::vector< std::vector<double> > dataMatrix;
-  
+
   std::vector<double> dim1; dim1.push_back(0.2);dim1.push_back(0.1);dim1.push_back(0.0);dim1.push_back(0.0);dim1.push_back(0.4); dataMatrix.push_back(dim1);
   std::vector<double> dim2; dim2.push_back(0.3);dim2.push_back(0.6);dim2.push_back(1.0);dim2.push_back(0.4);dim2.push_back(0.3); dataMatrix.push_back(dim2);
   std::vector<double> dim3; dim3.push_back(0.5);dim3.push_back(0.3);dim3.push_back(0.0);dim3.push_back(0.6);dim3.push_back(0.3); dataMatrix.push_back(dim3);
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     std::cerr << "data matrix: " << std::endl;
     printMatrix ( dataMatrix );
     std::cerr << endl;
@@ -794,13 +828,13 @@ TEST(TestFastHIK, testKernelVector)
 
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise, b_debug );
-  
+
 
   std::vector<double> xStar; xStar.push_back(0.2);xStar.push_back(0.7);xStar.push_back(0.1);
   NICE::Vector xStarVec (xStar);
  std::vector<double> x2; x2.push_back(0.7);x2.push_back(0.3);x2.push_back(0.0);
   NICE::Vector x2Vec (x2);
-  
+
   NICE::SparseVector xStarsparse( xStarVec );
   NICE::SparseVector x2sparse( x2Vec );
 
@@ -810,35 +844,35 @@ TEST(TestFastHIK, testKernelVector)
     fmk.store ( std::cerr );
     xStarsparse.store ( std::cerr );
   }
-  
+
   NICE::Vector k1;
   fmk.hikComputeKernelVector( xStarsparse, k1 );
 
-  
+
   NICE::Vector k2;
   fmk.hikComputeKernelVector( x2sparse, k2 );
-   
+
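  // hand-computed HIK ground truth, e.g. k1GT[0] = min(0.2,0.2) + min(0.7,0.3) + min(0.1,0.5) = 0.6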
   NICE::Vector k1GT(5); k1GT[0] = 0.6; k1GT[1] = 0.8; k1GT[2] = 0.7; k1GT[3] = 0.5; k1GT[4] = 0.6;
   NICE::Vector k2GT(5); k2GT[0] = 0.5; k2GT[1] = 0.4; k2GT[2] = 0.3; k2GT[3] = 0.3; k2GT[4] = 0.7;
-  
-  if (verbose)
+
+  if (b_debug)
   {
     std::cerr << "k1: " << k1 << std::endl;
     std::cerr << "GT: " << k1GT << std::endl;
     std::cerr << "k2: " << k2 << std::endl;
     std::cerr << "GT: " << k2GT << std::endl;
   }
-    
+
   for (int i = 0; i < 5; i++)
   {
     ASSERT_NEAR(k1[i]-k1GT[i], 0.0, 1e-6);
     ASSERT_NEAR(k2[i]-k2GT[i], 0.0, 1e-6);
   }
 
-  
+
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelVector done ===================== " << std::endl;
-  
+
 }
 
 #endif

+ 42 - 3
      tests/TestGPHIKOnlineLearnable.cpp

@@ -16,6 +16,7 @@
 
 // gp-hik-core includes
 #include "gp-hik-core/GPHIKClassifier.h"
+#include "gp-hik-core/GPHIKRawClassifier.h"
 
 #include <gtest/gtest.h>
 
@@ -104,6 +105,33 @@ void evaluateClassifier ( NICE::Matrix & confusionMatrix,
   }
 }
 
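+// Counterpart of evaluateClassifier for the new GPHIKRawClassifier: every dense
+// test row is converted into a SparseVector, classified, and the outcome is
+// accumulated in the confusion matrix (row: ground-truth index, column:
+// predicted index).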
+void evaluateClassifierRaw ( NICE::Matrix & confusionMatrix,
+                          const NICE::GPHIKRawClassifier * classifier,
+                          const NICE::Matrix & data,
+                          const NICE::Vector & yMulti,
+                          const std::map< uint,uint > & mapClNoToIdxTrain,
+                          const std::map< uint,uint > & mapClNoToIdxTest
+                        )
+{
+  int i_loopEnd  ( (int)data.rows() );
+
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example_nonsparse ( data.getRow(i) );
+    NICE::SparseVector example (example_nonsparse);
+    NICE::SparseVector scores;
+    uint result;
+
+    // classify with the given raw classifier
+    classifier->classify( &example, result, scores );
+
+    uint gtlabel = mapClNoToIdxTest.find(yMulti[i])->second;
+    uint predlabel = mapClNoToIdxTrain.find(result)->second;
+    confusionMatrix( gtlabel, predlabel ) += 1.0;
+  }
+}
+
+
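+// A minimal usage sketch for this helper (the variable names follow the test
+// code further down; any remaining setup is assumed):
+//
+//   NICE::Matrix confusionRaw ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0 );
+//   evaluateClassifierRaw ( confusionRaw, classifierScratchRaw, dataTest, yBinTest,
+//                           mapClNoToIdxTrain, mapClNoToIdxTest );
+//   confusionRaw.normalizeColumnsL1();
+//   double arrRaw ( confusionRaw.trace() / confusionRaw.cols() ); // avg recognition rate
+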
 void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
                                 const NICE::GPHIKClassifier * classifierScratch, 
                                 const NICE::Matrix & data
@@ -267,6 +295,7 @@ TEST(TestGPHIKOnlineLearnable, testOnlineLearningOCCtoBinary)
   
   conf.sB ( "GPHIKClassifier", "eig_verbose", false);
   conf.sS ( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  conf.sB ( "GPHIKClassifier", "verbose", true);
   
   std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
   
@@ -323,6 +352,8 @@ TEST(TestGPHIKOnlineLearnable, testOnlineLearningOCCtoBinary)
   NICE::GPHIKClassifier * classifierScratch = new NICE::GPHIKClassifier ( &conf );
   classifierScratch->train ( examplesTrain, yBinTrain );
   
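+  // additionally train the new GPHIKRawClassifier from scratch on the same data;
+  // it is expected to reproduce the results of the classifier trained from scratch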
+  NICE::GPHIKRawClassifier * classifierScratchRaw = new NICE::GPHIKRawClassifier ( &conf );
+  classifierScratchRaw->train ( examplesTrain, yBinTrain );
     
   // TEST both classifiers to produce equal results
   
@@ -351,17 +382,19 @@ TEST(TestGPHIKOnlineLearnable, testOnlineLearningOCCtoBinary)
   
   NICE::Matrix confusionMatrix         ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
   NICE::Matrix confusionMatrixScratch  ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
-  
+  NICE::Matrix confusionMatrixScratchRaw  ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
     
   // ------------------------------------------
   // ------------- CLASSIFICATION --------------
   // ------------------------------------------  
   evaluateClassifier ( confusionMatrix, classifier, dataTest, yBinTest,
-                          mapClNoToIdxTrain,mapClNoToIdxTest ); 
+                          mapClNoToIdxTrain, mapClNoToIdxTest );
   
   evaluateClassifier ( confusionMatrixScratch, classifierScratch, dataTest, yBinTest,
-                          mapClNoToIdxTrain,mapClNoToIdxTest );  
+                          mapClNoToIdxTrain, mapClNoToIdxTest );
   
+  evaluateClassifierRaw ( confusionMatrixScratchRaw, classifierScratchRaw, dataTest, yBinTest,
+                          mapClNoToIdxTrain, mapClNoToIdxTest );
     
   // post-process confusion matrices
   confusionMatrix.normalizeColumnsL1();
@@ -370,20 +403,26 @@ TEST(TestGPHIKOnlineLearnable, testOnlineLearningOCCtoBinary)
   confusionMatrixScratch.normalizeColumnsL1();
   double arrScratch ( confusionMatrixScratch.trace()/confusionMatrixScratch.cols() );
 
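+  // average recognition rate of the raw classifier, computed in the same way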
+  confusionMatrixScratchRaw.normalizeColumnsL1();
+  double arrScratchRaw ( confusionMatrixScratchRaw.trace()/confusionMatrixScratchRaw.cols() );
   
   if ( verbose ) 
   {
     std::cerr << "confusionMatrix: " << confusionMatrix  << std::endl;
   
     std::cerr << "confusionMatrixScratch: " << confusionMatrixScratch << std::endl;
+
+    std::cerr << "confusionMatrixScratchRaw: " << confusionMatrixScratchRaw << std::endl;
   } 
   
   ASSERT_NEAR( arr, arrScratch, 1e-8);
+  ASSERT_NEAR( arrScratch, arrScratchRaw, 1e-8);
   
   // don't waste memory
   
   delete classifier;
   delete classifierScratch;  
+  delete classifierScratchRaw;
   
   for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
   {