
Merge branch 'raw_version'

Erik Rodner 9 years ago
parent
commit
9f26f5dc49

+ 6 - 1
FMKGPHyperparameterOptimization.cpp

@@ -23,7 +23,6 @@
 #include <core/basics/Exception.h>
 // 
 #include <core/vector/Algorithms.h>
-#include <core/vector/Eigen.h>
 // 
 #include <core/optimization/blackbox/DownhillSimplexOptimizer.h>
 
@@ -827,6 +826,12 @@ void FMKGPHyperparameterOptimization::computeMatricesAndLUTs ( const GPLikelihoo
     PrecomputedType A;
     PrecomputedType B;
 
+    if ( this->b_debug && i->first == 1 )
+    {
+        std::cerr << "Training for class " << i->first << std::endl;
+        std::cerr << "  " << i->second << std::endl;
+    }
+
     fmk->hik_prepare_alpha_multiplications ( i->second, A, B );
     A.setIoUntilEndOfFile ( false );
     B.setIoUntilEndOfFile ( false );

+ 3 - 10
FastMinKernel.cpp

@@ -147,8 +147,6 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
                                                       NICE::VVector & _A,
                                                       NICE::VVector & _B) const
 {
-//   std::cerr << "FastMinKernel::hik_prepare_alpha_multiplications" << std::endl;
-//   std::cerr << "alpha: " << alpha << std::endl;
   _A.resize( this->ui_d );
   _B.resize( this->ui_d );
 
@@ -189,14 +187,10 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
   for (uint i = 0; i < this->ui_d; i++)
   {
     uint numNonZero = this->X_sorted.getNumberOfNonZeroElementsPerDimension(i);
-    //DEBUG
-    //std::cerr << "number of non-zero elements in dimension " << i << " / " << d << ": " << numNonZero << std::endl;
     _A[i].resize( numNonZero );
     _B[i].resize( numNonZero  );
   }
 
-  //  for more information see hik_prepare_alpha_multiplications
-
   for (uint dim = 0; dim < this->ui_d; dim++)
   {
     double alpha_sum(0.0);
@@ -261,8 +255,6 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
   // creating the lookup table as pure C, which might be beneficial
   // for fast evaluation
   double *Tlookup = new double [ hmax * this->ui_d ];
-//     std::cerr << "size of LUT: " << hmax * this->ui_d << std::endl;
-//   sizeOfLUT = hmax * this->d;
 
 
   // loop through all dimensions
@@ -867,11 +859,13 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
 
     if (sizeOfRandomSubset <= 0)
       sizeOfRandomSubset = _y.size();
+    if (sizeOfRandomSubset > _y.size())
+      sizeOfRandomSubset = _y.size();
 
     for ( iter = 1; iter <= _maxIterations; iter++ )
     {
       NICE::Vector perm;
-      this->randomPermutation( perm, indices, _sizeOfRandomSubset );
+      this->randomPermutation( perm, indices, sizeOfRandomSubset );
 
       if ( _timeAnalysis )
       {
@@ -890,7 +884,6 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
 
       for ( uint i = 0; i < sizeOfRandomSubset; i++)
       {
-
         pseudoResidual(perm[i]) = -_y(perm[i]) + (this->d_noise * _alpha(perm[i]));
         for (uint j = 0; j < this->ui_d; j++)
         {

+ 275 - 0
GMHIKernelRaw.cpp

@@ -0,0 +1,275 @@
+/**
+* @file GMHIKernelRaw.cpp
+* @brief Fast multiplication with histogram intersection kernel matrices (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 01/02/2012
+*/
+#include <iostream>
+
+#include <core/vector/VVector.h>
+#include <core/basics/Timer.h>
+
+#include "GMHIKernelRaw.h"
+
+using namespace NICE;
+using namespace std;
+
+
+GMHIKernelRaw::GMHIKernelRaw( const std::vector< const NICE::SparseVector *> &_examples, const double _d_noise )
+{
+    this->examples_raw = NULL;
+    this->nnz_per_dimension = NULL;
+    this->table_A = NULL;
+    this->table_B = NULL;
+    this->d_noise = _d_noise;
+
+    initData(_examples);
+}
+
+GMHIKernelRaw::~GMHIKernelRaw()
+{
+    cleanupData();
+}
+
+void GMHIKernelRaw::cleanupData()
+{
+    if ( this->examples_raw != NULL ) {
+        for ( uint d = 0; d < this->num_dimension; d++ )
+            if (examples_raw[d] != NULL)
+                delete [] examples_raw[d];
+        delete [] this->examples_raw;
+        this->examples_raw = NULL;
+    }
+    if ( this->nnz_per_dimension != NULL ) {
+        delete [] this->nnz_per_dimension;
+        this->nnz_per_dimension = NULL;
+    }
+    if ( this->table_A != NULL ) {
+        for ( uint d = 0; d < this->num_dimension; d++ )
+            if (table_A[d] != NULL)
+                delete [] table_A[d];
+        delete [] this->table_A;
+        this->table_A = NULL;
+    }
+    if ( this->table_B != NULL ) {
+        for ( uint d = 0; d < this->num_dimension; d++ )
+            if (table_B[d] != NULL)
+                delete [] table_B[d];
+        delete [] this->table_B;
+        this->table_B = NULL;
+    }
+
+}
+
+void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_examples )
+{
+    if (_examples.size() == 0 )
+        fthrow(Exception, "No examples given for learning");
+
+    cleanupData();
+
+    this->num_dimension = _examples[0]->getDim();
+    this->examples_raw = new sparseVectorElement *[num_dimension];
+    this->nnz_per_dimension = new uint [num_dimension];
+    this->num_examples = _examples.size();
+
+    // waste memory and allocate a non-sparse data block
+    sparseVectorElement **examples_raw_increment = new sparseVectorElement *[num_dimension];
+    for (uint d = 0; d < this->num_dimension; d++)
+    {
+        this->examples_raw[d] = new sparseVectorElement [ this->num_examples ];
+        examples_raw_increment[d] = this->examples_raw[d];
+        this->nnz_per_dimension[d] = 0;
+    }
+
+    // additionally allocate a Vector with as many entries as examples
+    // this vector will contain the L1 norm values of all examples + noise
+    // thereby, it represents the diagonal entries of our kernel matrix for
+    // the special case of minimum kernel
+    this->diagonalElements.resize ( this->num_examples );
+    this->diagonalElements.set ( this->d_noise );
+
+    uint example_index = 0;
+    NICE::Vector::iterator itDiagEl = this->diagonalElements.begin();
+
+    // minor pre-allocation
+    uint index;
+    double value;
+    double l1norm;
+
+    for ( std::vector< const NICE::SparseVector * >::const_iterator i = _examples.begin();
+          i != _examples.end();
+          i++, example_index++, itDiagEl++
+        )
+    {
+        l1norm = 0.0;
+        const NICE::SparseVector *x = *i;
+        for ( NICE::SparseVector::const_iterator j = x->begin(); j != x->end(); j++ )
+        {
+            index = j->first;
+            value = j->second;
+            examples_raw_increment[index]->value = value;
+            examples_raw_increment[index]->example_index = example_index;
+            // move to the next element
+            examples_raw_increment[index]++;
+            this->nnz_per_dimension[index]++;
+
+            l1norm = l1norm + value;
+        }
+        *itDiagEl = *itDiagEl + l1norm;
+    }
+
+    delete [] examples_raw_increment;
+
+    // sort along each dimension
+    for (uint d = 0; d < this->num_dimension; d++)
+    {
+        uint nnz = this->nnz_per_dimension[d];
+        if ( nnz > 1 )
+            std::sort( this->examples_raw[d], this->examples_raw[d] + nnz );
+    }
+
+    // pre-allocate the A and B matrices
+    this->table_A = allocateTable();
+    this->table_B = allocateTable();
+}
+
+double **GMHIKernelRaw::allocateTable() const
+{
+    double **table;
+    table = new double *[this->num_dimension];
+    for (uint i = 0; i < this->num_dimension; i++)
+    {
+        uint nnz = this->nnz_per_dimension[i];
+        if (nnz>0) {
+            table[i] = new double [ nnz ];
+        } else {
+            table[i] = NULL;
+        }
+    }
+    return table;
+}
+
+void GMHIKernelRaw::copyTable(double **src, double **dst) const
+{
+    for (uint i = 0; i < this->num_dimension; i++)
+    {
+        uint nnz = this->nnz_per_dimension[i];
+        if (nnz>0) {
+            for (uint j = 0; j < nnz; j++)
+                dst[i][j] = src[i][j];
+        } else {
+            dst[i] = NULL;
+        }
+    }
+}
+
+void GMHIKernelRaw::updateTables ( const NICE::Vector & _x ) const
+{
+    for (uint dim = 0; dim < this->num_dimension; dim++)
+    {
+      double alpha_sum = 0.0;
+      double alpha_times_x_sum = 0.0;
+      uint nnz = nnz_per_dimension[dim];
+
+      // loop through all elements in sorted order
+      sparseVectorElement *training_values_in_dim = examples_raw[dim];
+      for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
+      {
+        // index of the feature
+        int index = training_values_in_dim->example_index;
+        // element of the feature
+        double elem = training_values_in_dim->value;
+
+        alpha_times_x_sum += _x[index] * elem;
+        this->table_A[dim][cntNonzeroFeat] = alpha_times_x_sum;
+
+        alpha_sum += _x[index];
+        this->table_B[dim][cntNonzeroFeat] = alpha_sum;
+      }
+    }
+
+
+}
+
+/** multiply with a vector: A*x = y */
+void GMHIKernelRaw::multiply (NICE::Vector & _y, const NICE::Vector & _x) const
+{
+  // STEP 1: initialize tables A and B
+  updateTables(_x);
+
+  _y.resize( this->num_examples );
+  _y.set(0.0);
+
+  for (uint dim = 0; dim < this->num_dimension; dim++)
+  {
+    uint nnz = this->nnz_per_dimension[dim];
+
+    if ( nnz == 0 ) {
+      // all values are zero in this dimension :) and we can simply ignore the feature
+      continue;
+    }
+
+    sparseVectorElement *training_values_in_dim = examples_raw[dim];
+    for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
+    {
+      uint feat = training_values_in_dim->example_index;
+      uint inversePosition = cntNonzeroFeat;
+      double fval = training_values_in_dim->value;
+
+      double firstPart = this->table_A[dim][inversePosition];
+      double secondPart = this->table_B[dim][nnz-1] - this->table_B[dim][inversePosition];
+
+      _y[feat] += firstPart + fval * secondPart;
+    }
+  }
+
+  for (uint feat = 0; feat < this->num_examples; feat++)
+    _y[feat] += this->d_noise * _x[feat];
+
+
+}
+
+/** get the number of rows in A */
+uint GMHIKernelRaw::rows () const
+{
+  // return the number of examples
+  return num_examples;
+}
+
+/** get the number of columns in A */
+uint GMHIKernelRaw::cols () const
+{
+  // return the number of examples
+  return num_examples;
+}
+
+double **GMHIKernelRaw::getTableA() const
+{
+    double **t = allocateTable();
+    copyTable(this->table_A, t);
+    return t;
+}
+
+double **GMHIKernelRaw::getTableB() const
+{
+    double **t = allocateTable();
+    copyTable(this->table_B, t);
+    return t;
+}
+
+uint *GMHIKernelRaw::getNNZPerDimension() const
+{
+    uint *v = new uint[this->num_dimension];
+    for (uint i = 0; i < this->num_dimension; i++)
+        v[i] = this->nnz_per_dimension[i];
+    return v;
+}
+
+
+void NICE::GMHIKernelRaw::getDiagonalElements( NICE::Vector & _diagonalElements) const
+{
+    _diagonalElements = this->diagonalElements;
+}
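
For reference, here is the identity that updateTables and multiply above evaluate, written out as a sketch in the code's own notation ($A_d$ = table_A and $B_d$ = table_B are the cumulative sums over dimension $d$ sorted in ascending order, $n_d$ the number of nonzeros there, $p_i$ the sorted position of example $i$, and $\sigma^2$ the d_noise term):

$$\big((K + \sigma^2 I)\,\alpha\big)_i \;=\; \sum_{d} \Big[ \sum_{j:\, x_{j,d} \le x_{i,d}} \alpha_j\, x_{j,d} \;+\; x_{i,d} \sum_{j:\, x_{j,d} > x_{i,d}} \alpha_j \Big] + \sigma^2 \alpha_i \;=\; \sum_{d} \Big[ A_d[p_i] + x_{i,d}\,\big(B_d[n_d-1] - B_d[p_i]\big) \Big] + \sigma^2 \alpha_i$$

since $\min(x_{i,d}, x_{j,d})$ equals $x_{j,d}$ for examples sorted below $i$ and $x_{i,d}$ for those sorted above. This is what makes one product linear in the number of stored nonzeros rather than quadratic in the number of examples.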

+ 84 - 0
GMHIKernelRaw.h

@@ -0,0 +1,84 @@
+/**
+* @file GMHIKernelRaw.h
+* @author Erik Rodner, Alexander Freytag
+* @brief Fast multiplication with histogram intersection kernel matrices (Interface)
+*/
+#ifndef _NICE_GMHIKERNELRAWINCLUDE
+#define _NICE_GMHIKERNELRAWINCLUDE
+
+#include <vector>
+
+#include <core/algebra/GenericMatrix.h>
+
+namespace NICE {
+
+ /**
+ * @class GMHIKernelRaw
+ * @brief Fast multiplication with histogram intersection kernel matrices
+ * @author Erik Rodner, Alexander Freytag
+ */
+
+class GMHIKernelRaw : public GenericMatrix
+{
+  public:
+    typedef struct sparseVectorElement {
+        uint example_index;
+        double value;
+
+        bool operator< (const sparseVectorElement & a) const
+        {
+            return value < a.value;
+        }
+
+    } sparseVectorElement;
+
+  protected:
+
+    sparseVectorElement **examples_raw;
+    double **table_A;
+    double **table_B;
+
+    NICE::Vector diagonalElements;
+
+    uint *nnz_per_dimension;
+    uint num_dimension;
+    uint num_examples;
+    double d_noise;
+
+    void initData ( const std::vector< const NICE::SparseVector *> & examples );
+    void cleanupData ();
+    double **allocateTable() const;
+    void copyTable(double **src, double **dst) const;
+
+  public:
+
+    /** simple constructor */
+    GMHIKernelRaw( const std::vector< const NICE::SparseVector *> & examples, const double d_noise = 0.1 );
+
+    /** multiply with a vector: A*x = y; this is not really const anymore!! */
+    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
+
+    /** get the number of rows in A */
+    virtual uint rows () const;
+
+    /** get the number of columns in A */
+    virtual uint cols () const;
+
+    double **getTableA() const;
+    double **getTableB() const;
+    uint *getNNZPerDimension() const;
+
+    /** simple destructor */
+    virtual ~GMHIKernelRaw();
+
+    sparseVectorElement **getDataMatrix() const { return examples_raw; };
+    void updateTables ( const NICE::Vector & _x ) const;
+
+    /** get the diagonal elements of the current matrix */
+    void getDiagonalElements ( NICE::Vector & _diagonalElements ) const;
+
+};
+
+}
+#endif
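
A minimal usage sketch for the interface above (hypothetical, not part of this commit; the SparseVector insert/setDim calls and the include path are assumed to match the ones used elsewhere in this diff):

// hypothetical usage sketch of GMHIKernelRaw, not part of this commit
#include <iostream>
#include <vector>

#include <core/vector/SparseVectorT.h>
#include <core/vector/VectorT.h>

#include "gp-hik-core/GMHIKernelRaw.h"

int main()
{
    // two toy examples in a 3-dimensional feature space
    NICE::SparseVector x1;
    x1.insert( std::pair<uint, double>( 0, 0.2 ) );
    x1.insert( std::pair<uint, double>( 1, 0.7 ) );
    x1.setDim( 3 );

    NICE::SparseVector x2;
    x2.insert( std::pair<uint, double>( 0, 0.5 ) );
    x2.insert( std::pair<uint, double>( 2, 0.4 ) );
    x2.setDim( 3 );

    std::vector< const NICE::SparseVector * > examples;
    examples.push_back( &x1 );
    examples.push_back( &x2 );

    // implicit kernel matrix K + noise*I, accessible only through multiply()
    NICE::GMHIKernelRaw gm ( examples, 0.01 /* d_noise */ );

    NICE::Vector alpha ( 2, 1.0 );
    NICE::Vector y;
    gm.multiply ( y, alpha );   // y = ( K + noise*I ) * alpha
    std::cerr << y << std::endl;

    return 0;
}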

+ 0 - 1
GPHIKClassifier.cpp

@@ -451,7 +451,6 @@ void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * _example,
     }
     case APPROXIMATE_FINE:
     {
-      std::cerr << "gphyper->computePredictiveVarianceApproximateFine" << std::endl;
       this->gphyper->computePredictiveVarianceApproximateFine( *_example, _uncertainty );
       break;
     }    

+ 404 - 0
GPHIKRawClassifier.cpp

@@ -0,0 +1,404 @@
+/**
+* @file GPHIKRawClassifier.cpp
+* @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning) (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/01/2012
+*/
+
+// STL includes
+#include <iostream>
+
+// NICE-core includes
+#include <core/basics/numerictools.h>
+#include <core/basics/Timer.h>
+
+#include <core/algebra/ILSConjugateGradients.h>
+#include <core/algebra/EigValues.h>
+
+// gp-hik-core includes
+#include "GPHIKRawClassifier.h"
+#include "GMHIKernelRaw.h"
+
+using namespace std;
+using namespace NICE;
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PROTECTED METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+
+
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PUBLIC METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+GPHIKRawClassifier::GPHIKRawClassifier( )
+{
+  this->b_isTrained = false;
+  this->confSection = "";
+  this->nnz_per_dimension = NULL;
+  this->q = NULL;
+  this->gm = NULL;
+
+  // in order to make sure that all necessary variables are set up with default values, we
+  // run initFromConfig with an empty config
+  NICE::Config tmpConfEmpty ;
+  this->initFromConfig ( &tmpConfEmpty, this->confSection );
+
+}
+
+GPHIKRawClassifier::GPHIKRawClassifier( const Config *_conf,
+                                  const string & _confSection
+                                )
+{
+  ///////////
+  // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
+  ///////////
+
+  this->b_isTrained = false;
+  this->confSection = "";
+  this->nnz_per_dimension = NULL;
+  this->q = NULL;
+  this->gm = NULL;
+
+  ///////////
+  // here comes the new code part different from the empty constructor
+  ///////////
+
+  this->confSection = _confSection;
+
+  // if no config file was given, we either restore the classifier from an external file, or run ::init with
+  // an empty config (thereby using default values) when calling the train method
+  if ( _conf != NULL )
+  {
+    this->initFromConfig( _conf, _confSection );
+  }
+  else
+  {
+    // if no config was given, we create an empty one
+    NICE::Config tmpConfEmpty ;
+    this->initFromConfig ( &tmpConfEmpty, this->confSection );
+  }
+
+}
+
+GPHIKRawClassifier::~GPHIKRawClassifier()
+{
+  delete this->solver;
+  this->solver = NULL;
+
+  if (gm != NULL)
+    delete gm;
+}
+
+void GPHIKRawClassifier::initFromConfig(const Config *_conf,
+                                     const string & _confSection
+                                    )
+{
+  this->d_noise     = _conf->gD( _confSection, "noise", 0.01);
+
+  this->confSection = _confSection;
+  this->b_verbose   = _conf->gB( _confSection, "verbose", false);
+  this->b_debug     = _conf->gB( _confSection, "debug", false);
+  this->f_tolerance = _conf->gD( _confSection, "f_tolerance", 1e-10);
+
+  //FIXME this is not used in that way for the standard GPHIKClassifier
+  //string ilssection = "FMKGPHyperparameterOptimization";
+  string ilssection       = _confSection;
+  uint ils_max_iterations = _conf->gI( ilssection, "ils_max_iterations", 1000 );
+  double ils_min_delta    = _conf->gD( ilssection, "ils_min_delta", 1e-7 );
+  double ils_min_residual = _conf->gD( ilssection, "ils_min_residual", 1e-7 );
+  bool ils_verbose        = _conf->gB( ilssection, "ils_verbose", false );
+  this->solver            = new ILSConjugateGradients( ils_verbose,
+                                                       ils_max_iterations,
+                                                       ils_min_delta,
+                                                       ils_min_residual
+                                                     );
+  if ( this->b_verbose )
+  {
+      std::cerr << "GPHIKRawClassifier::initFromConfig " <<std::endl;
+      std::cerr << "   confSection " << confSection << std::endl;
+      std::cerr << "   d_noise " << d_noise << std::endl;
+      std::cerr << "   f_tolerance " << f_tolerance << std::endl;
+      std::cerr << "   ils_max_iterations " << ils_max_iterations << std::endl;
+      std::cerr << "   ils_min_delta " << ils_min_delta << std::endl;
+      std::cerr << "   ils_min_residual " << ils_min_residual << std::endl;
+      std::cerr << "   ils_verbose " << ils_verbose << std::endl;
+  }
+}
+
+///////////////////// ///////////////////// /////////////////////
+//                         GET / SET
+///////////////////// ///////////////////// /////////////////////
+
+std::set<uint> GPHIKRawClassifier::getKnownClassNumbers ( ) const
+{
+  if ( ! this->b_isTrained )
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );
+
+  return this->knownClasses;
+}
+
+
+///////////////////// ///////////////////// /////////////////////
+//                      CLASSIFIER STUFF
+///////////////////// ///////////////////// /////////////////////
+
+
+
+void GPHIKRawClassifier::classify ( const NICE::SparseVector * _xstar,
+                                 uint & _result,
+                                 SparseVector & _scores
+                               ) const
+{
+  if ( ! this->b_isTrained )
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );
+  _scores.clear();
+
+  GMHIKernelRaw::sparseVectorElement **dataMatrix = gm->getDataMatrix();
+
+  uint maxClassNo = 0;
+  for ( std::map<uint, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
+  {
+    uint classno = i->first;
+    maxClassNo = std::max ( maxClassNo, classno );
+    double beta = 0;
+
+    if ( this->q != NULL ) {
+      std::map<uint, double *>::const_iterator j = this->precomputedT.find ( classno );
+      double *T = j->second;
+      for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++ )
+      {
+        uint dim = i->first;
+        double v = i->second;
+        uint qBin = q->quantize( v, dim );
+
+        beta += T[dim * q->getNumberOfBins() + qBin];
+      }
+    } else {
+      const PrecomputedType & A = i->second;
+      std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
+      const PrecomputedType & B = j->second;
+
+      for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++)
+      {
+        uint dim = i->first;
+        double fval = i->second;
+
+        uint nnz = this->nnz_per_dimension[dim];
+        uint nz = this->num_examples - nnz;
+
+        if ( nnz == 0 ) continue;
+        // useful
+        //if ( fval < this->f_tolerance ) continue;
+
+        uint position = 0;
+
+        //this->X_sorted.findFirstLargerInDimension(dim, fval, position);
+        GMHIKernelRaw::sparseVectorElement fval_element;
+        fval_element.value = fval;
+
+        //std::cerr << "value to search for " << fval << endl;
+        //std::cerr << "data matrix in dimension " << dim << endl;
+        //for (int j = 0; j < nnz; j++)
+        //    std::cerr << dataMatrix[dim][j].value << std::endl;
+
+        GMHIKernelRaw::sparseVectorElement *it = upper_bound ( dataMatrix[dim], dataMatrix[dim] + nnz, fval_element );
+        position = distance ( dataMatrix[dim], it );
+        // add zero elements
+        if ( fval_element.value > 0.0 )
+            position += nz;
+
+
+        bool posIsZero ( position == 0 );
+        if ( !posIsZero )
+            position--;
+
+
+        double firstPart = 0.0;
+        if ( !posIsZero && ((position-nz) < this->num_examples) )
+          firstPart = (A[dim][position-nz]);
+
+        double secondPart( B[dim][this->num_examples-1-nz]);
+        if ( !posIsZero && (position >= nz) )
+            secondPart -= B[dim][position-nz];
+
+        // but apply using the transformed one
+        beta += firstPart + secondPart* fval;
+      }
+    }
+
+    _scores[ classno ] = beta;
+  }
+  _scores.setDim ( *this->knownClasses.rbegin() + 1 );
+
+
+  if ( this->knownClasses.size() > 2 )
+  { // multi-class classification
+    _result = _scores.maxElement();
+  }
+  else if ( this->knownClasses.size() == 2 ) // binary setting
+  {
+    uint class1 = *(this->knownClasses.begin());
+    uint class2 = *(this->knownClasses.rbegin());
+    uint class_for_which_we_have_a_score = _scores.begin()->first;
+    uint class_for_which_we_dont_have_a_score = (class1 == class_for_which_we_have_a_score ? class2 : class1);
+
+    _scores[class_for_which_we_dont_have_a_score] = - _scores[class_for_which_we_have_a_score];
+
+    _result = _scores[class_for_which_we_have_a_score] > 0.0 ? class_for_which_we_have_a_score : class_for_which_we_dont_have_a_score;
+  }
+
+}
+
+
+/** training process */
+void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples,
+                              const NICE::Vector & _labels
+                            )
+{
+  // security-check: examples and labels have to be of same size
+  if ( _examples.size() != _labels.size() )
+  {
+    fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );
+  }
+  this->num_examples = _examples.size();
+
+  this->knownClasses.clear();
+  for ( uint i = 0; i < _labels.size(); i++ )
+    this->knownClasses.insert((uint)_labels[i]);
+
+  std::map<uint, NICE::Vector> binLabels;
+  for ( set<uint>::const_iterator j = knownClasses.begin(); j != knownClasses.end(); j++ )
+  {
+    uint current_class = *j;
+    Vector labels_binary ( _labels.size() );
+    for ( uint i = 0; i < _labels.size(); i++ )
+        labels_binary[i] = ( _labels[i] == current_class ) ? 1.0 : -1.0;
+
+    binLabels.insert ( pair<uint, NICE::Vector>( current_class, labels_binary) );
+  }
+
+  // handle special binary case
+  if ( knownClasses.size() == 2 )
+  {
+    std::map<uint, NICE::Vector>::iterator it = binLabels.begin();
+    it++;
+    binLabels.erase( binLabels.begin(), it );
+  }
+
+  this->train ( _examples, binLabels );
+}
+
+void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples,
+                              std::map<uint, NICE::Vector> & _binLabels
+                            )
+{
+  // security-check: examples and labels have to be of same size
+  for ( std::map< uint, NICE::Vector >::const_iterator binLabIt = _binLabels.begin();
+        binLabIt != _binLabels.end();
+        binLabIt++
+      )
+  {
+    if ( _examples.size() != binLabIt->second.size() )
+    {
+      fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );
+    }
+  }
+
+  if ( this->b_verbose )
+    std::cerr << "GPHIKRawClassifier::train" << std::endl;
+
+  Timer t;
+  t.start();
+
+  precomputedA.clear();
+  precomputedB.clear();
+  precomputedT.clear();
+
+  // sort examples in each dimension and "transpose" the feature matrix
+  // set up the GenericMatrix interface
+  if (gm != NULL)
+    delete gm;
+
+  gm = new GMHIKernelRaw ( _examples, this->d_noise );
+  nnz_per_dimension = gm->getNNZPerDimension();
+
+  // compute largest eigenvalue of our kernel matrix
+  // note: this guy is shared among all categories,
+  //       since the kernel matrix is shared as well
+  NICE::Vector eigenMax;
+  NICE::Matrix eigenMaxV;
+  // for reproducibility during debugging
+  srand ( 0 );
+  srand48 ( 0 );
+  NICE::EigValues * eig = new EVArnoldi ( false /* verbose flag */,
+                                        10 /*_maxiterations*/
+                                      );
+  eig->getEigenvalues( *gm, eigenMax, eigenMaxV, 1 /*rank*/ );
+  delete eig;
+
+  // set simple jacobi pre-conditioning
+  NICE::Vector diagonalElements;
+  gm->getDiagonalElements ( diagonalElements );
+  solver->setJacobiPreconditioner ( diagonalElements );
+
+  // solve linear equations for each class
+  // be careful when parallelising this!
+  for ( std::map<uint, NICE::Vector>::const_iterator i = _binLabels.begin();
+        i != _binLabels.end();
+        i++
+      )
+  {
+    uint classno = i->first;
+    if (b_verbose)
+        std::cerr << "Training for class " << classno << std::endl;
+    const NICE::Vector & y = i->second;
+    NICE::Vector alpha;
+
+
+    /** About finding a good initial solution (see also GPLikelihoodApproximation)
+      * K~ = K + sigma^2 I
+      *
+      * K~ \approx lambda_max v v^T
+      * \lambda_max v v^T * alpha = k_*     | multiply with v^T from left
+      * => \lambda_max v^T alpha = v^T k_*
+      * => alpha = k_* / lambda_max could be a good initial start
+      * If we put everything in the first equation this gives us
+      * v = k_*
+      * This reduces the number of iterations by 5 or 8
+      */
+    alpha = (y * (1.0 / eigenMax[0]) );
+
+    solver->solveLin( *gm, y, alpha );
+
+    // TODO: get lookup tables, A, B, etc. and store them
+    gm->updateTables(alpha);
+    double **A = gm->getTableA();
+    double **B = gm->getTableB();
+
+    precomputedA.insert ( pair<uint, PrecomputedType> ( classno, A ) );
+    precomputedB.insert ( pair<uint, PrecomputedType> ( classno, B ) );
+  }
+
+
+  t.stop();
+  if ( this->b_verbose )
+    std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;
+
+
+  //indicate that we finished training successfully
+  this->b_isTrained = true;
+
+  // clean up all examples ??
+  if ( this->b_verbose )
+    std::cerr << "Learning finished" << std::endl;
+
+
+}
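
As a side note, the initial-solution comment in train() restated in LaTeX (a sketch; the comment's $k_*$ plays the role of the right-hand side $y$ that the code actually solves against):

$$\tilde{K} = K + \sigma^2 I \approx \lambda_{\max}\, v v^\top, \qquad \tilde{K}\alpha = y \;\Rightarrow\; \lambda_{\max}\,(v^\top \alpha) \approx v^\top y \;\Rightarrow\; \alpha^{(0)} = y / \lambda_{\max}$$

which is exactly the starting point alpha = ( y * (1.0 / eigenMax[0]) ) handed to solveLin.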
+
+
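
And a hypothetical end-to-end sketch for the classifier as a whole (not part of this commit; the Config keys and NICE calls are the ones appearing in this diff, the data and class labels are illustrative):

// hypothetical training/classification sketch, not part of this commit
#include <iostream>
#include <vector>

#include <core/basics/Config.h>
#include <core/vector/SparseVectorT.h>
#include <core/vector/VectorT.h>

#include "gp-hik-core/GPHIKRawClassifier.h"

int main()
{
    NICE::Config conf;
    conf.sD( "GPHIKRawClassifier", "noise", 0.01 );

    NICE::GPHIKRawClassifier classifier ( &conf, "GPHIKRawClassifier" );

    // three sparse 2-dimensional training examples from two classes
    std::vector< const NICE::SparseVector * > examples;
    NICE::Vector labels ( 3 );
    const double raw[3][2] = { {0.9, 0.1}, {0.8, 0.2}, {0.1, 0.9} };
    const uint   cls[3]    = { 1, 1, 2 };
    for ( uint i = 0; i < 3; i++ )
    {
        NICE::SparseVector *x = new NICE::SparseVector;
        x->insert( std::pair<uint, double>( 0, raw[i][0] ) );
        x->insert( std::pair<uint, double>( 1, raw[i][1] ) );
        x->setDim( 2 );
        examples.push_back( x );
        labels[i] = cls[i];
    }

    classifier.train ( examples, labels );

    // classify a new example that resembles class 1
    NICE::SparseVector xstar;
    xstar.insert( std::pair<uint, double>( 0, 0.85 ) );
    xstar.insert( std::pair<uint, double>( 1, 0.15 ) );
    xstar.setDim( 2 );

    uint result;
    NICE::SparseVector scores;
    classifier.classify ( &xstar, result, scores );
    std::cerr << "predicted class: " << result << std::endl;

    for ( uint i = 0; i < examples.size(); i++ )
        delete examples[i];
    return 0;
}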

+ 180 - 0
GPHIKRawClassifier.h

@@ -0,0 +1,180 @@
+/**
+* @file GPHIKRawClassifier.h
+* @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning) (Interface)
+* @author Erik Rodner
+* @date 16-09-2015 (dd-mm-yyyy)
+*/
+#ifndef _NICE_GPHIKRAWCLASSIFIERINCLUDE
+#define _NICE_GPHIKRAWCLASSIFIERINCLUDE
+
+// STL includes
+#include <string>
+#include <limits>
+#include <set>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Persistent.h>
+#include <core/vector/SparseVectorT.h>
+#include <core/algebra/ILSConjugateGradients.h>
+
+//
+#include "quantization/Quantization.h"
+#include "GMHIKernelRaw.h"
+
+namespace NICE {
+
+ /**
+ * @class GPHIKRawClassifier
+ * @brief ...
+ * @author Erik Rodner
+ */
+
+class GPHIKRawClassifier //: public NICE::Persistent
+{
+
+  protected:
+
+    /////////////////////////
+    /////////////////////////
+    // PROTECTED VARIABLES //
+    /////////////////////////
+    /////////////////////////
+
+    ///////////////////////////////////
+    // output/debug related settings //
+    ///////////////////////////////////
+
+    /** verbose flag for useful output*/
+    bool b_verbose;
+    /** debug flag for several outputs useful for debugging*/
+    bool b_debug;
+
+    //////////////////////////////////////
+    //      general specifications      //
+    //////////////////////////////////////
+
+    /** Header in configfile where variable settings are stored */
+    std::string confSection;
+
+    //////////////////////////////////////
+    // classification related variables //
+    //////////////////////////////////////
+    /** memorize whether the classifier was already trained*/
+    bool b_isTrained;
+
+
+    /** Gaussian label noise for model regularization */
+    double d_noise;
+
+    ILSConjugateGradients *solver;
+    /** object performing feature quantization */
+    NICE::Quantization *q;
+
+    typedef double ** PrecomputedType;
+
+    /** precomputed arrays A (1 per class) needed for classification without quantization  */
+    std::map< uint, PrecomputedType > precomputedA;
+    /** precomputed arrays B (1 per class) needed for classification without quantization  */
+    std::map< uint, PrecomputedType > precomputedB;
+
+    /** precomputed LUTs (1 per class) needed for classification with quantization  */
+    std::map< uint, double * > precomputedT;
+
+    uint *nnz_per_dimension;
+    uint num_examples;
+
+    double f_tolerance;
+
+    GMHIKernelRaw *gm;
+    std::set<uint> knownClasses;
+
+    /////////////////////////
+    /////////////////////////
+    //  PROTECTED METHODS  //
+    /////////////////////////
+    /////////////////////////
+
+
+  public:
+
+    /**
+     * @brief default constructor
+     */
+    GPHIKRawClassifier( );
+
+
+    /**
+     * @brief standard constructor
+     */
+    GPHIKRawClassifier( const NICE::Config *_conf ,
+                     const std::string & s_confSection = "GPHIKRawClassifier"
+                   );
+
+    /**
+     * @brief simple destructor
+     */
+    ~GPHIKRawClassifier();
+
+    /**
+    * @brief Setup internal variables and objects used
+    * @param conf Config file to specify variable settings
+    * @param s_confSection
+    */
+    void initFromConfig(const NICE::Config *_conf,
+                        const std::string & s_confSection
+                       );
+
+    ///////////////////// ///////////////////// /////////////////////
+    //                         GET / SET
+    ///////////////////// ///////////////////// /////////////////////
+
+    /**
+     * @brief Return currently known class numbers
+     */
+    std::set<uint> getKnownClassNumbers ( ) const;
+
+
+
+    ///////////////////// ///////////////////// /////////////////////
+    //                      CLASSIFIER STUFF
+    ///////////////////// ///////////////////// /////////////////////
+
+    /**
+     * @brief classify a given example with the previously learned model
+     * @author Alexander Freytag, Erik Rodner
+     * @param example (SparseVector) to be classified given in a sparse representation
+     * @param result (int) class number of most likely class
+     * @param scores (SparseVector) classification scores for known classes
+     */
+    void classify ( const NICE::SparseVector * _example,
+                    uint & _result,
+                    NICE::SparseVector & _scores
+                  ) const;
+
+    /**
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors
+     * @date 18-10-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag, Erik Rodner
+     * @param examples (std::vector< NICE::SparseVector *>) training data given in a sparse representation
+     * @param labels (Vector) class labels (multi-class)
+     */
+    void train ( const std::vector< const NICE::SparseVector *> & _examples,
+                 const NICE::Vector & _labels
+               );
+
+    /**
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors
+     * @author Alexander Freytag, Erik Rodner
+     * @param examples examples to use given in a sparse data structure
+     * @param binLabels corresponding binary labels with class numbers. There is no need here for every example to have exactly one positive entry across this set (1,-1)
+     */
+    void train ( const std::vector< const NICE::SparseVector *> & _examples,
+                 std::map<uint, NICE::Vector> & _binLabels
+               );
+
+};
+
+}
+
+#endif

+ 3 - 4
GPLikelihoodApprox.cpp

@@ -191,16 +191,15 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & _x
      *  This reduces the number of iterations by 5 or 8
      */
     NICE::Vector alpha;
-    
     alpha = (binaryLabels[classCnt] * (1.0 / _eigenValues[0]) );
-    
+
     if ( verbose )
       std::cerr << "Using the standard solver ..." << std::endl;
 
     t.start();
     linsolver->solveLin ( *ikm, binaryLabels[classCnt], alpha );
     t.stop();
-   
+
     alphas.insert( std::pair<uint, NICE::Vector> ( classCnt, alpha) );
   }  
   
@@ -429,4 +428,4 @@ void GPLikelihoodApprox::setVerbose( const bool & _verbose )
 void GPLikelihoodApprox::setDebug( const bool & _debug )
 {
   this->debug = _debug;
-}
+}

+ 1 - 1
SortedVectorSparse.h

@@ -61,7 +61,7 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     */
     SortedVectorSparse() {
       this->ui_n = 0;
-      this->tolerance = ( T ) 10e-10;
+      this->tolerance = ( T ) 0.0;
       this->b_verbose = false;
     }
 

+ 2 - 2
matlab/ConverterMatlabToNICE.cpp

@@ -115,7 +115,7 @@ NICE::SparseVector MatlabConversion::convertSparseVectorToNice(
         {
           //note: no complex data supported here
             double value ( pr[total++] );
-            if ( b_adaptIndexMtoC ) 
+            if ( b_adaptIndexMtoC )
                 svec.insert( std::pair<uint, double>( row+1,  value ) );
             else
                 svec.insert( std::pair<uint, double>( row,  value ) );
@@ -130,7 +130,7 @@ NICE::SparseVector MatlabConversion::convertSparseVectorToNice(
     {
         //note: no complex data supported here
         double value ( pr[total++] );
-        if ( b_adaptIndexMtoC ) 
+        if ( b_adaptIndexMtoC )
             svec.insert( std::pair<uint, double>( ir[colNonZero]+1, value  ) );
         else
             svec.insert( std::pair<uint, double>( ir[colNonZero], value  ) );

+ 4 - 4
matlab/ConverterNICEToMatlab.cpp

@@ -10,10 +10,10 @@ mxArray* MatlabConversion::convertSparseVectorFromNice( const NICE::SparseVector
 {
     mxArray * matlabSparseVec;
     
-    if ( b_adaptIndexCtoM ) 
-       matlabSparseVec = mxCreateSparse( niceSvec.getDim() -1 /*m*/, 1/*n*/, niceSvec.size() -1 /*nzmax*/, mxREAL);
+    if ( b_adaptIndexCtoM )
+        matlabSparseVec = mxCreateSparse( niceSvec.getDim()-1 /*m*/, 1/*n*/, niceSvec.size() /*nzmax*/, mxREAL);
     else
-      matlabSparseVec = mxCreateSparse( niceSvec.getDim() /*m*/, 1/*n*/, niceSvec.size() /*nzmax*/, mxREAL);
+        matlabSparseVec = mxCreateSparse( niceSvec.getDim() /*m*/, 1/*n*/, niceSvec.size() /*nzmax*/, mxREAL);
     
     
     // To make the returned sparse mxArray useful, you must initialize the pr, ir, jc, and (if it exists) pi arrays.    
@@ -36,7 +36,7 @@ mxArray* MatlabConversion::convertSparseVectorFromNice( const NICE::SparseVector
     for ( NICE::SparseVector::const_iterator myIt = niceSvec.begin(); myIt != niceSvec.end(); myIt++, cnt++ )
     {
         // set index
-        if ( b_adaptIndexCtoM ) 
+        if ( b_adaptIndexCtoM )
             ir[cnt] = myIt->first-1;
         else
             ir[cnt] = myIt->first;

+ 46 - 0
matlab/GPHIKRawClassifier.m

@@ -0,0 +1,46 @@
+% brief:    MATLAB class wrapper for the underlying Matlab-C++ Interface (GPHIKRawClassifierMex.cpp)
+% author:   Alexander Freytag
+% date:     07-01-2014 (dd-mm-yyyy)
+classdef GPHIKRawClassifier < handle
+
+    properties (SetAccess = private, Hidden = true)
+        % Handle to the underlying C++ class instance
+        objectHandle;
+    end
+
+    methods
+
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%      Constructor / Destructor    %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %% constructor - create object
+        function this = GPHIKRawClassifier(varargin)
+            this.objectHandle = GPHIKRawClassifierMex('new', varargin{:});
+        end
+
+        %% destructor - delete object
+        function delete(this)
+            GPHIKRawClassifierMex('delete', this.objectHandle);
+        end
+
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%       Classification stuff       %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %% train - standard train - assumes initialized object
+        function varargout = train(this, varargin)
+            [varargout{1:nargout}] = GPHIKRawClassifierMex('train', this.objectHandle, varargin{:});
+        end
+
+        %% classify
+        function varargout = classify(this, varargin)
+            [varargout{1:nargout}] = GPHIKRawClassifierMex('classify', this.objectHandle, varargin{:});
+        end
+
+
+        %% test - evaluate classifier on whole test set
+        function varargout = test(this, varargin)
+            [varargout{1:nargout}] = GPHIKRawClassifierMex('test', this.objectHandle, varargin{:});
+        end
+
+    end
+end

+ 497 - 0
matlab/GPHIKRawClassifierMex.cpp

@@ -0,0 +1,497 @@
+#ifdef NICE_USELIB_MEX
+/**
+* @file GPHIKRawClassifierMex.cpp
+* @author Alexander Freytag
+* @date 21-09-2015 (dd-mm-yyyy)
+* @brief Matlab-Interface of our GPHIKRawClassifier, allowing for training and classification without more advanced methods.
+*/
+
+// C / MEX includes
+#include <math.h>
+#include <matrix.h>
+#include <mex.h>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+#include <core/vector/MatrixT.h>
+#include <core/vector/VectorT.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKRawClassifier.h"
+
+
+// Interface for conversion between Matlab and C objects
+#include "gp-hik-core/matlab/classHandleMtoC.h"
+#include "gp-hik-core/matlab/ConverterMatlabToNICE.h"
+#include "gp-hik-core/matlab/ConverterNICEToMatlab.h"
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+
+NICE::Config parseParametersGPHIKRawClassifier(const mxArray *prhs[], int nrhs)
+{
+  NICE::Config conf;
+
+  // if first argument is the filename of an existing config file,
+  // read the config accordingly
+
+  int i_start ( 0 );
+  std::string variable = MatlabConversion::convertMatlabToString(prhs[i_start]);
+  if(variable == "conf")
+  {
+      conf = NICE::Config ( MatlabConversion::convertMatlabToString( prhs[i_start+1] )  );
+      i_start = i_start+2;
+  }
+
+  // now run over all given parameter specifications
+  // and add them to the config
+  for( int i=i_start; i < nrhs; i+=2 )
+  {
+    std::string variable = MatlabConversion::convertMatlabToString(prhs[i]);
+
+    /////////////////////////////////////////
+    // READ STANDARD BOOLEAN VARIABLES
+    /////////////////////////////////////////
+    if( (variable == "verbose") ||
+        (variable == "debug") ||
+        (variable == "ils_verbose")
+      )
+    {
+      if ( mxIsChar( prhs[i+1] ) )
+      {
+        string value = MatlabConversion::convertMatlabToString( prhs[i+1] );
+        if ( (value != "true") && (value != "false") )
+        {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. In string mode, \'true\' or \'false\' expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+        }
+
+        if( value == "true" )
+          conf.sB("GPHIKRawClassifier", variable, true);
+        else
+          conf.sB("GPHIKRawClassifier", variable, false);
+      }
+      else if ( mxIsLogical( prhs[i+1] ) )
+      {
+        bool value = MatlabConversion::convertMatlabToBool( prhs[i+1] );
+        conf.sB("GPHIKRawClassifier", variable, value);
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. \'true\', \'false\', or logical expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+      }
+    }
+
+    /////////////////////////////////////////
+    // READ STANDARD INT VARIABLES
+    /////////////////////////////////////////
+
+    /////////////////////////////////////////
+    // READ STRICT POSITIVE INT VARIABLES
+    /////////////////////////////////////////
+    if ( variable == "ils_max_iterations" )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = MatlabConversion::convertMatlabToDouble(prhs[i+1]);
+        if( value < 1 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+        }
+        conf.sI("GPHIKRawClassifier", variable, (int) value);
+      }
+      else if ( mxIsInt32( prhs[i+1] ) )
+      {
+        int value = MatlabConversion::convertMatlabToInt32(prhs[i+1]);
+        if( value < 1 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+        }
+        conf.sI("GPHIKRawClassifier", variable, value);
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Int32 or Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+      }
+    }
+
+    /////////////////////////////////////////
+    // READ STANDARD DOUBLE VARIABLES
+    /////////////////////////////////////////
+
+
+    /////////////////////////////////////////
+    // READ POSITIVE DOUBLE VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "f_tolerance") ||
+         (variable == "ils_min_delta") ||
+         (variable == "ils_min_residual") ||
+         (variable == "noise")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = MatlabConversion::convertMatlabToDouble(prhs[i+1]);
+        if( value < 0.0 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+        }
+        conf.sD("GPHIKRawClassifier", variable, value);
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+      }
+    }
+
+    /////////////////////////////////////////
+    // READ REMAINING SPECIFIC VARIABLES
+    /////////////////////////////////////////
+
+    if(variable == "ils_method")
+    {
+      string value = MatlabConversion::convertMatlabToString(prhs[i+1]);
+      if(value != "CG" && value != "CGL" && value != "SYMMLQ" && value != "MINRES")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_method\'. \'CG\', \'CGL\', \'SYMMLQ\' or \'MINRES\' expected.");
+        conf.sS("GPHIKRawClassifier", variable, value);
+    }
+
+  }
+
+
+  return conf;
+}
+
+// MAIN MATLAB FUNCTION
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{
+    // get the command string specifying what to do
+    if (nrhs < 1)
+        mexErrMsgTxt("No commands and options passed... Aborting!");
+
+    if( !mxIsChar( prhs[0] ) )
+        mexErrMsgTxt("First argument needs to be the command, ie.e, the class method to call... Aborting!");
+
+    std::string cmd = MatlabConversion::convertMatlabToString( prhs[0] );
+
+
+    // create object
+    if ( !strcmp("new", cmd.c_str() ) )
+    {
+        // check output variable
+        if (nlhs != 1)
+            mexErrMsgTxt("New: One output expected.");
+
+        // read config settings
+        NICE::Config conf = parseParametersGPHIKRawClassifier(prhs+1,nrhs-1);
+
+        // create class instance
+        NICE::GPHIKRawClassifier * classifier = new NICE::GPHIKRawClassifier ( &conf, "GPHIKRawClassifier" /*sectionName in config*/ );
+
+
+        // handle to the C++ instance
+        plhs[0] = MatlabConversion::convertPtr2Mat<NICE::GPHIKRawClassifier>( classifier );
+        return;
+    }
+
+    // in all other cases, there should be a second input,
+    // which the be the class instance handle
+    if (nrhs < 2)
+      mexErrMsgTxt("Second input should be a class instance handle.");
+
+    // delete object
+    if ( !strcmp("delete", cmd.c_str() ) )
+    {
+        // Destroy the C++ object
+        MatlabConversion::destroyObject<NICE::GPHIKRawClassifier>(prhs[1]);
+        return;
+    }
+
+    // get the class instance pointer from the second input
+    // every following function needs the classifier object
+    NICE::GPHIKRawClassifier * classifier = MatlabConversion::convertMat2Ptr<NICE::GPHIKRawClassifier>(prhs[1]);
+
+
+    ////////////////////////////////////////
+    //  Check which class method to call  //
+    ////////////////////////////////////////
+
+
+    // standard train - assumes initialized object
+    if (!strcmp("train", cmd.c_str() ))
+    {
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+        {
+            mexErrMsgTxt("Train: Unexpected arguments.");
+        }
+
+        //------------- read the data --------------
+
+        std::vector< const NICE::SparseVector *> examplesTrain;
+        NICE::Vector yMultiTrain;
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            examplesTrain = MatlabConversion::convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            NICE::Matrix dataTrain;
+            dataTrain = MatlabConversion::convertDoubleMatrixToNice(prhs[2]);
+
+            //----------------- convert data to sparse data structures ---------
+            examplesTrain.resize( dataTrain.rows() );
+
+
+            std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+            for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+            {
+                *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+            }
+        }
+
+        yMultiTrain = MatlabConversion::convertDoubleVectorToNice(prhs[3]);
+
+        //----------------- train our classifier -------------
+        classifier->train ( examplesTrain , yMultiTrain );
+
+        //----------------- clean up -------------
+        for(int i=0;i<examplesTrain.size();i++)
+            delete examplesTrain[i];
+
+        return;
+    }
+
+
+    // Classify
+    if ( !strcmp("classify", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+
+        //------------- read the data --------------
+
+        uint result;
+        NICE::SparseVector scores;
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( MatlabConversion::convertSparseVectorToNice( prhs[2] ) );
+            classifier->classify ( example,  result, scores );
+
+            //----------------- clean up -------------
+            delete example;
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( MatlabConversion::convertDoubleVectorToNice(prhs[2]) );
+            NICE::SparseVector * svec  = new NICE::SparseVector( *example );
+            delete example;
+
+            classifier->classify ( svec,  result, scores );
+
+            //----------------- clean up -------------
+            delete svec;
+
+        }
+
+
+        // output
+        plhs[0] = mxCreateDoubleScalar( result );
+
+        if(nlhs >= 2)
+        {
+            plhs[1] = MatlabConversion::convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
+        }
+        return;
+
+
+    // Test - evaluate classifier on whole test set
+    if ( !strcmp("test", cmd.c_str() ) )
+    {
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        //------------- read the data --------------
+
+
+        bool dataIsSparse ( mxIsSparse( prhs[2] ) );
+
+        std::vector< const NICE::SparseVector *> dataTest_sparse;
+        NICE::Matrix dataTest_dense;
+
+        if ( dataIsSparse )
+        {
+            dataTest_sparse = MatlabConversion::convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            dataTest_dense = MatlabConversion::convertDoubleMatrixToNice(prhs[2]);
+        }
+
+        NICE::Vector yMultiTest;
+        yMultiTest = MatlabConversion::convertDoubleVectorToNice(prhs[3]);
+
+
+        // ------------------------------------------
+        // ------------- PREPARATION --------------
+        // ------------------------------------------
+
+        // determine classes known during training and corresponding mapping
+        // thereby allow for non-continuous class labels
+        std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
+
+        uint noClassesKnownTraining ( classesKnownTraining.size() );
+        std::map< uint, uint > mapClNoToIdxTrain;
+        std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+        for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+            mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
+
+        // determine classes known during testing and corresponding mapping
+        // thereby allow for non-continuous class labels
+        std::set< uint > classesKnownTest;
+        classesKnownTest.clear();
+
+
+        // determine which classes we have in our label vector
+        // -> MATLAB: myClasses = unique(y);
+        for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+        {
+          if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+          {
+            classesKnownTest.insert ( *it );
+          }
+        }
+
+        uint noClassesKnownTest ( classesKnownTest.size() );
+        std::map< uint, uint> mapClNoToIdxTest;
+        std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+        for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+            mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  );
+
+
+
+        int i_numTestSamples;
+
+        if ( dataIsSparse )
+            i_numTestSamples = dataTest_sparse.size();
+        else
+            i_numTestSamples = (int) dataTest_dense.rows();
+
+        NICE::Matrix confusionMatrix( noClassesKnownTraining, noClassesKnownTest, 0.0);
+        NICE::Matrix scores( i_numTestSamples, noClassesKnownTraining, 0.0);
+
+
+
+        // ------------------------------------------
+        // ------------- CLASSIFICATION --------------
+        // ------------------------------------------
+
+        NICE::Timer t;
+        double testTime (0.0);
+
+
+
+        for (int i = 0; i < i_numTestSamples; i++)
+        {
+            //----------------- convert data to sparse data structures ---------
+
+
+            uint result;
+            NICE::SparseVector exampleScoresSparse;
+
+            if ( dataIsSparse )
+            {
+              // and classify
+              t.start();
+              classifier->classify( dataTest_sparse[ i ], result, exampleScoresSparse );
+              t.stop();
+              testTime += t.getLast();
+            }
+            else
+            {
+                NICE::Vector example ( dataTest_dense.getRow(i) );
+                NICE::SparseVector * svec = new NICE::SparseVector ( example );
+              // and classify
+              t.start();
+              classifier->classify( svec, result, exampleScoresSparse );
+              t.stop();
+              testTime += t.getLast();
+              delete svec;
+            }
+
+            confusionMatrix(  mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+            int scoreCnt ( 0 );
+            for ( NICE::SparseVector::const_iterator scoreIt = exampleScoresSparse.begin(); scoreIt != exampleScoresSparse.end(); scoreIt++, scoreCnt++ )
+            {
+              scores(i,scoreCnt) = scoreIt->second;
+            }
+
+        }
+
+        std::cerr << "Time for testing: " << testTime << std::endl;
+
+        // clean up
+        if ( dataIsSparse )
+        {
+            for ( std::vector<const NICE::SparseVector *>::iterator it = dataTest_sparse.begin(); it != dataTest_sparse.end(); it++)
+                delete *it;
+        }
+
+
+
+        confusionMatrix.normalizeColumnsL1();
+
+        double recRate = confusionMatrix.trace()/confusionMatrix.cols();
+
+
+        plhs[0] = mxCreateDoubleScalar( recRate );
+
+        if(nlhs >= 2)
+          plhs[1] = MatlabConversion::convertMatrixFromNice(confusionMatrix);
+        if(nlhs >= 3)
+          plhs[2] = MatlabConversion::convertMatrixFromNice(scores);
+
+
+        return;
+    }
+
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+
+   // not supported here
+
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+
+    // not supported here
+
+
+
+    // Got here, so command not recognized
+
+    std::string errorMsg ( cmd );
+    errorMsg += " -- command not recognized.";
+    mexErrMsgTxt( errorMsg.c_str() );
+
+}
+#endif
+
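For reference, the accuracy returned as plhs[0] above is the average recognition rate: each column of the confusion matrix is L1-normalized, and the diagonal is averaged. A minimal sketch of that computation in plain C++, assuming a square row-major matrix (the mex code above additionally copes with differing train and test class sets):

    #include <vector>
    #include <cassert>

    // Average recognition rate: L1-normalize each column of the confusion
    // matrix, then average the diagonal -- mirrors the calls
    // confusionMatrix.normalizeColumnsL1() and trace()/cols() above.
    double averageRecognitionRate( std::vector<std::vector<double> > cm )
    {
        const size_t n = cm.size();
        assert( n > 0 && cm[0].size() == n );

        for ( size_t j = 0; j < n; j++ )
        {
            double colSum = 0.0;
            for ( size_t i = 0; i < n; i++ )
                colSum += cm[i][j];
            if ( colSum > 0.0 )
                for ( size_t i = 0; i < n; i++ )
                    cm[i][j] /= colSum;
        }

        double diagSum = 0.0;
        for ( size_t i = 0; i < n; i++ )
            diagSum += cm[i][i];
        return diagSum / (double) n;
    }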

+ 3 - 3
matlab/Makefile

@@ -1,5 +1,5 @@
-MEX=/home/matlab/7.14/bin/mex
-#MEX=/home/matlab/8.2/academic/bin/mex
+#MEX=/home/matlab/7.14/bin/mex
+MEX=/home/matlab/8.2/academic/bin/mex
 NICEFLAGS1=$(shell pkg-config libgp-hik-core --cflags --libs)
 NICEFLAGS1=$(shell pkg-config libgp-hik-core --cflags --libs)
 NICEFLAGS=$(subst -fopenmp,,$(NICEFLAGS1))
 NICEFLAGS=$(subst -fopenmp,,$(NICEFLAGS1))
 
 
@@ -15,4 +15,4 @@ regression:
 	${MEX} ${NICEFLAGS} -largeArrayDims GPHIKRegressionMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
 	${MEX} ${NICEFLAGS} -largeArrayDims GPHIKRegressionMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
 
 
 unittest:        
 unittest:        
-	${MEX} ${NICEFLAGS} -largeArrayDims testMatlabConversionFunctionsMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
+	${MEX} ${NICEFLAGS} -largeArrayDims testMatlabConversionFunctionsMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp

+ 15 - 12
matlab/testGPHIKClassifierMex.m

@@ -13,47 +13,50 @@ myLabels = [1,1,2,2,3,3];
 
 
 
 
 % init new GPHIKClassifier object
 % init new GPHIKClassifier object
-myGPHIKClassifier = GPHIKClassifierMex ( 'new', 'verbose', 'false', ...
-    'optimization_method', 'none', 'varianceApproximation', 'approximate_rough',...
+myGPHIKClassifier = GPHIKClassifier ( ...
+    'verbose', 'false', ...
+    'optimization_method', 'none', ...
+    'varianceApproximation', 'approximate_rough',...
     'nrOfEigenvaluesToConsiderForVarApprox',4,...
     'nrOfEigenvaluesToConsiderForVarApprox',4,...
     'uncertaintyPredictionForClassification', false ...
     'uncertaintyPredictionForClassification', false ...
     );
     );
 
 
 % run train method
 % run train method
-GPHIKClassifierMex ( 'train', myGPHIKClassifier, myData, myLabels);
+myGPHIKClassifier.train( myData, myLabels );
 
 
 myDataTest = [ 0.3 0.4 0.3
 myDataTest = [ 0.3 0.4 0.3
              ];
              ];
 myLabelsTest = [1];
 myLabelsTest = [1];
 
 
 % run single classification call
 % run single classification call
-[ classNoEst, score, uncertainty] = GPHIKClassifierMex ( 'classify', myGPHIKClassifier, myDataTest )
+[ classNoEst, score, uncertainty] = myGPHIKClassifier.classify( myDataTest )
 % compute predictive variance
 % compute predictive variance
-uncertainty = GPHIKClassifierMex ( 'uncertainty', myGPHIKClassifier, myDataTest )
+uncertainty = myGPHIKClassifier.uncertainty( myDataTest )
 % run test method computing the average recognition rate (arr), potentially on multiple examples
 % run test method computing the average recognition rate (arr), potentially on multiple examples
-[ arr, confMat, scores] = GPHIKClassifierMex ( 'test', myGPHIKClassifier, myDataTest, myLabelsTest )
+[ arr, confMat, scores] = myGPHIKClassifier.test( myDataTest, myLabelsTest )
 
 
 % add a single new example
 % add a single new example
 newExample = [ 0.5 0.5 0.0
 newExample = [ 0.5 0.5 0.0
              ];
              ];
 newLabel = [4];
 newLabel = [4];
-GPHIKClassifierMex ( 'addExample', myGPHIKClassifier, newExample, newLabel);
+myGPHIKClassifier.addExample( newExample, newLabel );
 
 
 % add multiple new examples
 % add multiple new examples
 newExamples = [ 0.3 0.3 0.4;
 newExamples = [ 0.3 0.3 0.4;
                 0.1, 0.2, 0.7
                 0.1, 0.2, 0.7
              ];
              ];
 newLabels = [1,3];
 newLabels = [1,3];
-GPHIKClassifierMex ( 'addMultipleExamples', myGPHIKClassifier, newExamples, newLabels );
+myGPHIKClassifier.addMultipleExamples( newExamples, newLabels );
 
 
 % perform evaluation again
 % perform evaluation again
 
 
 % run single classification call
 % run single classification call
-[ classNoEst, score, uncertainty] = GPHIKClassifierMex ( 'classify', myGPHIKClassifier, myDataTest )
+[ classNoEst, score, uncertainty] = myGPHIKClassifier.classify( myDataTest )
 % compute predictive variance
 % compute predictive variance
-uncertainty = GPHIKClassifierMex ( 'uncertainty', myGPHIKClassifier, myDataTest )
+uncertainty = myGPHIKClassifier.uncertainty( myDataTest )
 % run test method computing the average recognition rate (arr), potentially on multiple examples
 % run test method computing the average recognition rate (arr), potentially on multiple examples
-[ arr, confMat, scores] = GPHIKClassifierMex ( 'test', myGPHIKClassifier, myDataTest, myLabelsTest )
+[ arr, confMat, scores] = myGPHIKClassifier.test( myDataTest, myLabelsTest )
 
 
 % clean up and delete object
 % clean up and delete object
-GPHIKClassifierMex ( 'delete',myGPHIKClassifier);
+myGPHIKClassifier.delete();
+clear ( 'myGPHIKClassifier' );

+ 37 - 0
matlab/testGPHIKRawClassifier.m

@@ -0,0 +1,37 @@
+% brief:    Demo-program showing how to use the GPHIKRawClassifier Mex interface (without a class wrapper)
+% author:   Alexander Freytag
+% date:     21-09-2015 (dd-mm-yyyy)
+
+myData = [ 0.2 0.3 0.5;
+           0.3 0.2 0.5;
+           0.9 0.0 0.1;
+           0.8 0.1 0.1;
+           0.1 0.1 0.8;
+           0.1 0.0 0.9
+          ];
+myLabels = [1,1,2,2,3,3];
+
+
+% init new GPHIKRawClassifier object
+myGPHIKRawClassifier = GPHIKRawClassifierMex ( ...
+     'new', ...
+     'verbose', 'false' ...
+    );
+
+% run train method
+GPHIKRawClassifierMex ( 'train', myGPHIKRawClassifier, myData, myLabels);
+
+myDataTest = [ 0.3 0.4 0.3
+             ];
+myLabelsTest = [1];
+
+% run single classification call
+[ classNoEst, score ]   = GPHIKRawClassifierMex ( 'classify', myGPHIKRawClassifier, myDataTest )
+
+% run test call which classifies entire data set
+[ arr, confMat, scores] = GPHIKRawClassifierMex ( 'test', myGPHIKRawClassifier, myDataTest, myLabelsTest )
+
+
+
+% clean up and delete object
+GPHIKRawClassifierMex ( 'delete',myGPHIKRawClassifier);

+ 36 - 0
matlab/testGPHIKRawClassifierMex.m

@@ -0,0 +1,36 @@
+% brief:    Demo-program showing how to use the GPHIKRawClassifier
+%           interface (with a class wrapper)
+% author:   Alexander Freytag
+% date:     21-09-2015 (dd-mm-yyyy)
+
+myData = [ 0.2 0.3 0.5;
+           0.3 0.2 0.5;
+           0.9 0.0 0.1;
+           0.8 0.1 0.1;
+           0.1 0.1 0.8;
+           0.1 0.0 0.9
+          ];
+myLabels = [1,1,2,2,3,3];
+
+
+% init new GPHIKRawClassifier object
+myGPHIKRawClassifier = GPHIKRawClassifier ( ...
+                       'verbose', 'false' ...
+                     );
+
+% run train method
+myGPHIKRawClassifier.train( myData, myLabels );
+
+myDataTest = [ 0.3 0.4 0.3
+             ];
+myLabelsTest = [1];
+
+% run single classification call
+[ classNoEst, score] = myGPHIKRawClassifier.classify ( myDataTest )
+% run test method computing the average recognition rate (arr), potentially on multiple examples
+[ arr, confMat, scores] = myGPHIKRawClassifier.test( myDataTest, myLabelsTest )
+
+
+% clean up and delete object
+myGPHIKRawClassifier.delete();
+clear ( 'myGPHIKRawClassifier' );

+ 228 - 180
tests/TestFastHIK.cpp

@@ -12,19 +12,21 @@
 #include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>
 #include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>
 #include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
 #include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
 #include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
 #include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
-// 
-// 
+#include <gp-hik-core/GMHIKernelRaw.h>
+//
+//
 #include "gp-hik-core/quantization/Quantization.h"
 #include "gp-hik-core/quantization/Quantization.h"
 #include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"
 #include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"
 
 
 #include "TestFastHIK.h"
 #include "TestFastHIK.h"
 
 
 const bool b_debug = false;
 const bool b_debug = false;
-const bool verbose = false;
+const bool verbose = true;
 const bool verboseStartEnd = true;
 const bool verboseStartEnd = true;
+// the non-randomized solveLin test seems to be broken, hence it stays disabled
 const bool solveLinWithoutRand = false;
 const bool solveLinWithoutRand = false;
-const uint n = 30;//1500;//1500;//10;
-const uint d = 5;//200;//2;
+const uint n = 1500;
+const uint d = 100;
 const uint numBins = 11;//1001;//1001;
 const uint numBins = 11;//1001;//1001;
 const uint solveLinMaxIterations = 1000;
 const uint solveLinMaxIterations = 1000;
 const double sparse_prob = 0.6;
 const double sparse_prob = 0.6;
@@ -33,42 +35,42 @@ const bool smallTest = false;
 bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
 bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
 {
 {
   bool result(true);
   bool result(true);
-  
+
 //   std::cerr << "A.size(): " << A.size() << " B.size(): " << B.size() << std::endl;
 //   std::cerr << "A.size(): " << A.size() << " B.size(): " << B.size() << std::endl;
-  
+
   NICE::VVector::const_iterator itA = A.begin();
   NICE::VVector::const_iterator itA = A.begin();
   NICE::VVector::const_iterator itB = B.begin();
   NICE::VVector::const_iterator itB = B.begin();
-  
+
   while ( (itA != A.end()) && ( itB != B.end()) )
   while ( (itA != A.end()) && ( itB != B.end()) )
   {
   {
     if (itA->size() != itB->size())
     if (itA->size() != itB->size())
     {
     {
       result = false;
       result = false;
       break;
       break;
-    } 
-    
+    }
+
     for(uint i = 0; (i < itA->size()) && (i < itB->size()); i++)
     for(uint i = 0; (i < itA->size()) && (i < itB->size()); i++)
     {
     {
       if (fabs((*itA)[i] - (*itB)[i]) > tolerance)
       if (fabs((*itA)[i] - (*itB)[i]) > tolerance)
       {
       {
         result = false;
         result = false;
-        break;        
+        break;
       }
       }
     }
     }
 
 
     if (result == false)
     if (result == false)
-          break;        
+          break;
     itA++;
     itA++;
     itB++;
     itB++;
   }
   }
-  
+
   return result;
   return result;
 }
 }
 
 
 bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const double & tolerance = 10e-8)
 bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const double & tolerance = 10e-8)
 {
 {
   bool result = true;
   bool result = true;
-  
+
   for (int i = 0; i < size; i++)
   for (int i = 0; i < size; i++)
   {
   {
     if ( fabs(LUT1[i] - LUT2[i]) > tolerance)
     if ( fabs(LUT1[i] - LUT2[i]) > tolerance)
@@ -78,7 +80,7 @@ bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const
       break;
       break;
     }
     }
   }
   }
-  
+
   return result;
   return result;
 }
 }
 
 
@@ -95,7 +97,7 @@ void TestFastHIK::setUp() {
 void TestFastHIK::tearDown() {
 void TestFastHIK::tearDown() {
 }
 }
 
 
-void TestFastHIK::testKernelMultiplication() 
+void TestFastHIK::testKernelMultiplication()
 {
 {
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplication ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelMultiplication ===================== " << std::endl;
@@ -107,54 +109,92 @@ void TestFastHIK::testKernelMultiplication()
   for ( uint i = 0 ; i < d; i++ )
   for ( uint i = 0 ; i < d; i++ )
   {
   {
     for ( uint k = 0; k < n; k++ )
     for ( uint k = 0; k < n; k++ )
-      if ( drand48() < sparse_prob ) 
+      if ( drand48() < sparse_prob )
       {
       {
         dataMatrix[i][k] = 0.0;
         dataMatrix[i][k] = 0.0;
         nrZeros++;
         nrZeros++;
       }
       }
   }
   }
 
 
-  if ( verbose ) {
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     printMatrix ( dataMatrix );
     cerr << endl;
     cerr << endl;
   }
   }
 
 
   double noise = 1.0;
   double noise = 1.0;
+  NICE::Timer t;
+  t.start();
   FastMinKernel fmk ( dataMatrix, noise );
   FastMinKernel fmk ( dataMatrix, noise );
-    
+  t.stop();
+  if (verbose)
+    std::cerr << "Time for FastMinKernel setup: " << t.getLast() << endl;
+
   if ( (n*d)>0)
   if ( (n*d)>0)
   {
   {
     CPPUNIT_ASSERT_DOUBLES_EQUAL(fmk.getSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
     CPPUNIT_ASSERT_DOUBLES_EQUAL(fmk.getSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
     if (verbose)
     if (verbose)
       std::cerr << "fmk.getSparsityRatio(): " << fmk.getSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
       std::cerr << "fmk.getSparsityRatio(): " << fmk.getSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
   }
   }
-  
+
   GMHIKernel gmk ( &fmk );
   GMHIKernel gmk ( &fmk );
   if (verbose)
   if (verbose)
-    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution
   else
   else
-    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution
 
 
   Vector y ( n );
   Vector y ( n );
   for ( uint i = 0; i < y.size(); i++ )
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
     y[i] = sin(i);
- 
+
+
+  // Test the GMHIKernel interface
   Vector alpha;
   Vector alpha;
-  
+
+  t.start();
   gmk.multiply ( alpha, y );
   gmk.multiply ( alpha, y );
-  
+  t.stop();
+  if (verbose)
+      std::cerr << "Time for kernel multiplication with GMHIKernel: " << t.getLast() << std::endl;
+
+
+  // convert data structures to test the GMHIKernelRaw interface
+  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+  transposeVectorOfVectors(dataMatrix_transposed);
+  std::vector< const NICE::SparseVector * > dataMatrix_sparse;
+  for ( std::vector< std::vector<double> >::const_iterator i = dataMatrix_transposed.begin(); i != dataMatrix_transposed.end(); i++ )
+  {
+    Vector w ( *i );
+    SparseVector *v = new SparseVector ( w );
+    dataMatrix_sparse.push_back(v);
+  }
+
+  t.start();
+  GMHIKernelRaw gmk_raw ( dataMatrix_sparse, noise );
+  t.stop();
+  if (verbose)
+    std::cerr << "Time for GMHIKernelRaw setup: " << t.getLast() << std::endl;
+
+  Vector alpha_raw;
+  t.start();
+  gmk_raw.multiply ( alpha_raw, y );
+  t.stop();
+  if (verbose)
+      std::cerr << "Time for kernel multiplication with GMHIKernelRaw: " << t.getLast() << std::endl;
+
+
+
+
+  // compute the kernel matrix multiplication exactly
   NICE::IntersectionKernelFunction<double> hikSlow;
   NICE::IntersectionKernelFunction<double> hikSlow;
-  
+
   // tic
   // tic
   time_t  slow_start = clock();
   time_t  slow_start = clock();
-  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
-  transposeVectorOfVectors(dataMatrix_transposed);
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
   //toc
   //toc
   float time_slowComputation = (float) (clock() - slow_start);
   float time_slowComputation = (float) (clock() - slow_start);
   if (verbose)
   if (verbose)
-    std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;  
+    std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;
 
 
   // tic
   // tic
   time_t  slow_sparse_start = clock();
   time_t  slow_sparse_start = clock();
@@ -163,10 +203,7 @@ void TestFastHIK::testKernelMultiplication()
   //toc
   //toc
   float time_slowComputation_usingSparsity = (float) (clock() - slow_sparse_start);
   float time_slowComputation_usingSparsity = (float) (clock() - slow_sparse_start);
   if (verbose)
   if (verbose)
-    std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;    
-
-  if ( verbose ) 
-    cerr << "K = " << K << endl;
+    std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;
 
 
   // check the trace calculation
   // check the trace calculation
   //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
   //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
@@ -175,10 +212,18 @@ void TestFastHIK::testKernelMultiplication()
   // let us compute the kernel multiplication with the slow version
   // let us compute the kernel multiplication with the slow version
   Vector alpha_slow = K*y;
   Vector alpha_slow = K*y;
 
 
-  if (verbose)
-    std::cerr << "Sparse multiplication [alpha, alpha_slow]: " << std::endl <<  alpha << std::endl << alpha_slow << std::endl << std::endl;
-  
+  if (b_debug)
+    std::cerr << "Sparse multiplication [alpha, alpha_slow, alpha_raw]: " << std::endl <<  alpha << std::endl << alpha_slow << std::endl << alpha_raw << std::endl << std::endl;
+
   CPPUNIT_ASSERT_DOUBLES_EQUAL((alpha-alpha_slow).normL1(), 0.0, 1e-8);
   CPPUNIT_ASSERT_DOUBLES_EQUAL((alpha-alpha_slow).normL1(), 0.0, 1e-8);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL((alpha_raw-alpha_slow).normL1(), 0.0, 1e-8);
+
+
+  Vector gmk_diag;
+  Vector gmk_raw_diag;
+  gmk.getDiagonalElements(gmk_diag);
+  gmk_raw.getDiagonalElements(gmk_raw_diag);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL((gmk_diag - gmk_raw_diag).normL1(), 0.0, 1e-8);
 
 
   // test the case, where we first transform and then use the multiply stuff
   // test the case, where we first transform and then use the multiply stuff
   NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
   NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
@@ -186,6 +231,7 @@ void TestFastHIK::testKernelMultiplication()
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   ParameterizedFunction *pf = new PFAbsExp( 1.2 );
   ParameterizedFunction *pf = new PFAbsExp( 1.2 );
   fmk.applyFunctionToFeatureMatrix( pf );
   fmk.applyFunctionToFeatureMatrix( pf );
+
 //   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 //   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 
 
   Vector galpha;
   Vector galpha;
@@ -196,18 +242,20 @@ void TestFastHIK::testKernelMultiplication()
   CPPUNIT_ASSERT_DOUBLES_EQUAL((galpha-galpha_slow).normL1(), 0.0, 1e-8);
   CPPUNIT_ASSERT_DOUBLES_EQUAL((galpha-galpha_slow).normL1(), 0.0, 1e-8);
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplication done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelMultiplication done ===================== " << std::endl;
+
+  delete pf;
 }
 }
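For context on the assertions above: the "slow" reference is the plain histogram intersection kernel evaluated pairwise, K(i,j) = sum over dims of min(x_i, x_j), with the noise variance added on the diagonal. A minimal sketch of this O(n^2 d) ground truth, assuming dense example-wise features (hikSlow.computeKernelMatrix plays this role in the test):

    #include <vector>
    #include <algorithm>

    // Brute-force HIK gram matrix -- the reference the fast variants
    // (GMHIKernel, GMHIKernelRaw) are checked against.
    std::vector<std::vector<double> > hikKernelMatrix(
        const std::vector<std::vector<double> > & X, // n examples of dimension d
        double noise )
    {
        const size_t n = X.size();
        std::vector<std::vector<double> > K( n, std::vector<double>( n, 0.0 ) );
        for ( size_t i = 0; i < n; i++ )
        {
            for ( size_t j = i; j < n; j++ )
            {
                double k = 0.0;
                for ( size_t dim = 0; dim < X[i].size(); dim++ )
                    k += std::min( X[i][dim], X[j][dim] );
                K[i][j] = K[j][i] = k;
            }
            K[i][i] += noise; // regularization on the diagonal
        }
        return K;
    }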
 
 
-void TestFastHIK::testKernelMultiplicationFast() 
+void TestFastHIK::testKernelMultiplicationFast()
 {
 {
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast ===================== " << std::endl;
-  
+
   NICE::Quantization * q_gen;
   NICE::Quantization * q_gen;
-  q_gen = new Quantization1DAequiDist0To1 ( numBins );  
-  
+  q_gen = new Quantization1DAequiDist0To1 ( numBins );
+
   NICE::Quantization * q;
   NICE::Quantization * q;
-  q = new Quantization1DAequiDist0To1 ( 2*numBins -1 );   
+  q = new Quantization1DAequiDist0To1 ( 2*numBins -1 );
 
 
   // data is generated, such that there is no approximation error
   // data is generated, such that there is no approximation error
   vector< vector<double> > dataMatrix;
   vector< vector<double> > dataMatrix;
@@ -225,49 +273,41 @@ void TestFastHIK::testKernelMultiplicationFast()
 
 
     dataMatrix.push_back(v);
     dataMatrix.push_back(v);
   }
   }
-  
-  if ( verbose ) {
-    cerr << "data matrix: " << endl;
-    printMatrix ( dataMatrix );
-    cerr << endl;
-  }
 
 
   double noise = 1.0;
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise );
   FastMinKernel fmk ( dataMatrix, noise );
-  
+
   GMHIKernel gmk ( &fmk );
   GMHIKernel gmk ( &fmk );
   if (verbose)
   if (verbose)
-    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution
   else
   else
-    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution 
+    gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution
 
 
   Vector y ( n );
   Vector y ( n );
   for ( uint i = 0; i < y.size(); i++ )
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
     y[i] = sin(i);
-   
+
   ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
   ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
   GMHIKernel gmkFast ( &fmk, pf, q );
   GMHIKernel gmkFast ( &fmk, pf, q );
 
 
+
 //   pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 //   pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
-    
+
   Vector alpha;
   Vector alpha;
-  
+
   gmk.multiply ( alpha, y );
   gmk.multiply ( alpha, y );
-  
+
   Vector alphaFast;
   Vector alphaFast;
-  
+
   gmkFast.multiply ( alphaFast, y );
   gmkFast.multiply ( alphaFast, y );
-  
+
   NICE::IntersectionKernelFunction<double> hikSlow;
   NICE::IntersectionKernelFunction<double> hikSlow;
-  
+
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   transposeVectorOfVectors(dataMatrix_transposed);
   transposeVectorOfVectors(dataMatrix_transposed);
 
 
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
 
 
-  if ( verbose ) 
-    cerr << "K = " << K << endl;
-
   // check the trace calculation
   // check the trace calculation
   //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
   //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
   CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
   CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
@@ -275,9 +315,9 @@ void TestFastHIK::testKernelMultiplicationFast()
   // let us compute the kernel multiplication with the slow version
   // let us compute the kernel multiplication with the slow version
   Vector alpha_slow = K*y;
   Vector alpha_slow = K*y;
 
 
-  if ( verbose )
+  if ( b_debug )
     std::cerr << "Sparse multiplication [alpha, alphaFast, alpha_slow]: " << std::endl <<  alpha << std::endl << alphaFast << std::endl << alpha_slow << std::endl << std::endl;
     std::cerr << "Sparse multiplication [alpha, alphaFast, alpha_slow]: " << std::endl <<  alpha << std::endl << alphaFast << std::endl << alpha_slow << std::endl << std::endl;
- 
+
   CPPUNIT_ASSERT_DOUBLES_EQUAL(0.0, (alphaFast-alpha_slow).normL1(), 1e-8);
   CPPUNIT_ASSERT_DOUBLES_EQUAL(0.0, (alphaFast-alpha_slow).normL1(), 1e-8);
 
 
   // test the case, where we first transform and then use the multiply stuff
   // test the case, where we first transform and then use the multiply stuff
@@ -286,36 +326,37 @@ void TestFastHIK::testKernelMultiplicationFast()
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   pf->parameters()[0] = 1.2;
   pf->parameters()[0] = 1.2;
   fmk.applyFunctionToFeatureMatrix( pf );
   fmk.applyFunctionToFeatureMatrix( pf );
-//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 
 
   Vector galphaFast;
   Vector galphaFast;
   gmkFast.multiply ( galphaFast, y );
   gmkFast.multiply ( galphaFast, y );
-  
+
   Vector galpha;
   Vector galpha;
-  
+
   gmk.multiply ( galpha, y );
   gmk.multiply ( galpha, y );
 
 
   Vector galpha_slow = gK * y;
   Vector galpha_slow = gK * y;
-  
-  if (verbose)
+
+  if ( b_debug )
     std::cerr << "Sparse multiplication [galpha, galphaFast, galpha_slow]: " << std::endl <<  galpha << std::endl << galphaFast << std::endl << galpha_slow << std::endl << std::endl;
     std::cerr << "Sparse multiplication [galpha, galphaFast, galpha_slow]: " << std::endl <<  galpha << std::endl << galphaFast << std::endl << galpha_slow << std::endl << std::endl;
 
 
   // clean-up
   // clean-up
   delete q_gen;
   delete q_gen;
   delete q;
   delete q;
-  
+
   // final assertion
   // final assertion
   CPPUNIT_ASSERT_DOUBLES_EQUAL((galphaFast-galpha_slow).normL1(), 0.0, 1e-8);
   CPPUNIT_ASSERT_DOUBLES_EQUAL((galphaFast-galpha_slow).normL1(), 0.0, 1e-8);
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelMultiplicationFast done ===================== " << std::endl;
+
+  delete pf;
 }
 }
 
 
 
 
-void TestFastHIK::testKernelSum() 
+void TestFastHIK::testKernelSum()
 {
 {
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSum ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelSum ===================== " << std::endl;
-  
+
   vector< vector<double> > dataMatrix;
   vector< vector<double> > dataMatrix;
   generateRandomFeatures ( d, n, dataMatrix );
   generateRandomFeatures ( d, n, dataMatrix );
 
 
@@ -323,14 +364,14 @@ void TestFastHIK::testKernelSum()
   for ( uint i = 0 ; i < d; i++ )
   for ( uint i = 0 ; i < d; i++ )
   {
   {
     for ( uint k = 0; k < n; k++ )
     for ( uint k = 0; k < n; k++ )
-      if ( drand48() < sparse_prob ) 
+      if ( drand48() < sparse_prob )
       {
       {
         dataMatrix[i][k] = 0.0;
         dataMatrix[i][k] = 0.0;
         nrZeros++;
         nrZeros++;
       }
       }
   }
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     printMatrix ( dataMatrix );
     cerr << endl;
     cerr << endl;
@@ -338,13 +379,13 @@ void TestFastHIK::testKernelSum()
 
 
   double noise = 1.0;
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise );
   FastMinKernel fmk ( dataMatrix, noise );
-  
+
   Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
   Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
 
 
   NICE::VVector ASparse;
   NICE::VVector ASparse;
   NICE::VVector BSparse;
   NICE::VVector BSparse;
-  fmk.hik_prepare_alpha_multiplications ( alpha, ASparse, BSparse ); 
-  
+  fmk.hik_prepare_alpha_multiplications ( alpha, ASparse, BSparse );
+
   Vector xstar (d);
   Vector xstar (d);
   for ( uint i = 0 ; i < d ; i++ )
   for ( uint i = 0 ; i < d ; i++ )
     if ( drand48() < sparse_prob ) {
     if ( drand48() < sparse_prob ) {
@@ -353,14 +394,14 @@ void TestFastHIK::testKernelSum()
       xstar[i] = rand();
       xstar[i] = rand();
     }
     }
   SparseVector xstarSparse ( xstar );
   SparseVector xstarSparse ( xstar );
-    
+
   double betaSparse;
   double betaSparse;
   fmk.hik_kernel_sum ( ASparse, BSparse, xstarSparse, betaSparse );
   fmk.hik_kernel_sum ( ASparse, BSparse, xstarSparse, betaSparse );
-  
+
   if (verbose)
   if (verbose)
     std::cerr << "kernelSumSparse done, now do the thing without exploiting sparsity" << std::endl;
     std::cerr << "kernelSumSparse done, now do the thing without exploiting sparsity" << std::endl;
 
 
-  
+
   // checking the result
   // checking the result
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   transposeVectorOfVectors(dataMatrix_transposed);
   transposeVectorOfVectors(dataMatrix_transposed);
@@ -378,17 +419,17 @@ void TestFastHIK::testKernelSum()
   if (verbose)
   if (verbose)
     std::cerr << "difference of beta_slow and betaSparse: " << fabs(beta_slow - betaSparse) << std::endl;
     std::cerr << "difference of beta_slow and betaSparse: " << fabs(beta_slow - betaSparse) << std::endl;
   CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, betaSparse, 1e-8);
   CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, betaSparse, 1e-8);
-  
+
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSum done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelSum done ===================== " << std::endl;
 }
 }
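The arrays A and B prepared by hik_prepare_alpha_multiplications implement the sparse HIK evaluation trick: per dimension the nonzero training values are sorted, and cumulative sums of alpha_i * x_i and of alpha_i make it possible to evaluate sum_i alpha_i * min(x_i, x*) without visiting every training example per query. A rough single-dimension sketch, under the assumption that values arrive pre-sorted (the library additionally skips zeros and applies the parameterized function pf):

    #include <vector>
    #include <cstddef>

    // One dimension of the HIK weighted sum via cumulative sums:
    // min(v_t, xq) equals v_t for all sorted values v_t <= xq and xq otherwise,
    // so the sum splits into a prefix of alpha*v and a weighted remainder.
    double hikDimSum( const std::vector<double> & v,     // sorted ascending
                      const std::vector<double> & alpha, // matching weights
                      double xq )                        // query value
    {
        double prefixAV = 0.0, prefixA = 0.0, totalA = 0.0;
        for ( std::size_t t = 0; t < alpha.size(); t++ )
            totalA += alpha[t];

        std::size_t r = 0;
        while ( r < v.size() && v[r] <= xq )
        {
            prefixAV += alpha[r] * v[r];
            prefixA  += alpha[r];
            r++;
        }
        return prefixAV + xq * ( totalA - prefixA );
    }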
 
 
 
 
-void TestFastHIK::testKernelSumFast() 
+void TestFastHIK::testKernelSumFast()
 {
 {
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSumFast ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelSumFast ===================== " << std::endl;
-  
+
   NICE::Quantization * q;
   NICE::Quantization * q;
   q = new Quantization1DAequiDist0To1 ( numBins );
   q = new Quantization1DAequiDist0To1 ( numBins );
 
 
@@ -408,8 +449,8 @@ void TestFastHIK::testKernelSumFast()
 
 
     dataMatrix.push_back(v);
     dataMatrix.push_back(v);
   }
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     printMatrix ( dataMatrix );
     cerr << endl;
     cerr << endl;
@@ -418,7 +459,7 @@ void TestFastHIK::testKernelSumFast()
   double noise = 1.0;
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise );
   FastMinKernel fmk ( dataMatrix, noise );
   Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
   Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
-  if ( verbose )
+  if ( b_debug )
     std::cerr << "alpha = " << alpha << endl;
     std::cerr << "alpha = " << alpha << endl;
 
 
   // generate xstar
   // generate xstar
@@ -436,10 +477,10 @@ void TestFastHIK::testKernelSumFast()
   for ( uint i = 0 ; i < d; i++ )
   for ( uint i = 0 ; i < d; i++ )
     xstar_stl[i] = xstar[i];
     xstar_stl[i] = xstar[i];
 
 
-  if ( verbose ) 
+  if ( b_debug )
     cerr << "xstar = " << xstar << endl;
     cerr << "xstar = " << xstar << endl;
- 
-  for ( double gamma = 1.0 ; gamma < 2.0; gamma += 0.5 ) 
+
+  for ( double gamma = 1.0 ; gamma < 2.0; gamma += 0.5 )
   {
   {
     if (verbose)
     if (verbose)
       std::cerr << "testing hik_kernel_sum_fast with ghik parameter: " << gamma << endl;
       std::cerr << "testing hik_kernel_sum_fast with ghik parameter: " << gamma << endl;
@@ -453,17 +494,18 @@ void TestFastHIK::testKernelSumFast()
     NICE::VVector B;
     NICE::VVector B;
     if (verbose)
     if (verbose)
       std::cerr << "fmk.hik_prepare_alpha_multiplications ( alpha, A, B ) " << std::endl;
       std::cerr << "fmk.hik_prepare_alpha_multiplications ( alpha, A, B ) " << std::endl;
-    fmk.hik_prepare_alpha_multiplications ( alpha, A, B ); 
+    fmk.hik_prepare_alpha_multiplications ( alpha, A, B );
 
 
-    if (verbose)
+    if (b_debug)
       //std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast( A, B, q )" << std::endl;
       //std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast( A, B, q )" << std::endl;
       std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast_alltogether( alpha, q, &pf )" << std::endl;
       std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast_alltogether( alpha, q, &pf )" << std::endl;
-    double *TlookupOld = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, &pf ); 
-    double *TlookupNew = fmk.hikPrepareLookupTable( alpha, q, &pf ); 
-    
+
+    double *TlookupOld = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, &pf );
+    double *TlookupNew = fmk.hikPrepareLookupTable( alpha, q, &pf );
+
     int maxAcces(numBins*d);
     int maxAcces(numBins*d);
-    
-    if (verbose)
+
+    if (b_debug)
     {
     {
       std::cerr << "TlookupOld:  " << std::endl;
       std::cerr << "TlookupOld:  " << std::endl;
       for (int i = 0; i < maxAcces; i++)
       for (int i = 0; i < maxAcces; i++)
@@ -478,20 +520,20 @@ void TestFastHIK::testKernelSumFast()
         std::cerr << TlookupNew[i] << " ";
         std::cerr << TlookupNew[i] << " ";
         if ( (i%numBins) == (numBins-1))
         if ( (i%numBins) == (numBins-1))
           std::cerr << std::endl;
           std::cerr << std::endl;
-      }    
+      }
     }
     }
-    
+
     if (verbose)
     if (verbose)
       std::cerr << "fmk.hik_kernel_sum_fast ( Tlookup, q, xstar, beta_fast )" << std::endl;
       std::cerr << "fmk.hik_kernel_sum_fast ( Tlookup, q, xstar, beta_fast )" << std::endl;
-    
+
     double beta_fast;
     double beta_fast;
     fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar, beta_fast );
     fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar, beta_fast );
-    
+
     NICE::SparseVector xstar_sparse(xstar);
     NICE::SparseVector xstar_sparse(xstar);
-    
+
     double beta_fast_sparse;
     double beta_fast_sparse;
     fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar_sparse, beta_fast_sparse );
     fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar_sparse, beta_fast_sparse );
-    
+
     double betaSparse;
     double betaSparse;
     fmk.hik_kernel_sum ( A, B, xstar_sparse, betaSparse, &pf );
     fmk.hik_kernel_sum ( A, B, xstar_sparse, betaSparse, &pf );
 
 
@@ -505,22 +547,22 @@ void TestFastHIK::testKernelSumFast()
     for ( uint i = 0 ; i < n; i++ )
     for ( uint i = 0 ; i < n; i++ )
       beta_slow += kstar_stl[i] * alpha[i];
       beta_slow += kstar_stl[i] * alpha[i];
 
 
-    if (verbose)
+    if (b_debug)
       std::cerr << "beta_slow: " << beta_slow << std::endl << "beta_fast: " << beta_fast << std::endl << "beta_fast_sparse: " << beta_fast_sparse << std::endl << "betaSparse: " << betaSparse<< std::endl;
       std::cerr << "beta_slow: " << beta_slow << std::endl << "beta_fast: " << beta_fast << std::endl << "beta_fast_sparse: " << beta_fast_sparse << std::endl << "betaSparse: " << betaSparse<< std::endl;
-    
-    // clean-up 
+
+    // clean-up
     delete [] TlookupNew;
     delete [] TlookupNew;
-    delete [] TlookupOld;    
-    
-    // final assertion    
+    delete [] TlookupOld;
+
+    // final assertion
     CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, beta_fast_sparse, 1e-8);
     CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, beta_fast_sparse, 1e-8);
-  
+
 
 
   } // for-loop
   } // for-loop
-  
+
   // clean-up
   // clean-up
   delete q;
   delete q;
-  
+
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelSumFast done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelSumFast done ===================== " << std::endl;
 
 
@@ -550,8 +592,8 @@ void TestFastHIK::testLUTUpdate()
 
 
     dataMatrix.push_back(v);
     dataMatrix.push_back(v);
   }
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     cerr << "data matrix: " << endl;
     cerr << "data matrix: " << endl;
     printMatrix ( dataMatrix );
     printMatrix ( dataMatrix );
     cerr << endl;
     cerr << endl;
@@ -559,19 +601,19 @@ void TestFastHIK::testLUTUpdate()
 
 
   double noise = 1.0;
   double noise = 1.0;
   NICE::FastMinKernel fmk ( dataMatrix, noise );
   NICE::FastMinKernel fmk ( dataMatrix, noise );
-  
+
   NICE::ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
   NICE::ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
 
 
   NICE::Vector alpha ( n );
   NICE::Vector alpha ( n );
   for ( uint i = 0; i < alpha.size(); i++ )
   for ( uint i = 0; i < alpha.size(); i++ )
     alpha[i] = sin(i);
     alpha[i] = sin(i);
-  
+
   if (verbose)
   if (verbose)
     std::cerr << "prepare LUT" << std::endl;
     std::cerr << "prepare LUT" << std::endl;
   double * T = fmk.hikPrepareLookupTable(alpha, q, pf);
   double * T = fmk.hikPrepareLookupTable(alpha, q, pf);
   if (verbose)
   if (verbose)
     std::cerr << "preparation done -- printing T" << std::endl;
     std::cerr << "preparation done -- printing T" << std::endl;
-  
+
   int maxAcces(numBins*d);
   int maxAcces(numBins*d);
   if (verbose)
   if (verbose)
   {
   {
@@ -580,21 +622,21 @@ void TestFastHIK::testLUTUpdate()
       std::cerr << T[i] << " ";
       std::cerr << T[i] << " ";
       if ( (i%numBins) == (numBins-1))
       if ( (i%numBins) == (numBins-1))
         std::cerr << std::endl;
         std::cerr << std::endl;
-    }    
+    }
   }
   }
 
 
   //lets change index 2
   //lets change index 2
   int idx(2);
   int idx(2);
   double valAlphaOld(alpha[idx]);
   double valAlphaOld(alpha[idx]);
   double valAlphaNew(1.2); //this value is definitely different from the previous one
   double valAlphaNew(1.2); //this value is definitely different from the previous one
-      
+
   Vector alphaNew(alpha);
   Vector alphaNew(alpha);
   alphaNew[idx] = valAlphaNew;
   alphaNew[idx] = valAlphaNew;
-  
+
   double * TNew = fmk.hikPrepareLookupTable(alphaNew, q, pf);
   double * TNew = fmk.hikPrepareLookupTable(alphaNew, q, pf);
   if (verbose)
   if (verbose)
     std::cerr << "calculated the new LUT, no print it: " << std::endl;
     std::cerr << "calculated the new LUT, no print it: " << std::endl;
-  
+
   if (verbose)
   if (verbose)
   {
   {
     for (int i = 0; i < maxAcces; i++)
     for (int i = 0; i < maxAcces; i++)
@@ -602,7 +644,7 @@ void TestFastHIK::testLUTUpdate()
       std::cerr << TNew[i] << " ";
       std::cerr << TNew[i] << " ";
       if ( (i%numBins) == (numBins-1))
       if ( (i%numBins) == (numBins-1))
         std::cerr << std::endl;
         std::cerr << std::endl;
-    } 
+    }
   }
   }
 
 
   if (verbose)
   if (verbose)
@@ -610,7 +652,7 @@ void TestFastHIK::testLUTUpdate()
   fmk.hikUpdateLookupTable(T, valAlphaNew, valAlphaOld, idx, q, pf );
   fmk.hikUpdateLookupTable(T, valAlphaNew, valAlphaOld, idx, q, pf );
   if (verbose)
   if (verbose)
     std::cerr << "update is done, now print the updated version: " << std::endl;
     std::cerr << "update is done, now print the updated version: " << std::endl;
-  
+
   if (verbose)
   if (verbose)
   {
   {
     for (int i = 0; i < maxAcces; i++)
     for (int i = 0; i < maxAcces; i++)
@@ -618,12 +660,12 @@ void TestFastHIK::testLUTUpdate()
       std::cerr << T[i] << " ";
       std::cerr << T[i] << " ";
       if ( (i%numBins) == (numBins-1))
       if ( (i%numBins) == (numBins-1))
         std::cerr << std::endl;
         std::cerr << std::endl;
-    } 
+    }
   }
   }
-  
-  
+
+
   bool equal = compareLUTs(T, TNew, q->getNumberOfBins()*d, 10e-8);
   bool equal = compareLUTs(T, TNew, q->getNumberOfBins()*d, 10e-8);
-  
+
   if (verbose)
   if (verbose)
   {
   {
     if (equal)
     if (equal)
@@ -643,22 +685,22 @@ void TestFastHIK::testLUTUpdate()
         if ( (i % q->getNumberOfBins()) == 0)
         if ( (i % q->getNumberOfBins()) == 0)
           std::cerr << std::endl;
           std::cerr << std::endl;
         std::cerr << TNew[i] << " ";
         std::cerr << TNew[i] << " ";
-      }     
-    
-    }    
+      }
+
+    }
   }
   }
 
 
-  
-  
+
+
   // clean-up
   // clean-up
-  delete q;  
-  delete pf;    
+  delete q;
+  delete pf;
   delete [] T;
   delete [] T;
-  delete [] TNew;  
-    
-  // final assertion        
+  delete [] TNew;
+
+  // final assertion
   CPPUNIT_ASSERT(equal == true);
   CPPUNIT_ASSERT(equal == true);
-  
+
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testLUTUpdate done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testLUTUpdate done ===================== " << std::endl;
 
 
@@ -689,24 +731,27 @@ void TestFastHIK::testLinSolve()
 
 
     dataMatrix.push_back(v);
     dataMatrix.push_back(v);
   }
   }
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     std::cerr << "data matrix: " << std::endl;
     std::cerr << "data matrix: " << std::endl;
     printMatrix ( dataMatrix );
     printMatrix ( dataMatrix );
     std::cerr << std::endl;
     std::cerr << std::endl;
   }
   }
 
 
+  // Test the FastMinKernel solveLin core methods.
+  // Their erratic convergence currently restricts this test
+  // to smaller problem sizes.
   double noise = 1.0;
   double noise = 1.0;
   NICE::FastMinKernel fmk ( dataMatrix, noise );
   NICE::FastMinKernel fmk ( dataMatrix, noise );
-  
+
   NICE::ParameterizedFunction *pf = new NICE::PFAbsExp ( 1.0 );
   NICE::ParameterizedFunction *pf = new NICE::PFAbsExp ( 1.0 );
   fmk.applyFunctionToFeatureMatrix( pf );
   fmk.applyFunctionToFeatureMatrix( pf );
 
 
-  NICE::Vector y ( n );  
+  NICE::Vector y ( n );
   for ( uint i = 0; i < y.size(); i++ )
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
     y[i] = sin(i);
-  
-  NICE::Vector alpha;
+
+
   NICE::Vector alphaRandomized;
   NICE::Vector alphaRandomized;
 
 
   if ( verbose )
   if ( verbose )
@@ -715,64 +760,67 @@ void TestFastHIK::testLinSolve()
   NICE::Timer t;
   NICE::Timer t;
   t.start();
   t.start();
   //let's try to do 10.000 iterations and sample in each iteration 30 examples randomly
   //let's try to do 10.000 iterations and sample in each iteration 30 examples randomly
-  fmk.solveLin(y,alphaRandomized,q,pf,true,solveLinMaxIterations,30);
+  fmk.solveLin(y, alphaRandomized, q, pf, true, solveLinMaxIterations, 30 /* random subset */);
   //toc
   //toc
   t.stop();
   t.stop();
-  float time_randomizedSolving = t.getLast();  
+  float time_randomizedSolving = t.getLast();
   if ( verbose )
   if ( verbose )
-    std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;  
-  
+    std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;
+
   // test the case, where we first transform and then use the multiply stuff
   // test the case, where we first transform and then use the multiply stuff
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
   transposeVectorOfVectors(dataMatrix_transposed);
   transposeVectorOfVectors(dataMatrix_transposed);
-  
+
   NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.0 );
   NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.0 );
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
   NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
-  
+
   NICE::Vector K_alphaRandomized;
   NICE::Vector K_alphaRandomized;
   K_alphaRandomized.multiply(gK, alphaRandomized);
   K_alphaRandomized.multiply(gK, alphaRandomized);
-  
+
+  NICE::Vector alpha;
   if (solveLinWithoutRand)
   if (solveLinWithoutRand)
   {
   {
     if ( verbose )
     if ( verbose )
       std::cerr << "solveLin without randomization" << std::endl;
       std::cerr << "solveLin without randomization" << std::endl;
-    fmk.solveLin(y,alpha,q,pf,false,1000);
+
+    fmk.solveLin(y, alpha, q, pf, false, 30 /* max iterations */);
     Vector K_alpha;
     Vector K_alpha;
     K_alpha.multiply(gK, alpha);
     K_alpha.multiply(gK, alpha);
-    
+
     if ( verbose )
     if ( verbose )
     {
     {
       std::cerr << "now assert that K_alpha == y" << std::endl;
       std::cerr << "now assert that K_alpha == y" << std::endl;
-      std::cerr << "(K_alpha-y).normL1(): " << (K_alpha-y).normL1() << std::endl;
+      std::cerr << "(K_alpha-y).normL1(): " << (K_alpha - y).normL1() << std::endl;
     }
     }
   }
   }
-   
+
 //   std::cerr << "alpha: " << alpha << std::endl;
 //   std::cerr << "alpha: " << alpha << std::endl;
 //   std::cerr << "K_times_alpha: " << K_alpha << std::endl;
 //   std::cerr << "K_times_alpha: " << K_alpha << std::endl;
 //   std::cerr << "y: " << y << std::endl;
 //   std::cerr << "y: " << y << std::endl;
-//   
+//
 //   Vector test_alpha;
 //   Vector test_alpha;
 //   ILSConjugateGradients cgm;
 //   ILSConjugateGradients cgm;
 //   cgm.solveLin( GMStandard(gK),y,test_alpha);
 //   cgm.solveLin( GMStandard(gK),y,test_alpha);
-//   
+//
 //   K_alpha.multiply( gK, test_alpha);
 //   K_alpha.multiply( gK, test_alpha);
-//   
+//
 //   std::cerr << "test_alpha (CGM): " << test_alpha << std::endl;
 //   std::cerr << "test_alpha (CGM): " << test_alpha << std::endl;
 //   std::cerr << "K_times_alpha (CGM): " << K_alpha << std::endl;
 //   std::cerr << "K_times_alpha (CGM): " << K_alpha << std::endl;
-  
+
   if ( verbose )
   if ( verbose )
   {
   {
     std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
     std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
-    std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl; 
+    // TODO: normL1 sometimes delivered a negative number here
+    std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized - y).normL1() << std::endl;
   }
   }
-  
+
   // clean-up
   // clean-up
-  delete q;  
+  delete q;
   delete pf;
   delete pf;
-    
-  // final assertion        
-  CPPUNIT_ASSERT_DOUBLES_EQUAL((K_alphaRandomized-y).normL1(), 0.0, 1e-6);
-  
+
+  // final assertion
+  CPPUNIT_ASSERT_DOUBLES_EQUAL(0.0, (K_alphaRandomized-y).normL1(), 1e-6);
+
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testLinSolve done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testLinSolve done ===================== " << std::endl;
 }
 }
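The randomized variant of solveLin exercised above draws a subset of examples per iteration and updates the corresponding alpha entries toward K * alpha = y. The exact update rule of FastMinKernel::solveLin is not part of this diff; purely as an illustration of the idea, a randomized Gauss-Seidel sweep over a dense kernel matrix looks like this (not the library's implementation):

    #include <vector>
    #include <cstdlib>

    // Illustrative randomized Gauss-Seidel for K * alpha = y, with K
    // symmetric positive definite (K[i][i] > 0). Each step makes the
    // randomly chosen equation i exact given the current other entries.
    void randomizedGaussSeidel( const std::vector<std::vector<double> > & K,
                                const std::vector<double> & y,
                                std::vector<double> & alpha,
                                int maxIterations, int subsetSize )
    {
        const int n = (int) y.size();
        alpha.assign( n, 0.0 );
        for ( int iter = 0; iter < maxIterations; iter++ )
        {
            for ( int s = 0; s < subsetSize; s++ )
            {
                int i = std::rand() % n;
                double r = y[i];
                for ( int j = 0; j < n; j++ )
                    r -= K[i][j] * alpha[j];
                alpha[i] += r / K[i][i];
            }
        }
    }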
@@ -780,15 +828,15 @@ void TestFastHIK::testLinSolve()
 void TestFastHIK::testKernelVector()
 void TestFastHIK::testKernelVector()
 {
 {
   if (verboseStartEnd)
   if (verboseStartEnd)
-    std::cerr << "================== TestFastHIK::testKernelVector ===================== " << std::endl;  
-  
+    std::cerr << "================== TestFastHIK::testKernelVector ===================== " << std::endl;
+
   std::vector< std::vector<double> > dataMatrix;
   std::vector< std::vector<double> > dataMatrix;
-  
+
   std::vector<double> dim1; dim1.push_back(0.2);dim1.push_back(0.1);dim1.push_back(0.0);dim1.push_back(0.0);dim1.push_back(0.4); dataMatrix.push_back(dim1);
   std::vector<double> dim1; dim1.push_back(0.2);dim1.push_back(0.1);dim1.push_back(0.0);dim1.push_back(0.0);dim1.push_back(0.4); dataMatrix.push_back(dim1);
   std::vector<double> dim2; dim2.push_back(0.3);dim2.push_back(0.6);dim2.push_back(1.0);dim2.push_back(0.4);dim2.push_back(0.3); dataMatrix.push_back(dim2);
   std::vector<double> dim2; dim2.push_back(0.3);dim2.push_back(0.6);dim2.push_back(1.0);dim2.push_back(0.4);dim2.push_back(0.3); dataMatrix.push_back(dim2);
   std::vector<double> dim3; dim3.push_back(0.5);dim3.push_back(0.3);dim3.push_back(0.0);dim3.push_back(0.6);dim3.push_back(0.3); dataMatrix.push_back(dim3);
   std::vector<double> dim3; dim3.push_back(0.5);dim3.push_back(0.3);dim3.push_back(0.0);dim3.push_back(0.6);dim3.push_back(0.3); dataMatrix.push_back(dim3);
-  
-  if ( verbose ) {
+
+  if ( b_debug ) {
     std::cerr << "data matrix: " << std::endl;
     std::cerr << "data matrix: " << std::endl;
     printMatrix ( dataMatrix );
     printMatrix ( dataMatrix );
     std::cerr << endl;
     std::cerr << endl;
@@ -796,13 +844,13 @@ void TestFastHIK::testKernelVector()
 
 
   double noise = 1.0;
   double noise = 1.0;
   FastMinKernel fmk ( dataMatrix, noise, b_debug );
   FastMinKernel fmk ( dataMatrix, noise, b_debug );
-  
+
 
 
   std::vector<double> xStar; xStar.push_back(0.2);xStar.push_back(0.7);xStar.push_back(0.1);
   std::vector<double> xStar; xStar.push_back(0.2);xStar.push_back(0.7);xStar.push_back(0.1);
   NICE::Vector xStarVec (xStar);
   NICE::Vector xStarVec (xStar);
   std::vector<double> x2; x2.push_back(0.7);x2.push_back(0.3);x2.push_back(0.0);
   std::vector<double> x2; x2.push_back(0.7);x2.push_back(0.3);x2.push_back(0.0);
   NICE::Vector x2Vec (x2);
   NICE::Vector x2Vec (x2);
-  
+
   NICE::SparseVector xStarsparse( xStarVec );
   NICE::SparseVector xStarsparse( xStarVec );
   NICE::SparseVector x2sparse( x2Vec );
   NICE::SparseVector x2sparse( x2Vec );
 
 
@@ -812,35 +860,35 @@ void TestFastHIK::testKernelVector()
     fmk.store ( std::cerr );
     fmk.store ( std::cerr );
     xStarsparse.store ( std::cerr );
     xStarsparse.store ( std::cerr );
   }
   }
-  
+
   NICE::Vector k1;
   NICE::Vector k1;
   fmk.hikComputeKernelVector( xStarsparse, k1 );
   fmk.hikComputeKernelVector( xStarsparse, k1 );
 
 
-  
+
   NICE::Vector k2;
   NICE::Vector k2;
   fmk.hikComputeKernelVector( x2sparse, k2 );
   fmk.hikComputeKernelVector( x2sparse, k2 );
-   
+
   NICE::Vector k1GT(5); k1GT[0] = 0.6; k1GT[1] = 0.8; k1GT[2] = 0.7; k1GT[3] = 0.5; k1GT[4] = 0.6;
   NICE::Vector k1GT(5); k1GT[0] = 0.6; k1GT[1] = 0.8; k1GT[2] = 0.7; k1GT[3] = 0.5; k1GT[4] = 0.6;
   NICE::Vector k2GT(5); k2GT[0] = 0.5; k2GT[1] = 0.4; k2GT[2] = 0.3; k2GT[3] = 0.3; k2GT[4] = 0.7;
   NICE::Vector k2GT(5); k2GT[0] = 0.5; k2GT[1] = 0.4; k2GT[2] = 0.3; k2GT[3] = 0.3; k2GT[4] = 0.7;
-  
-  if (verbose)
+
+  if (b_debug)
   {
   {
     std::cerr << "k1: " << k1 << std::endl;
     std::cerr << "k1: " << k1 << std::endl;
     std::cerr << "GT: " << k1GT << std::endl;
     std::cerr << "GT: " << k1GT << std::endl;
     std::cerr << "k2: " << k2 << std::endl;
     std::cerr << "k2: " << k2 << std::endl;
     std::cerr << "GT: " << k2GT << std::endl;
     std::cerr << "GT: " << k2GT << std::endl;
   }
   }
-    
+
   for (int i = 0; i < 5; i++)
   for (int i = 0; i < 5; i++)
   {
   {
     CPPUNIT_ASSERT_DOUBLES_EQUAL(k1[i]-k1GT[i], 0.0, 1e-6);
     CPPUNIT_ASSERT_DOUBLES_EQUAL(k1[i]-k1GT[i], 0.0, 1e-6);
     CPPUNIT_ASSERT_DOUBLES_EQUAL(k2[i]-k2GT[i], 0.0, 1e-6);
     CPPUNIT_ASSERT_DOUBLES_EQUAL(k2[i]-k2GT[i], 0.0, 1e-6);
   }
   }
 
 
-  
+
   if (verboseStartEnd)
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testKernelVector done ===================== " << std::endl;
     std::cerr << "================== TestFastHIK::testKernelVector done ===================== " << std::endl;
-  
+
 }
 }
 
 
-#endif
+#endif
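As a sanity check on the hand-computed ground truth in testKernelVector: the first training example is x_1 = (0.2, 0.3, 0.5) (the first entries of dim1, dim2, dim3) and the query is xStar = (0.2, 0.7, 0.1), so k(xStar, x_1) = min(0.2,0.2) + min(0.7,0.3) + min(0.1,0.5) = 0.2 + 0.3 + 0.1 = 0.6, matching k1GT[0]. A minimal sketch of this per-example evaluation:

    #include <algorithm>
    #include <cstddef>

    // Histogram intersection kernel between a query and one example:
    // k(x, x') = sum over dims of min(x[dim], x'[dim]).
    double hik( const double * x, const double * xPrime, std::size_t d )
    {
        double k = 0.0;
        for ( std::size_t dim = 0; dim < d; dim++ )
            k += std::min( x[dim], xPrime[dim] );
        return k;
    }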

+ 88 - 6
tests/TestGPHIKOnlineLearnable.cpp

@@ -17,6 +17,7 @@
 
 
 // gp-hik-core includes
 // gp-hik-core includes
 #include "gp-hik-core/GPHIKClassifier.h"
 #include "gp-hik-core/GPHIKClassifier.h"
+#include "gp-hik-core/GPHIKRawClassifier.h"
 
 
 #include "TestGPHIKOnlineLearnable.h"
 #include "TestGPHIKOnlineLearnable.h"
 
 
@@ -116,6 +117,33 @@ void evaluateClassifier ( NICE::Matrix & confusionMatrix,
   }
   }
 }
 }
 
 
+void evaluateClassifierRaw ( NICE::Matrix & confusionMatrix,
+                          const NICE::GPHIKRawClassifier * classifier,
+                          const NICE::Matrix & data,
+                          const NICE::Vector & yMulti,
+                          const std::map< uint,uint > & mapClNoToIdxTrain,
+                          const std::map< uint,uint > & mapClNoToIdxTest
+                        )
+{
+  int i_loopEnd  ( (int)data.rows() );
+
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example_nonsparse ( data.getRow(i) );
+    NICE::SparseVector example (example_nonsparse);
+    NICE::SparseVector scores;
+    uint result;
+
+    // classify with incrementally trained classifier
+    classifier->classify( &example, result, scores );
+
+    uint gtlabel = mapClNoToIdxTest.find(yMulti[i])->second;
+    uint predlabel = mapClNoToIdxTrain.find(result)->second;
+    confusionMatrix( gtlabel, predlabel ) += 1.0;
+  }
+}
+
+
 void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
 void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
                                 const NICE::GPHIKClassifier * classifierScratch, 
                                 const NICE::GPHIKClassifier * classifierScratch, 
                                 const NICE::Matrix & data
                                 const NICE::Matrix & data
@@ -144,7 +172,7 @@ void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
     NICE::SparseVector::const_iterator itScoresScratch = scoresScratch.begin();
     NICE::SparseVector::const_iterator itScoresScratch = scoresScratch.begin();
     for ( ; itScores != scores.end(); itScores++, itScoresScratch++)
     for ( ; itScores != scores.end(); itScores++, itScoresScratch++)
     {
     {
-      if ( fabs( itScores->second - itScores->second ) > 10e-3)
+      if ( fabs( itScores->second - itScoresScratch->second ) > 10e-3)
       {
       {
         std::cerr << " itScores->second: " << itScores->second << " itScores->second: " << itScores->second << std::endl;
         std::cerr << " itScores->second: " << itScores->second << " itScores->second: " << itScores->second << std::endl;
         equal = false;
         equal = false;
@@ -156,6 +184,47 @@ void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
   }  
   }  
 }
 }
 
 
+void compareClassifierOutputsRaw ( const NICE::GPHIKClassifier * classifier,
+                                const NICE::GPHIKRawClassifier * classifierRaw,
+                                const NICE::Matrix & data
+                              )
+{
+  int i_loopEnd  ( (int)data.rows() );
+
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example ( data.getRow(i) );
+
+    NICE::SparseVector scores;
+    uint result;
+
+    // classify with incrementally trained classifier
+    classifier->classify( &example, result, scores );
+
+
+    NICE::SparseVector scoresRaw;
+    uint resultRaw;
+    NICE::SparseVector example_sparse(example);
+    classifierRaw->classify( &example_sparse, resultRaw, scoresRaw );
+
+
+    bool equal(true);
+    NICE::SparseVector::const_iterator itScores        = scores.begin();
+    NICE::SparseVector::const_iterator itScoresScratch = scoresRaw.begin();
+    for ( ; itScores != scores.end(); itScores++, itScoresScratch++)
+    {
+      if ( fabs( itScores->second - itScoresScratch->second ) > 10e-6)
+      {
+        std::cerr << " itScores->second: " << itScores->second << " itScores->second: " << itScores->second << std::endl;
+        equal = false;
+        break;
+      }
+    }
+
+    CPPUNIT_ASSERT_EQUAL ( equal, true );
+  }
+}
+
 void TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty()
 void TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty()
 {
 {
   if (verboseStartEnd)
   if (verboseStartEnd)
@@ -164,7 +233,7 @@ void TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty()
   NICE::Config conf;
   NICE::Config conf;
   
   
   conf.sB ( "GPHIKClassifier", "eig_verbose", false);
   conf.sB ( "GPHIKClassifier", "eig_verbose", false);
-  conf.sS ( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  conf.sS ( "GPHIKClassifier", "optimization_method", "none");
   
   
   std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
   std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
   
   
@@ -278,7 +347,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
   NICE::Config conf;
   NICE::Config conf;
   
   
   conf.sB ( "GPHIKClassifier", "eig_verbose", false);
   conf.sB ( "GPHIKClassifier", "eig_verbose", false);
-  conf.sS ( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  conf.sS ( "GPHIKClassifier", "optimization_method", "none");
+  conf.sB ( "GPHIKClassifier", "verbose", true);
   
   
   std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
   std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
   
   
@@ -335,6 +405,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
  NICE::GPHIKClassifier * classifierScratch = new NICE::GPHIKClassifier ( &conf );
  classifierScratch->train ( examplesTrain, yBinTrain );
  
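+  // additionally train a raw classifier from scratch on the same binary task,
+  // so its predictions can be compared against both GPHIK classifiers below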
+  NICE::GPHIKRawClassifier * classifierScratchRaw = new NICE::GPHIKRawClassifier ( &conf );
+  classifierScratchRaw->train ( examplesTrain, yBinTrain );
    
  // TEST both classifiers to produce equal results
  
@@ -363,17 +435,19 @@ void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
  
  NICE::Matrix confusionMatrix         ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
  NICE::Matrix confusionMatrixScratch  ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
-  
+  NICE::Matrix confusionMatrixScratchRaw  ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
    
  // ------------------------------------------
  // ------------- CLASSIFICATION --------------
  // ------------------------------------------  
  evaluateClassifier ( confusionMatrix, classifier, dataTest, yBinTest,
-                          mapClNoToIdxTrain,mapClNoToIdxTest ); 
+                          mapClNoToIdxTrain, mapClNoToIdxTest );
  
  evaluateClassifier ( confusionMatrixScratch, classifierScratch, dataTest, yBinTest,
-                          mapClNoToIdxTrain,mapClNoToIdxTest );  
+                          mapClNoToIdxTrain, mapClNoToIdxTest );
    
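+  // evaluate the raw classifier with the same test data and label mappings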
+  evaluateClassifierRaw ( confusionMatrixScratchRaw, classifierScratchRaw, dataTest, yBinTest,
+                          mapClNoToIdxTrain, mapClNoToIdxTest );
    
  // post-process confusion matrices
  confusionMatrix.normalizeColumnsL1();
@@ -382,20 +456,28 @@ void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
  confusionMatrixScratch.normalizeColumnsL1();
  double arrScratch ( confusionMatrixScratch.trace()/confusionMatrixScratch.cols() );
 
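+  // same normalization and average recognition rate for the raw classifier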
+  confusionMatrixScratchRaw.normalizeColumnsL1();
+  double arrScratchRaw ( confusionMatrixScratchRaw.trace()/confusionMatrixScratchRaw.cols() );
  
  if ( verbose ) 
  {
    std::cerr << "confusionMatrix: " << confusionMatrix  << std::endl;
  
    std::cerr << "confusionMatrixScratch: " << confusionMatrixScratch << std::endl;
+
+    std::cerr << "confusionMatrixScratchRaw: " << confusionMatrixScratchRaw << std::endl;
  } 
  
  CPPUNIT_ASSERT_DOUBLES_EQUAL( arr, arrScratch, 1e-8);
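+  // the raw classifier is expected to reproduce the from-scratch accuracy and
+  // the per-example scores up to numerical precision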
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( arrScratch, arrScratchRaw, 1e-8);
+
+  compareClassifierOutputsRaw(classifier, classifierScratchRaw, dataTest);
  
  // don't waste memory
  
  delete classifier;
  delete classifierScratch;  
+  delete classifierScratchRaw;
  
  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
  {