
Merge branch 'master' of dbv.inf-cv.uni-jena.de:nice/nice-gp-hik-core into incnoveltysemseg

Johannes Ruehle 11 years ago
parent commit 2db08fe3ff

+ 72 - 35  FMKGPHyperparameterOptimization.cpp

@@ -55,7 +55,15 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
   std::map<int, NICE::Vector> binaryLabels;
   std::set<int> classesToUse;
  //TODO this could be made faster by storing the previous binary label vectors...
-  this->prepareBinaryLabels ( binaryLabels, this->labels , classesToUse );
+  
+  if ( this->b_performRegression )
+  {
+    // for regression, we do not need per-class binary labels, but only a single dummy "label" holding the targets
+    int regressionLabel ( 1 );  
+    binaryLabels.insert ( std::pair< int, NICE::Vector> ( regressionLabel, this->labels ) );
+  }
+  else
+    this->prepareBinaryLabels ( binaryLabels, this->labels , classesToUse );
   
   if ( this->verbose )
     std::cerr << "labels.size() after increment: " << this->labels.size() << std::endl;
@@ -71,7 +79,6 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
   if ( this->verboseTime )
     std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;
 
-
   t1.start();
   if ( this->b_usePreviousAlphas && ( this->previousAlphas.size() > 0) )
   {
@@ -116,7 +123,7 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
     //if we do not use previous alphas, we do not have to set up anything here
     gplike->setInitialAlphaGuess ( NULL );
   }
-  
+    
   t1.stop();
   if ( this->verboseTime )
     std::cerr << "Time used for setting up the alpha-objects: " << t1.getLast() << std::endl;
@@ -136,8 +143,7 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
   //////////////////////  //////////////////////
   //   RE-RUN THE OPTIMIZATION, IF DESIRED    //
   //////////////////////  //////////////////////    
-  
-  
+    
   if ( this->verbose )
     std::cerr << "resulting eigenvalues for first class: " << eigenMax[0] << std::endl;
   
@@ -154,7 +160,7 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
     
   if ( this->verbose )
     std::cerr << "perform optimization after increment " << std::endl;
-  
+   
   int optimizationMethodTmpCopy;
   if ( !performOptimizationAfterIncrement )
   {
@@ -163,7 +169,7 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
     optimizationMethodTmpCopy = this->optimizationMethod;
     this->optimizationMethod = OPT_NONE;
   }
-      
+  
   t1.start();
   this->performOptimization ( *gplike, parameterVectorSize);
 
@@ -206,7 +212,7 @@ void FMKGPHyperparameterOptimization::updateAfterIncrement (
 /////////////////////////////////////////////////////
 /////////////////////////////////////////////////////
 
-FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization()
+FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization( const bool & b_performRegression )
 {
   // initialize pointer variables
   pf = NULL;
@@ -227,6 +233,7 @@ FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization()
   binaryLabelNegative = -2;
   
   this->b_usePreviousAlphas = false;
+  this->b_performRegression = b_performRegression;
 }
 
 FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization ( const Config *_conf, ParameterizedFunction *_pf, FastMinKernel *_fmk, const string & _confSection )
@@ -244,7 +251,7 @@ FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization ( const Config
   binaryLabelPositive = -1;
   binaryLabelNegative = -2;  
   knownClasses.clear();
-
+  
   if ( _fmk == NULL )
     this->initialize ( _conf, _pf ); //then the confSection is also the default value
   else
@@ -287,6 +294,7 @@ void FMKGPHyperparameterOptimization::initialize ( const Config *_conf, Paramete
   }
   
   this->pf = _pf;
+ 
   
   this->verbose = _conf->gB ( _confSection, "verbose", false );
   this->verboseTime = _conf->gB ( _confSection, "verboseTime", false );
@@ -298,6 +306,9 @@ void FMKGPHyperparameterOptimization::initialize ( const Config *_conf, Paramete
     std::cerr << "|  set-up  |" << std::endl;
     std::cerr << "------------" << std::endl;
   }
+  
+  this->b_performRegression = _conf->gB ( _confSection, "b_performRegression", false );
+
 
   // this->eig = new EigValuesTRLAN();
  // My time measurements show that both methods use equal time, a comparison
@@ -584,7 +595,7 @@ void FMKGPHyperparameterOptimization::computeMatricesAndLUTs ( const GPLikelihoo
   {
     this->prepareVarianceApproximationRough();
   }
-  else if ( this->precomputedAForVarEst.size() > 0) 
+  else if ( this->nrOfEigenvaluesToConsiderForVarApprox > 0) 
   {
      this->prepareVarianceApproximationFine();
   }
@@ -713,11 +724,24 @@ void FMKGPHyperparameterOptimization::optimize ( const NICE::Vector & y )
   this->labels  = y;
   
   std::map<int, NICE::Vector> binaryLabels;
-  prepareBinaryLabels ( binaryLabels, y , knownClasses );
+  
+  if ( this->b_performRegression )
+  {
+    // for regression, we do not need per-class binary labels, but only a single dummy "label" holding the targets
+    int regressionLabel ( 1 );  
+    binaryLabels.insert ( std::pair< int, NICE::Vector> ( regressionLabel, y ) );
+    this->knownClasses.clear();
+    this->knownClasses.insert ( regressionLabel );
+  }
+  else
+  {
+    this->prepareBinaryLabels ( binaryLabels, y , knownClasses );    
+  }
   
   //now call the main function :)
   this->optimize(binaryLabels);
 }
+
   
 void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & binaryLabels )
 {
@@ -837,9 +861,10 @@ void FMKGPHyperparameterOptimization::prepareVarianceApproximationRough()
 
 void FMKGPHyperparameterOptimization::prepareVarianceApproximationFine()
 {
-  if ( this->eigenMax.size() != this->nrOfEigenvaluesToConsiderForVarApprox) 
+  if ( this->eigenMax.size() < (uint) this->nrOfEigenvaluesToConsiderForVarApprox ) 
   {
-    std::cerr << "not enough eigenvectors computed for fine approximation of predictive variance. Compute missing ones!" << std::endl;
+    std::cerr << "not enough eigenvectors computed for fine approximation of predictive variance. " <<std::endl;
+    std::cerr << "Current number of EV: " <<  this->eigenMax.size() << " but required: " << (uint) this->nrOfEigenvaluesToConsiderForVarApprox << std::endl;
     this->updateEigenDecomposition(  this->nrOfEigenvaluesToConsiderForVarApprox ); 
   }
 }
@@ -860,7 +885,7 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar
     double beta;
 
     if ( q != NULL ) {
-      map<int, double *>::const_iterator j = precomputedT.find ( classno );
+      std::map<int, double *>::const_iterator j = precomputedT.find ( classno );
       double *T = j->second;
       fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
     } else {
@@ -886,11 +911,15 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar
   { // multi-class classification
     return scores.maxElement();
   }
-  else
-  {  // binary setting    
+  else if ( this->knownClasses.size() == 2 ) // binary setting
+  {      
     scores[binaryLabelNegative] = -scores[binaryLabelPositive];     
     return scores[ binaryLabelPositive ] <= 0.0 ? binaryLabelNegative : binaryLabelPositive;
   }
+  else //OCC or regression setting
+  {
+    return 1;
+  }
 }
 
 int FMKGPHyperparameterOptimization::classify ( const NICE::Vector & xstar, NICE::SparseVector & scores ) const
@@ -935,12 +964,15 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::Vector & xstar, NICE
   { // multi-class classification
     return scores.maxElement();
   }
-  else 
-  { // binary setting
-   
+  else if ( this->knownClasses.size() == 2 ) // binary setting
+  {      
     scores[binaryLabelNegative] = -scores[binaryLabelPositive];     
     return scores[ binaryLabelPositive ] <= 0.0 ? binaryLabelNegative : binaryLabelPositive;
   }
+  else //OCC or regression setting
+  {
+    return 1;
+  }
 }
 
     //////////////////////////////////////////
@@ -1793,9 +1825,9 @@ void FMKGPHyperparameterOptimization::clear ( ) {};
 ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
 
 void FMKGPHyperparameterOptimization::addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement
-			   )
+                                                  const double & label, 
+                                                  const bool & performOptimizationAfterIncrement
+                                                )
 {
   if ( this->verbose )
     std::cerr << " --- FMKGPHyperparameterOptimization::addExample --- " << std::endl;  
@@ -1807,7 +1839,7 @@ void FMKGPHyperparameterOptimization::addExample( const NICE::SparseVector * exa
   
   this->labels.append ( label );
   //have we seen this class already?
-  if ( this->knownClasses.find( label ) == this->knownClasses.end() )
+  if ( !this->b_performRegression && ( this->knownClasses.find( label ) == this->knownClasses.end() ) )
   {
     this->knownClasses.insert( label );
     newClasses.insert( label );
@@ -1851,9 +1883,9 @@ void FMKGPHyperparameterOptimization::addExample( const NICE::SparseVector * exa
 }
 
 void FMKGPHyperparameterOptimization::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement
-				    )
+                                                           const NICE::Vector & newLabels,
+                                                           const bool & performOptimizationAfterIncrement
+                                                         )
 {
   if ( this->verbose )
     std::cerr << " --- FMKGPHyperparameterOptimization::addMultipleExamples --- " << std::endl;  
@@ -1865,16 +1897,21 @@ void FMKGPHyperparameterOptimization::addMultipleExamples( const std::vector< co
   
   this->labels.append ( newLabels );
   //have we seen this class already?
-  for ( NICE::Vector::const_iterator vecIt = newLabels.begin(); 
-       vecIt != newLabels.end(); vecIt++
-      )
-  {  
-      if ( this->knownClasses.find( *vecIt ) == this->knownClasses.end() )
-    {
-      this->knownClasses.insert( *vecIt );
-      newClasses.insert( *vecIt );
-    } 
+  if ( !this->b_performRegression)
+  {
+    for ( NICE::Vector::const_iterator vecIt = newLabels.begin(); 
+          vecIt != newLabels.end(); vecIt++
+        )
+    {  
+      if ( this->knownClasses.find( *vecIt ) == this->knownClasses.end() )
+      {
+        this->knownClasses.insert( *vecIt );
+        newClasses.insert( *vecIt );
+      } 
+    }
   }
+  // in a regression setting, we do not have to remember any "class labels"
+  else{}
   
   // add the new example to our data structure
  // It is necessary to do this already here and not later on for internal reasons (see GMHIKernel for more details)

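A minimal sketch of the dummy-label handling introduced above, with std::vector standing in for NICE::Vector; the helper function itself is hypothetical, while the variable names and the branch logic follow the diff:

#include <map>
#include <set>
#include <utility>
#include <vector>

typedef std::vector<double> LabelVector; // stand-in for NICE::Vector

// hypothetical helper mirroring the branch added to updateAfterIncrement and optimize
void prepareLabels ( const LabelVector & labels,
                     const bool b_performRegression,
                     std::map<int, LabelVector> & binaryLabels,
                     std::set<int> & knownClasses )
{
  if ( b_performRegression )
  {
    // regression: keep the raw targets under a single dummy "label"
    const int regressionLabel ( 1 );
    binaryLabels.insert ( std::make_pair ( regressionLabel, labels ) );
    knownClasses.clear();
    knownClasses.insert ( regressionLabel );
  }
  else
  {
    // classification: split into one +1/-1 vector per known class,
    // as prepareBinaryLabels does in the real code (omitted here)
  }
}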
+ 221 - 90  FMKGPHyperparameterOptimization.h

@@ -1,9 +1,8 @@
 /** 
 * @file FMKGPHyperparameterOptimization.h
 * @brief Heart of the framework to set up everything, perform optimization, classification, and variance prediction (Interface)
-* @author Erik Rodner, Alexander Freytag
-* @date 01/02/2012
-
+* @author Alexander Freytag, Erik Rodner
+* @date 01-02-2012 (dd-mm-yyyy)
 */
 #ifndef _NICE_FMKGPHYPERPARAMETEROPTIMIZATIONINCLUDE
 #define _NICE_FMKGPHYPERPARAMETEROPTIMIZATIONINCLUDE
@@ -39,122 +38,206 @@ namespace NICE {
   /** 
  * @class FMKGPHyperparameterOptimization
  * @brief Heart of the framework to set up everything, perform optimization, classification, and variance prediction
- * @author Erik Rodner, Alexander Freytag
+ * @author Alexander Freytag, Erik Rodner
  */
   
 class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::OnlineLearnable
 {
   protected:
-    enum {
-      OPT_GREEDY = 0,
-      OPT_DOWNHILLSIMPLEX,
-      OPT_NONE,
-      OPT_NUMBEROFMETHODS
-    };
-
-    /** optimization method used */
-    int optimizationMethod;
-
-    /** the parameterized function we use within the minimum kernel */
-    ParameterizedFunction *pf;
-
-    /** method computing eigenvalues */
-    EigValues *eig;
-
-    /** method for solving linear equation systems */
-    IterativeLinearSolver *linsolver;
-
-    /** object which stores our sorted data and provides fast hik functions */
-    FastMinKernel *fmk;
-
-    /** object which stores our quantization object */
-    Quantization *q;
-
+    
+    /////////////////////////
+    /////////////////////////
+    // PROTECTED VARIABLES //
+    /////////////////////////
+    ///////////////////////// 
+    
+    ///////////////////////////////////
+    // output/debug related settings //   
+    ///////////////////////////////////
+    
     /** verbose flag */
     bool verbose;    
     /** verbose flag for time measurement outputs */
     bool verboseTime;        
     /** debug flag for several outputs useful for debugging*/
     bool debug;    
+    
+    //////////////////////////////////////
+    // classification related variables //
+    //////////////////////////////////////
+    
+    /** by default we perform classification; if true, we perform regression instead */
+    bool b_performRegression;
+    
+    /** object storing sorted data and providing fast hik methods */
+    NICE::FastMinKernel *fmk;
+
+    /** object performing feature quantization */
+    NICE::Quantization *q;
+    
+    /** the parameterized function we use within the minimum kernel */
+    NICE::ParameterizedFunction *pf;
+
+    /** method for solving linear equation systems - needed to compute K^-1 \times y */
+    IterativeLinearSolver *linsolver;
+    
+    /** Max. number of iterations the iterative linear solver is allowed to run */
+    int ils_max_iterations;    
+    
+    /** Simple type definition for precomputation matrices used for fast classification */
+    typedef VVector PrecomputedType;
 
-    /** optimization parameters */
+    /** precomputed arrays A (1 per class) needed for classification without quantization  */
+    std::map< int, PrecomputedType > precomputedA;    
+    /** precomputed arrays B (1 per class) needed for classification without quantization  */
+    std::map< int, PrecomputedType > precomputedB;
+    
+    /** precomputed LUTs (1 per class) needed for classification with quantization  */
+    std::map< int, double * > precomputedT;  
+    
+    //! storing the labels is needed for Incremental Learning (re-optimization)
+    NICE::Vector labels; 
+    
+    //! store the class number of the positive class (i.e., larger class no), only used in binary settings
+    int binaryLabelPositive;
+    //! store the class number of the negative class (i.e., smaller class no), only used in binary settings
+    int binaryLabelNegative;
+    
+    //! contains all class numbers of the currently known classes
+    std::set<int> knownClasses;
+    
+    //! container for multiple kernel matrices (e.g., a data-containing kernel matrix (GMHIKernel) and a noise matrix (IKMNoise) )
+    NICE::IKMLinearCombination * ikmsum;    
+    
+  
+    /////////////////////////////////////
+    // optimization related parameters //
+    /////////////////////////////////////
+    
+    enum {
+      OPT_GREEDY = 0,
+      OPT_DOWNHILLSIMPLEX,
+      OPT_NONE
+    };
+
+    /** specify the optimization method used (see corresponding enum) */
+    int optimizationMethod;
+    
+    //! whether or not to optimize noise with the GP likelihood
+    bool optimizeNoise;     
+    
+    /** upper bound for hyper parameters to optimize */
     double parameterUpperBound;
+    
+    /** lower bound for hyper parameters to optimize */
     double parameterLowerBound;
+    
+        // specific to greedy optimization
+    /** step size used in grid based greedy optimization technique */
     double parameterStepSize;
-    int ils_max_iterations;
-
+    
+        // specific to downhill simplex optimization
+    /** Max. number of iterations the downhill simplex optimizer is allowed to run */
     int downhillSimplexMaxIterations;
+    
+    /** Max. time the downhill simplex optimizer is allowed to run */
     double downhillSimplexTimeLimit;
+    
+    /** Parameter tolerance used as convergence criterion by the downhill simplex optimizer */
     double downhillSimplexParamTol;
+    
+    
+      // likelihood computation related variables
 
-    /** whether to compute the likelihood with the usual method */
+    /** whether to compute the exact likelihood by computing the exact kernel matrix (not recommended - only for debugging/comparison purpose) */
     bool verifyApproximation;
+
+    /** method computing eigenvalues and eigenvectors*/
+    NICE::EigValues *eig;
     
-    /** number of Eigenvalues to consider in the approximation of |K|_F */
+    /** number of Eigenvalues to consider in the approximation of |K|_F used for approximating the likelihood */
     int nrOfEigenvaluesToConsider;
     
-    /** number of Eigenvalues to consider in the fine approximation of the predictive variance */
-    int nrOfEigenvaluesToConsiderForVarApprox;
-
-    typedef VVector PrecomputedType;
-
-    /** precomputed arrays and lookup tables */
-    std::map< int, PrecomputedType > precomputedA;
-    std::map< int, PrecomputedType > precomputedB;
-    std::map< int, double * > precomputedT;
-
-    PrecomputedType precomputedAForVarEst;
-    double * precomputedTForVarEst;
-
-    //! optimize noise with the GP likelihood
-    bool optimizeNoise;     
-       
     //! k largest eigenvalues of the kernel matrix (k == nrOfEigenvaluesToConsider)
     NICE::Vector eigenMax;
 
     //! eigenvectors corresponding to k largest eigenvalues (k == nrOfEigenvaluesToConsider) -- format: nxk
     NICE::Matrix eigenMaxVectors;
     
-    //! needed for optimization and variance approximation
-    IKMLinearCombination * ikmsum;
+
+    ////////////////////////////////////////////
+    // variance computation related variables //
+    ////////////////////////////////////////////
     
-    //! storing the labels is needed for Incremental Learning (re-optimization)
-    NICE::Vector labels;
+    /** number of Eigenvalues to consider in the fine approximation of the predictive variance (fine approximation only) */
+    int nrOfEigenvaluesToConsiderForVarApprox;
+    
+    /** precomputed array needed for rough variance approximation without quantization */ 
+    PrecomputedType precomputedAForVarEst;
+    
+    /** precomputed LUT needed for rough variance approximation with quantization  */
+    double * precomputedTForVarEst;    
+    
+    /////////////////////////////////////////////////////
+    // online / incremental learning related variables //
+    /////////////////////////////////////////////////////
+
+    /** whether or not to use previous alpha solutions as initialization after adding new examples*/
+    bool b_usePreviousAlphas;
+    
+    //! store alpha vectors for good initializations in the IL setting, if activated
+    std::map<int, NICE::Vector> previousAlphas;     
+
+    
+    /////////////////////////
+    /////////////////////////
+    //  PROTECTED METHODS  //
+    /////////////////////////
+    /////////////////////////
     
 
-    //! calculate binary label vectors using a multi-class label vector
+    /**
+    * @brief calculate binary label vectors using a multi-class label vector
+    * @author Alexander Freytag
+    */    
     int prepareBinaryLabels ( std::map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses);     
     
-    //! prepare the GPLike object for given binary labels and already given ikmsum-object
+    /**
+    * @brief prepare the GPLike object for given binary labels and already given ikmsum-object
+    * @author Alexander Freytag
+    */
     inline void setupGPLikelihoodApprox( GPLikelihoodApprox * & gplike, const std::map<int, NICE::Vector> & binaryLabels, uint & parameterVectorSize);    
     
-    //! update eigenvectors and eigenvalues for given ikmsum-objects and a method to compute eigenvalues
+    /**
+    * @brief update eigenvectors and eigenvalues for given ikmsum-objects and a method to compute eigenvalues
+    * @author Alexander Freytag
+    */
     inline void updateEigenDecomposition( const int & i_noEigenValues );
     
-    //! core of the optimize-functions
+    /**
+    * @brief core of the optimize-functions
+    * @author Alexander Freytag
+    */
     inline void performOptimization( GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
     
-    //! apply the optimized transformation values to the underlying features
+    /**
+    * @brief apply the optimized transformation values to the underlying features
+    * @author Alexander Freytag
+    */    
     inline void transformFeaturesWithOptimalParameters(const GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
     
-    //! build the resulting matrices A and B as well as lookup tables T for fast evaluations using the optimized parameter settings
+    /**
+    * @brief build the resulting matrices A and B as well as lookup tables T for fast evaluations using the optimized parameter settings
+    * @author Alexander Freytag
+    */
     inline void computeMatricesAndLUTs( const GPLikelihoodApprox & gplike);
     
      
-    //! store the class number of the positive class (i.e., larger class no), only used in binary settings
-    int binaryLabelPositive;
-    //! store the class number of the negative class (i.e., smaller class no), only used in binary settings
-    int binaryLabelNegative;
-    
-    //! contains all class numbers of the currently known classes
-    std::set<int> knownClasses;
-    
-    bool b_usePreviousAlphas;
-    
-    //! we store the alpha vectors for good initializations in the IL setting
-    std::map<int, NICE::Vector> previousAlphas;  
 
-    //! Update matrices (A, B, LUTs) and optionally find optimal parameters after adding (a) new example(s).  
+    /**
+    * @brief Update matrices (A, B, LUTs) and optionally find optimal parameters after adding (a) new example(s).  
+    * @author Alexander Freytag
+    */           
     void updateAfterIncrement (
       const std::set<int> newClasses,
       const bool & performOptimizationAfterIncrement = false
@@ -164,9 +247,12 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     
   public:  
     
-
-    FMKGPHyperparameterOptimization();
-    
+    /**
+    * @brief simple constructor
+    * @author Alexander Freytag
+    */
+    FMKGPHyperparameterOptimization( const bool & b_performRegression = false);
+        
     /**
     * @brief standard constructor
     *
@@ -176,27 +262,48 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     */
     FMKGPHyperparameterOptimization( const Config *conf, ParameterizedFunction *pf, FastMinKernel *fmk = NULL, const std::string & confSection = "GPHIKClassifier" );
       
-    /** simple destructor */
+    /**
+    * @brief standard destructor
+    * @author Alexander Freytag
+    */
     virtual ~FMKGPHyperparameterOptimization();
     
     ///////////////////// ///////////////////// /////////////////////
     //                         GET / SET
-    ///////////////////// ///////////////////// ///////////////////// 
+    ///////////////////// ///////////////////// /////////////////////
+    
+    /**
+    * @brief Set upper bound for hyper parameters to optimize
+    * @author Alexander Freytag
+    */    
     void setParameterUpperBound(const double & _parameterUpperBound);
+    /**
+    * @brief Set lower bound for hyper parameters to optimize
+    * @author Alexander Freytag
+    */    
     void setParameterLowerBound(const double & _parameterLowerBound);  
     
+    /**
+    * @brief Get the currently known class numbers
+    * @author Alexander Freytag
+    */    
     std::set<int> getKnownClassNumbers ( ) const;
     
     ///////////////////// ///////////////////// /////////////////////
     //                      CLASSIFIER STUFF
     ///////////////////// ///////////////////// /////////////////////  
     
+    /**
+    * @brief Set variables and parameters to default or config-specified values
+    * @author Alexander Freytag
+    */       
     void initialize( const Config *conf, ParameterizedFunction *pf, FastMinKernel *fmk = NULL, const std::string & confSection = "GPHIKClassifier" );
        
 #ifdef NICE_USELIB_MATIO
     /**
     * @brief Perform hyperparameter optimization
-    *
+    * @author Alexander Freytag
+    * 
     * @param data MATLAB data structure, like a feature matrix loaded from ImageNet
     * @param y label vector (arbitrary), will be converted into a binary label vector
     * @param positives set of positive examples (indices)
@@ -206,7 +313,8 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
 
     /**
     * @brief Perform hyperparameter optimization for GP multi-class or binary problems
-    *
+    * @author Alexander Freytag
+    * 
     * @param data MATLAB data structure, like a feature matrix loaded from ImageNet
     * @param y label vector with multi-class labels
     * @param examples mapping of example index to new index
@@ -216,6 +324,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
 
     /**
     * @brief Perform hyperparameter optimization (multi-class or binary) assuming an already initialized fmk object
+    * @author Alexander Freytag
     *
     * @param y label vector (multi-class as well as binary labels supported)
     */
@@ -226,8 +335,8 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @param binLabels vector of binary label vectors (1,-1) and corresponding class no.
     */
-    void optimize ( std::map<int, NICE::Vector> & binaryLabels );    
-    
+    void optimize ( std::map<int, NICE::Vector> & binaryLabels );  
+   
     /**
    * @brief Compute the necessary variables for approximations of the predictive variance (LUTs), assuming an already initialized fmk object
     * @author Alexander Freytag
@@ -322,7 +431,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @author Alexander Freytag
     * @date 19-12-2013 (dd-mm-yyyy)
     * @param x input example
-     * @param predVariance contains the approximation of the predictive variance
+    * @param predVariance contains the approximation of the predictive variance
     *
     */    
     void computePredictiveVarianceApproximateFine(const NICE::Vector & x, double & predVariance ) const;      
@@ -347,23 +456,45 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     // interface specific methods for store and restore
     ///////////////////// INTERFACE PERSISTENT ///////////////////// 
     
+    /** 
+     * @brief Load current object from external file (stream)
+     * @author Alexander Freytag
+     */     
     void restore ( std::istream & is, int format = 0 );
+    
+    /** 
+     * @brief Save current object to external file (stream)
+     * @author Alexander Freytag
+     */      
     void store ( std::ostream & os, int format = 0 ) const;
+    
+    /** 
+     * @brief Clear current object
+     * @author Alexander Freytag
+     */      
     void clear ( ) ;
     
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
     // interface specific methods for incremental extensions
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////    
     
+    /** 
+     * @brief add a new example
+     * @author Alexander Freytag
+     */       
     virtual void addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement = true
-			   );
-			   
+                             const double & label, 
+                             const bool & performOptimizationAfterIncrement = true
+                           );
+
+    /** 
+     * @brief add several new examples
+     * @author Alexander Freytag
+     */    
     virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement = true
-				    );         
+                                      const NICE::Vector & newLabels,
+                                      const bool & performOptimizationAfterIncrement = true
+                                    );         
 };
 
 }

+ 6 - 6  FastMinKernel.cpp

@@ -422,25 +422,25 @@ void FastMinKernel::hikUpdateLookupTable(double * T, const double & alphaNew, co
   double diffOfAlpha(alphaNew - alphaOld);
   
   // loop through all dimensions
-  for (int dim = 0; dim < this->d; dim++)
+  for ( int dim = 0; dim < this->d; dim++ )
   {  
     double x_i ( (X_sorted(dim,idx)) );
     
    //TODO we could also check whether x_i < tol, if we stored the tol explicitly
-    if (x_i == 0.0) //nothing to do in this dimension
+    if ( x_i == 0.0 ) //nothing to do in this dimension
       continue;
 
-    //TODO we could speed up this with first do a binary search for the position where the min changes, and then do two separate for-loops
+    //TODO we could speed this up by first doing a binary search for the position where the min changes, and then using two separate for-loops
     for (uint j = 0; j < hmax; j++)
     {
         double fval;
         int q_bin = q.quantize(x_i);
-        if (q_bin > j)
+        
+        if ( q_bin > (int) j )
           fval = prototypes[j];
         else
           fval = x_i;      
         
-//       double fval = std::min(prototypes[j],x_i);      
       T[ dim*hmax + j ] += diffOfAlpha*fval;
     }
   }
@@ -900,7 +900,7 @@ double FastMinKernel::getFrobNormApprox()
       }
       secondTerm /= 3.0;
       secondTerm = pow(secondTerm, 2);
-      secondTerm *= (pow(this->n,2) - this->n);
+      secondTerm *= (this->n * ( this->n - 1 ));
       frobNormApprox += secondTerm;
       
       

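For reference, the quantize-based branch above computes, per dimension and quantization bin j, exactly the minimum that the removed comment spelled out: T[dim*hmax + j] += (alphaNew - alphaOld) * min(prototypes[j], x_i). The Frobenius-norm hunk is a pure rewrite as well, since n^2 - n == n*(n-1) while avoiding std::pow on doubles. A minimal stand-alone sketch of the LUT update for one dimension (plain vectors instead of the NICE quantization object; the function name is hypothetical):

#include <vector>

// hypothetical stand-alone version of the per-dimension LUT update;
// prototypes[j] is the representative value of quantization bin j and is
// non-decreasing in j, x is the feature value of the updated example
void updateLutDimension ( std::vector<double> & T_dim,
                          const std::vector<double> & prototypes,
                          const double x,
                          const double alphaNew,
                          const double alphaOld )
{
  const double diffOfAlpha ( alphaNew - alphaOld );

  if ( x == 0.0 ) // nothing to do in this dimension
    return;

  for ( size_t j = 0; j < T_dim.size(); j++ )
  {
    // equivalent to the q_bin > j branch above: min( prototypes[j], x )
    const double fval = ( prototypes[j] < x ) ? prototypes[j] : x;
    T_dim[j] += diffOfAlpha * fval;
  }
}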
+ 110 - 77  GPHIKClassifier.cpp

@@ -30,12 +30,25 @@ using namespace NICE;
 
 void GPHIKClassifier::init(const Config *conf, const string & s_confSection)
 {
-  double parameterUpperBound = conf->gD(confSection, "parameter_upper_bound", 5.0 );
-  double parameterLowerBound = conf->gD(confSection, "parameter_lower_bound", 1.0 );  
+  //copy the given config to have it accessible later on
+  if ( this->confCopy != conf )
+  {
+    if ( this->confCopy != NULL )
+      delete this->confCopy;
+    
+    this->confCopy = new Config ( *conf );
+    //we do not want to read until end of file for restoring    
+    this->confCopy->setIoUntilEndOfFile(false);        
+  }
+  
+
+  
+  double parameterUpperBound = confCopy->gD(confSection, "parameter_upper_bound", 5.0 );
+  double parameterLowerBound = confCopy->gD(confSection, "parameter_lower_bound", 1.0 );  
 
-  this->noise = conf->gD(confSection, "noise", 0.01);
+  this->noise = confCopy->gD(confSection, "noise", 0.01);
 
-  string transform = conf->gS(confSection, "transform", "absexp" );
+  string transform = confCopy->gS(confSection, "transform", "absexp" );
   
   if (pf == NULL)
   {
@@ -57,34 +70,41 @@ void GPHIKClassifier::init(const Config *conf, const string & s_confSection)
     //we already know the pf from the restore-function
   }
   this->confSection = confSection;
-  this->verbose = conf->gB(confSection, "verbose", false);
-  this->debug = conf->gB(confSection, "debug", false);
-  this->uncertaintyPredictionForClassification = conf->gB( confSection, "uncertaintyPredictionForClassification", false );
+  this->verbose = confCopy->gB(confSection, "verbose", false);
+  this->debug = confCopy->gB(confSection, "debug", false);
+  this->uncertaintyPredictionForClassification = confCopy->gB( confSection, "uncertaintyPredictionForClassification", false );
   
-  if (confCopy != conf)
-  {
-    this->confCopy = new Config ( *conf );
-    //we do not want to read until end of file for restoring    
-    confCopy->setIoUntilEndOfFile(false);    
-  }
+
    
   //how do we approximate the predictive variance for classification uncertainty?
-  string s_varianceApproximation = conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
+  string s_varianceApproximation = confCopy->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
   if ( (s_varianceApproximation.compare("approximate_rough") == 0) || ((s_varianceApproximation.compare("1") == 0)) )
   {
     this->varianceApproximation = APPROXIMATE_ROUGH;
+    
+    //no additional eigenvalue is needed here at all.
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 0 );
   }
   else if ( (s_varianceApproximation.compare("approximate_fine") == 0) || ((s_varianceApproximation.compare("2") == 0)) )
   {
     this->varianceApproximation = APPROXIMATE_FINE;
+    
+    //security check - compute at least one eigenvalue for this approximation strategy
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", std::max( confCopy->gI(confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 ), 1) );
   }
   else if ( (s_varianceApproximation.compare("exact") == 0)  || ((s_varianceApproximation.compare("3") == 0)) )
   {
     this->varianceApproximation = EXACT;
+    
+    //no additional eigenvalue is needed here at all.
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 );    
   }
   else
   {
     this->varianceApproximation = NONE;
+    
+    //no additional eigenvalue is needed here at all.
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 );
   } 
   
   if ( this->verbose )
@@ -110,7 +130,9 @@ GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSecti
   // if no config file was given, we either restore the classifier from an external file, or run ::init with 
  // an empty config (thereby using default values) when calling the train-method
   if ( conf != NULL )
+  {
     this->init(conf, confSection);
+  }
 }
 
 GPHIKClassifier::~GPHIKClassifier()
@@ -161,14 +183,12 @@ void GPHIKClassifier::classify ( const SparseVector * example,  int & result, Sp
   
   scores.clear();
   
-  int classno = gphyper->classify ( *example, scores );
+  result = gphyper->classify ( *example, scores );
 
   if ( scores.size() == 0 ) {
     fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
   }
   
-  result = scores.maxElement();
-   
   if (uncertaintyPredictionForClassification)
   {
     if (varianceApproximation != NONE)
@@ -195,14 +215,12 @@ void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, Sp
   
   scores.clear();
   
-  int classno = gphyper->classify ( *example, scores );
+  result = gphyper->classify ( *example, scores );
 
   if ( scores.size() == 0 ) {
     fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
   }
-  
-  result = scores.maxElement();
-  
+    
   if (uncertaintyPredictionForClassification)
   {
     if (varianceApproximation != NONE)
@@ -225,6 +243,12 @@ void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, Sp
 /** training process */
 void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels )
 {
+  // security-check: examples and labels have to be of the same size
+  if ( examples.size() != labels.size() ) 
+  {
+    fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
+  }  
+  
   if (verbose)
   {
     std::cerr << "GPHIKClassifier::train" << std::endl;
@@ -297,6 +321,18 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
 /** training process */
 void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
 { 
+  // security-check: examples and labels have to be of the same size
+  for ( std::map< int, NICE::Vector >::const_iterator binLabIt = binLabels.begin();
+        binLabIt != binLabels.end();
+        binLabIt++ 
+      )
+  {
+    if ( examples.size() != binLabIt->second.size() ) 
+    {
+      fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
+    }
+  }
+  
   if (verbose)
     std::cerr << "GPHIKClassifier::train" << std::endl;
   
@@ -378,7 +414,6 @@ void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, do
     }
     case APPROXIMATE_FINE:
     {
-        std::cerr << "predict uncertainty fine" << std::endl;
       gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
       break;
     }    
@@ -411,7 +446,6 @@ void GPHIKClassifier::predictUncertainty( const NICE::Vector * example, double &
     }
     case APPROXIMATE_FINE:
     {
-        std::cerr << "predict uncertainty fine" << std::endl;
       gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
       break;
     }    
@@ -453,8 +487,8 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
     
     if ( ! this->isStartTag( tmp, "GPHIKClassifier" ) )
     {
-        std::cerr << " WARNING - attempt to restore GPHIKClassifier, but start flag " << tmp << " does not match! Aborting... " << std::endl;
-	throw;
+      std::cerr << " WARNING - attempt to restore GPHIKClassifier, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+      throw;
     }   
     
     if (pf != NULL)
@@ -490,77 +524,76 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
       tmp = this->removeStartTag ( tmp );
       
       if ( b_restoreVerbose )
-	std::cerr << " currently restore section " << tmp << " in GPHIKClassifier" << std::endl;
+        std::cerr << " currently restore section " << tmp << " in GPHIKClassifier" << std::endl;
       
       if ( tmp.compare("confSection") == 0 )
       {
         is >> confSection;        
-	is >> tmp; // end of block 
-	tmp = this->removeEndTag ( tmp );	
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("pf") == 0 )
       {
-	
-	is >> tmp; // start of block 
-	if ( this->isEndTag( tmp, "pf" ) )
-	{
-	  std::cerr << " ParameterizedFunction object can not be restored. Aborting..." << std::endl;
-	  throw;
-	} 
-	
-	std::string transform = this->removeStartTag ( tmp );
-	
-
-	if ( transform == "PFAbsExp" )
-	{
-	  this->pf = new PFAbsExp ();
-	} else if ( transform == "PFExp" ) {
-	  this->pf = new PFExp ();
-	} else {
-	  fthrow(Exception, "Transformation type is unknown " << transform);
-	}
-	
-	pf->restore(is, format);
-	
-	is >> tmp; // end of block 
-	tmp = this->removeEndTag ( tmp );	
+      
+        is >> tmp; // start of block 
+        if ( this->isEndTag( tmp, "pf" ) )
+        {
+          std::cerr << " ParameterizedFunction object can not be restored. Aborting..." << std::endl;
+          throw;
+        } 
+        
+        std::string transform = this->removeStartTag ( tmp );
+        
+
+        if ( transform == "PFAbsExp" )
+        {
+          this->pf = new PFAbsExp ();
+        } else if ( transform == "PFExp" ) {
+          this->pf = new PFExp ();
+        } else {
+          fthrow(Exception, "Transformation type is unknown " << transform);
+        }
+        
+        pf->restore(is, format);
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
       } 
       else if ( tmp.compare("ConfigCopy") == 0 )
       {
-	// possibly obsolete safety checks
-	if ( confCopy == NULL )
-	  confCopy = new Config;
-	confCopy->clear();
-	
-	
-	//we do not want to read until the end of the file
-	confCopy->setIoUntilEndOfFile( false );
-	//load every options we determined explicitely
-	confCopy->restore(is, format);
-	
-	is >> tmp; // end of block 
-	tmp = this->removeEndTag ( tmp );	
+        // possibly obsolete safety checks
+        if ( confCopy == NULL )
+          confCopy = new Config;
+        confCopy->clear();
+        
+        
+        //we do not want to read until the end of the file
+        confCopy->setIoUntilEndOfFile( false );
+        //load all options we determined explicitly
+        confCopy->restore(is, format);
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("gphyper") == 0 )
       {
-	if ( gphyper == NULL )
-	  gphyper = new NICE::FMKGPHyperparameterOptimization();
-	
-	//then, load everything that we stored explicitely,
-	// including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
-	gphyper->restore(is, format);  
-		
-	is >> tmp; // end of block 
-	tmp = this->removeEndTag ( tmp );	
+        if ( gphyper == NULL )
+          gphyper = new NICE::FMKGPHyperparameterOptimization();
+        
+        //then, load everything that we stored explicitly,
+        // including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
+        gphyper->restore(is, format);  
+          
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
       }       
       else
       {
-	std::cerr << "WARNING -- unexpected GPHIKClassifier object -- " << tmp << " -- for restoration... aborting" << std::endl;
-	throw;	
+      std::cerr << "WARNING -- unexpected GPHIKClassifier object -- " << tmp << " -- for restoration... aborting" << std::endl;
+      throw;
       }
     }
 
-	
    //load all settings as well as default options
     std::cerr << "run this->init" << std::endl;
     this->init(confCopy, confSection);    

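A condensed sketch of the variance-approximation setup added to GPHIKClassifier::init above; the enum values, option strings, and eigenvalue counts are taken from the diff, while the free function and the plain int parameter are simplifications of the Config-based code:

#include <algorithm>
#include <string>

enum VarianceApproximation { APPROXIMATE_ROUGH, APPROXIMATE_FINE, EXACT, NONE };

// simplified stand-in for the config-driven branch in GPHIKClassifier::init
VarianceApproximation chooseVarianceApproximation ( const std::string & s,
                                                    int & nrOfEigenvaluesToConsiderForVarApprox )
{
  if ( ( s == "approximate_rough" ) || ( s == "1" ) )
  {
    nrOfEigenvaluesToConsiderForVarApprox = 0; // rough approximation needs no eigenpairs
    return APPROXIMATE_ROUGH;
  }
  if ( ( s == "approximate_fine" ) || ( s == "2" ) )
  {
    // security check from the diff: at least one eigenpair for the fine approximation
    nrOfEigenvaluesToConsiderForVarApprox = std::max ( nrOfEigenvaluesToConsiderForVarApprox, 1 );
    return APPROXIMATE_FINE;
  }
  if ( ( s == "exact" ) || ( s == "3" ) )
  {
    nrOfEigenvaluesToConsiderForVarApprox = 1;
    return EXACT;
  }
  nrOfEigenvaluesToConsiderForVarApprox = 1;
  return NONE;
}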
+ 90 - 23  GPHIKClassifier.h

@@ -1,9 +1,8 @@
 /** 
 * @file GPHIKClassifier.h
-* @author Erik Rodner, Alexander Freytag
 * @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning) (Interface)
-* @date 02/01/2012
-
+* @author Alexander Freytag, Erik Rodner
+* @date 01-02-2012 (dd-mm-yyyy)
 */
 #ifndef _NICE_GPHIKCLASSIFIERINCLUDE
 #define _NICE_GPHIKCLASSIFIERINCLUDE
@@ -28,14 +27,46 @@ namespace NICE {
  /** 
  * @class GPHIKClassifier
  * @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning)
- * @author Erik Rodner, Alexander Freytag
+ * @author Alexander Freytag, Erik Rodner
  */
  
 class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
 {
 
   protected:
+    
+    /////////////////////////
+    /////////////////////////
+    // PROTECTED VARIABLES //
+    /////////////////////////
+    /////////////////////////
+    
+    // output/debug related settings
+    
+    /** verbose flag for useful output*/
+    bool verbose;
+    /** debug flag for several outputs useful for debugging*/
+    bool debug;
+    
+    // general specifications
+    
+    /** Header in configfile where variable settings are stored */
     std::string confSection;
+    /** Configuration file specifying variable settings */
+    NICE::Config *confCopy; 
+    
+    // internal objects 
+    
+    /** Main object doing all the jobs: training, classification, optimization, ... */
+    NICE::FMKGPHyperparameterOptimization *gphyper;    
+    
+    /** Possibility for transforming feature values, parameters can be optimized */
+    NICE::ParameterizedFunction *pf;    
+    
+    
+    
+    
+    /** Gaussian label noise for model regularization */
     double noise;
 
     enum VarianceApproximation{
@@ -45,39 +76,49 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
       NONE
     };
     
+    /** Which technique for variance approximations shall be used */
     VarianceApproximation varianceApproximation;
     
     /**compute the uncertainty prediction during classification?*/
     bool uncertaintyPredictionForClassification;
     
-    NICE::Config *confCopy;
-    NICE::ParameterizedFunction *pf;
-    NICE::FMKGPHyperparameterOptimization *gphyper;
-    
-    /** verbose flag for useful output*/
-    bool verbose;
-    /** debug flag for several outputs useful for debugging*/
-    bool debug;
+    /////////////////////////
+    /////////////////////////
+    //  PROTECTED METHODS  //
+    /////////////////////////
+    /////////////////////////
     
     /** 
-    * @brief classify a given example with the previously learnt model
-    * @param pe example to be classified given in a sparse representation
+    * @brief Setup internal variables and objects used
+    * @author Alexander Freytag
+    * @param conf Config file to specify variable settings
+    * @param s_confSection section of the config file from which the settings are read
     */    
     void init(const NICE::Config *conf, const std::string & s_confSection);
        
 
   public:
 
-    /** simple constructor */
+    /** 
+     * @brief standard constructor
+     * @author Alexander Freytag
+     */
     GPHIKClassifier( const NICE::Config *conf = NULL, const std::string & s_confSection = "GPHIKClassifier" );
       
-    /** simple destructor */
+    /**
+     * @brief simple destructor
+     * @author Alexander Freytag
+     */
     ~GPHIKClassifier();
     
     ///////////////////// ///////////////////// /////////////////////
     //                         GET / SET
     ///////////////////// ///////////////////// /////////////////////      
     
+    /**
+     * @brief Return currently known class numbers
+     * @author Alexander Freytag
+     */    
     std::set<int> getKnownClassNumbers ( ) const;    
    
     ///////////////////// ///////////////////// /////////////////////
@@ -146,6 +187,10 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      */
     void train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
     
+    /**
+     * @brief Clone classifier object
+     * @author Alexander Freytag
+     */    
     GPHIKClassifier *clone () const;
 
     /** 
@@ -172,8 +217,22 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     // interface specific methods for store and restore
     ///////////////////// INTERFACE PERSISTENT /////////////////////   
     
+    /** 
+     * @brief Load classifier from external file (stream)
+     * @author Alexander Freytag
+     */     
     void restore ( std::istream & is, int format = 0 );
+    
+    /** 
+     * @brief Save classifier to external file (stream)
+     * @author Alexander Freytag
+     */     
     void store ( std::ostream & os, int format = 0 ) const;
+    
+    /** 
+     * @brief Clear classifier object
+     * @author Alexander Freytag
+     */     
     void clear ();
     
     
@@ -181,15 +240,23 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     // interface specific methods for incremental extensions
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
     
+    /** 
+     * @brief add a new example
+     * @author Alexander Freytag
+     */    
     virtual void addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement = true
-			   );
-			   
+                              const double & label, 
+                              const bool & performOptimizationAfterIncrement = true
+                            );
+                          
+    /** 
+     * @brief add several new examples
+     * @author Alexander Freytag
+     */    
     virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement = true
-				    );       
+                                      const NICE::Vector & newLabels,
+                                      const bool & performOptimizationAfterIncrement = true
+                                    );       
 
 
 

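A hypothetical end-to-end usage sketch of the interface declared above; the method signatures follow the header and the .cpp diff, while the include paths, the Config setter, and the data setup are assumptions:

#include <vector>

#include <core/basics/Config.h>          // assumed include paths
#include <core/vector/VectorT.h>
#include <core/vector/SparseVectorT.h>
#include "gp-hik-core/GPHIKClassifier.h"

void trainAndClassify ( const std::vector< const NICE::SparseVector * > & examples,
                        const NICE::Vector & labels,
                        const NICE::SparseVector * testExample )
{
  NICE::Config conf;
  conf.sB ( "GPHIKClassifier", "uncertaintyPredictionForClassification", true );

  NICE::GPHIKClassifier classifier ( &conf, "GPHIKClassifier" );
  classifier.train ( examples, labels );

  // classify now returns the class number directly (see the diff above),
  // instead of the caller recomputing it via scores.maxElement()
  int result;
  NICE::SparseVector scores;
  classifier.classify ( testExample, result, scores );

  double uncertainty;
  classifier.predictUncertainty ( testExample, uncertainty );
}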
+ 646 - 0  GPHIKRegression.cpp

@@ -0,0 +1,646 @@
+/** 
+* @file GPHIKRegression.cpp
+* @brief Main interface for our GP HIK regression implementation (Implementation)
+* @author Alexander Freytag
+* @date 15-01-2014 (dd-mm-yyyy)
+*/
+
+// STL includes
+#include <iostream>
+
+// NICE-core includes
+#include <core/basics/numerictools.h>
+#include <core/basics/Timer.h>
+
+// gp-hik-core includes
+#include "GPHIKRegression.h"
+#include "gp-hik-core/parameterizedFunctions/PFAbsExp.h"
+#include "gp-hik-core/parameterizedFunctions/PFExp.h"
+#include "gp-hik-core/parameterizedFunctions/PFMKL.h"
+
+using namespace std;
+using namespace NICE;
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PROTECTED METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+
+void GPHIKRegression::init(const Config *conf, const string & s_confSection)
+{
+  //copy the given config to have it accessible later on
+  if ( this->confCopy != conf )
+  {
+    if ( this->confCopy != NULL )
+      delete this->confCopy;
+    
+    this->confCopy = new Config ( *conf );
+    //we do not want to read until end of file for restoring    
+    this->confCopy->setIoUntilEndOfFile(false);        
+  }
+  
+
+  
+  double parameterUpperBound = confCopy->gD(confSection, "parameter_upper_bound", 5.0 );
+  double parameterLowerBound = confCopy->gD(confSection, "parameter_lower_bound", 1.0 );  
+
+  this->noise = confCopy->gD(confSection, "noise", 0.01);
+
+  string transform = confCopy->gS(confSection, "transform", "absexp" );
+  
+  if (pf == NULL)
+  {
+    if ( transform == "absexp" )
+    {
+      this->pf = new PFAbsExp( 1.0, parameterLowerBound, parameterUpperBound );
+    } else if ( transform == "exp" ) {
+      this->pf = new PFExp( 1.0, parameterLowerBound, parameterUpperBound );
+    }else if ( transform == "MKL" ) {
+      //TODO generic, please :) load from a separate file or something like this!
+      std::set<int> steps; steps.insert(4000); steps.insert(6000); //specific for VISAPP
+      this->pf = new PFMKL( steps, parameterLowerBound, parameterUpperBound );
+    } else {
+      fthrow(Exception, "Transformation type is unknown " << transform);
+    }
+  }
+  else
+  {
+    //we already know the pf from the restore-function
+  }
+  this->confSection = confSection;
+  this->verbose = confCopy->gB(confSection, "verbose", false);
+  this->debug = confCopy->gB(confSection, "debug", false);
+  this->uncertaintyPredictionForRegression = confCopy->gB( confSection, "uncertaintyPredictionForRegression", false );
+  
+
+   
+  //how do we approximate the predictive variance for regression uncertainty?
+  string s_varianceApproximation = confCopy->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
+  if ( (s_varianceApproximation.compare("approximate_rough") == 0) || ((s_varianceApproximation.compare("1") == 0)) )
+  {
+    this->varianceApproximation = APPROXIMATE_ROUGH;
+    
+    //no additional eigenvalue is needed here at all.
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 0 );
+  }
+  else if ( (s_varianceApproximation.compare("approximate_fine") == 0) || ((s_varianceApproximation.compare("2") == 0)) )
+  {
+    this->varianceApproximation = APPROXIMATE_FINE;
+    
+    //security check - compute at least one eigenvalue for this approximation strategy
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", std::max( confCopy->gI(confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 ), 1) );
+  }
+  else if ( (s_varianceApproximation.compare("exact") == 0)  || ((s_varianceApproximation.compare("3") == 0)) )
+  {
+    this->varianceApproximation = EXACT;
+    
+    //no additional eigenvalue is needed here at all.
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 );    
+  }
+  else
+  {
+    this->varianceApproximation = NONE;
+    
+    //no additional eigenvalue is needed here at all.
+    this->confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 );
+  } 
+  
+  if ( this->verbose )
+    std::cerr << "varianceApproximationStrategy: " << s_varianceApproximation  << std::endl;
+}
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PUBLIC METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+GPHIKRegression::GPHIKRegression( const Config *conf, const string & s_confSection ) 
+{
+  //default settings, may be overwritten lateron
+  gphyper = NULL;
+  pf = NULL;
+  confCopy = NULL;
+  //just a default value
+  uncertaintyPredictionForRegression = false;
+  
+  this->confSection = s_confSection;
+  
+  // if no config file was given, we either restore the regression object from an external file, or run ::init with 
+  // an empty config (thereby using default values) when calling the train-method
+  if ( conf != NULL )
+  {
+    this->init(conf, confSection);
+  }
+}
+
+GPHIKRegression::~GPHIKRegression()
+{
+  if ( gphyper != NULL )
+    delete gphyper;
+  
+  if (pf != NULL)
+    delete pf;
+
+  if ( confCopy != NULL )
+    delete confCopy;
+}
+
+///////////////////// ///////////////////// /////////////////////
+//                         GET / SET
+///////////////////// ///////////////////// ///////////////////// 
+
+
+
+///////////////////// ///////////////////// /////////////////////
+//                      REGRESSION STUFF
+///////////////////// ///////////////////// /////////////////////
+
+void GPHIKRegression::estimate ( const SparseVector * example,  double & result ) const
+{
+  double tmpUncertainty;
+  this->estimate( example, result, tmpUncertainty );
+}
+
+void GPHIKRegression::estimate ( const NICE::Vector * example,  double & result ) const
+{
+  double tmpUncertainty;
+  this->estimate( example, result, tmpUncertainty );
+}
+
+void GPHIKRegression::estimate ( const SparseVector * example,  double & result, double & uncertainty ) const
+{
+  if (gphyper == NULL)
+     fthrow(Exception, "Regression object not trained yet -- aborting!" );
+  
+  NICE::SparseVector scores;
+  scores.clear();
+  
+  gphyper->classify ( *example, scores );
+  
+  if ( scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  }
+  
+  // for regression, the internal gphyper object returns a sparse vector with a single entry only
+  result = scores.begin()->second;
+  
+  if (uncertaintyPredictionForRegression)
+  {
+    if (varianceApproximation != NONE)
+    {
+      this->predictUncertainty( example, uncertainty );
+    }  
+    else
+    {
+      //do nothing
+      uncertainty = std::numeric_limits<double>::max();
+    }
+  }
+  else
+  {
+    //do nothing
+    uncertainty = std::numeric_limits<double>::max();
+  }    
+}
+
+void GPHIKRegression::estimate ( const NICE::Vector * example,  double & result, double & uncertainty ) const
+{
+  if (gphyper == NULL)
+     fthrow(Exception, "Regression object not trained yet -- aborting!" );  
+  
+  NICE::SparseVector scores;
+  scores.clear();
+  
+  gphyper->classify ( *example, scores );
+
+  if ( scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  }
+  
+  // for regression, the internal gphyper object returns a sparse vector with a single entry only
+  result = scores.begin()->second;
+    
+  if (uncertaintyPredictionForRegression)
+  {
+    if (varianceApproximation != NONE)
+    {
+      this->predictUncertainty( example, uncertainty );
+    }  
+    else
+    {
+      //do nothing
+      uncertainty = std::numeric_limits<double>::max();
+    }
+  }
+  else
+  {
+    //do nothing
+    uncertainty = std::numeric_limits<double>::max();
+  }  
+}
+
+/** training process */
+void GPHIKRegression::train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels )
+{
+  // security-check: examples and labels have to be of the same size
+  if ( examples.size() != labels.size() ) 
+  {
+    fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
+  }  
+  
+  if (verbose)
+  {
+    std::cerr << "GPHIKRegression::train" << std::endl;
+  }
+  
+  if ( this->confCopy == NULL )
+  {
+    std::cerr << "WARNING -- No config used so far, initialize values with empty config file now..." << std::endl;
+    NICE::Config tmpConfEmpty ;
+    this->init ( &tmpConfEmpty, this->confSection );
+  }
+
+  Timer t;
+  t.start();
+  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
+  
+  t.stop();
+  if (verbose)
+    std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
+  
+  if (gphyper != NULL)
+     delete gphyper;
+  
+  
+  // only the fine variance approximation makes use of the eigenvalues, so skip their computation otherwise
+  if ( varianceApproximation != APPROXIMATE_FINE )
+    confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 0);
+
+  // add flag telling gphyper that only regression is performed,
+  // so that all the binary-label handling is skipped
+  confCopy->sB ( confSection, "b_performRegression", true );
+  gphyper = new FMKGPHyperparameterOptimization ( confCopy, pf, fmk, confSection ); 
+
+  if (verbose)
+    cerr << "Learning ..." << endl;
+
+  // go go go
+  gphyper->optimize ( labels );
+  if (verbose)
+    std::cerr << "optimization done" << std::endl;
+  
+  if ( varianceApproximation != NONE )
+  {    
+    switch (varianceApproximation)    
+    {
+      case APPROXIMATE_ROUGH:
+      {
+        gphyper->prepareVarianceApproximationRough();
+        break;
+      }
+      case APPROXIMATE_FINE:
+      {
+        gphyper->prepareVarianceApproximationFine();
+        break;
+      }    
+      case EXACT:
+      {
+       //nothing to prepare
+        break;
+      }
+      default:
+      {
+       //nothing to prepare
+      }
+    }
+  }
+
+
+  // clean up all examples ??
+  if (verbose)
+    std::cerr << "Learning finished" << std::endl;
+}
+
+
+GPHIKRegression *GPHIKRegression::clone () const
+{
+  fthrow(Exception, "GPHIKRegression: clone() not yet implemented" );
+
+  return NULL;
+}
+  
+void GPHIKRegression::predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const
+{  
+  if (gphyper == NULL)
+     fthrow(Exception, "Regression object not trained yet -- aborting!" );  
+  
+  switch (varianceApproximation)    
+  {
+    case APPROXIMATE_ROUGH:
+    {
+      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      break;
+    }
+    case APPROXIMATE_FINE:
+    {
+      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      break;
+    }    
+    case EXACT:
+    {
+      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      break;
+    }
+    default:
+    {
+      fthrow(Exception, "GPHIKRegression - your settings disabled the variance approximation needed for uncertainty prediction.");
+    }
+  }
+}
+
+void GPHIKRegression::predictUncertainty( const NICE::Vector * example, double & uncertainty ) const
+{  
+  if (gphyper == NULL)
+     fthrow(Exception, "Regression object not trained yet -- aborting!" );  
+  
+  switch (varianceApproximation)    
+  {
+    case APPROXIMATE_ROUGH:
+    {
+      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      break;
+    }
+    case APPROXIMATE_FINE:
+    {
+      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      break;
+    }    
+    case EXACT:
+    {
+      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      break;
+    }
+    default:
+    {
+      fthrow(Exception, "GPHIKRegression - your settings disabled the variance approximation needed for uncertainty prediction.");
+    }
+  }
+}
+
+///////////////////// INTERFACE PERSISTENT /////////////////////
+// interface specific methods for store and restore
+///////////////////// INTERFACE PERSISTENT ///////////////////// 
+
+void GPHIKRegression::restore ( std::istream & is, int format )
+{
+  //delete everything we knew so far...
+  this->clear();
+  
+  bool b_restoreVerbose ( false );
+#ifdef B_RESTOREVERBOSE
+  b_restoreVerbose = true;
+#endif  
+  
+  if ( is.good() )
+  {
+    if ( b_restoreVerbose ) 
+      std::cerr << " restore GPHIKRegression" << std::endl;
+    
+    std::string tmp;
+    is >> tmp; //class name 
+    
+    if ( ! this->isStartTag( tmp, "GPHIKRegression" ) )
+    {
+      std::cerr << " WARNING - attempt to restore GPHIKRegression, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+      throw;
+    }   
+    
+    if (pf != NULL)
+    {
+      delete pf;
+      pf = NULL;
+    }
+    if ( confCopy != NULL )
+    {
+      delete confCopy;
+      confCopy = NULL;
+    }
+    if (gphyper != NULL)
+    {
+      delete gphyper;
+      gphyper = NULL;
+    }    
+    
+    is.precision (numeric_limits<double>::digits10 + 1);
+    
+    bool b_endOfBlock ( false ) ;
+    
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "GPHIKRegression" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }      
+      
+      tmp = this->removeStartTag ( tmp );
+      
+      if ( b_restoreVerbose )
+        std::cerr << " currently restore section " << tmp << " in GPHIKRegression" << std::endl;
+      
+      if ( tmp.compare("confSection") == 0 )
+      {
+        is >> confSection;        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("pf") == 0 )
+      {
+      
+        is >> tmp; // start of block 
+        if ( this->isEndTag( tmp, "pf" ) )
+        {
+          std::cerr << " ParameterizedFunction object can not be restored. Aborting..." << std::endl;
+          throw;
+        } 
+        
+        std::string transform = this->removeStartTag ( tmp );
+        
+
+        if ( transform == "PFAbsExp" )
+        {
+          this->pf = new PFAbsExp ();
+        } else if ( transform == "PFExp" ) {
+          this->pf = new PFExp ();
+        } else {
+          fthrow(Exception, "Transformation type is unknown " << transform);
+        }
+        
+        pf->restore(is, format);
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      } 
+      else if ( tmp.compare("ConfigCopy") == 0 )
+      {
+        // possibly obsolete safety checks
+        if ( confCopy == NULL )
+          confCopy = new Config;
+        confCopy->clear();
+        
+        
+        //we do not want to read until the end of the file
+        confCopy->setIoUntilEndOfFile( false );
+        // load every option we determined explicitly
+        confCopy->restore(is, format);
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("gphyper") == 0 )
+      {
+        if ( gphyper == NULL )
+          gphyper = new NICE::FMKGPHyperparameterOptimization();
+        
+        // then, load everything that we stored explicitly,
+        // including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
+        gphyper->restore(is, format);  
+          
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }       
+      else
+      {
+      std::cerr << "WARNING -- unexpected GPHIKRegression object -- " << tmp << " -- for restoration... aborting" << std::endl;
+      throw;
+      }
+    }
+
+    // load all settings as well as default options
+    std::cerr << "run this->init" << std::endl;
+    this->init(confCopy, confSection);    
+    std::cerr << "run gphyper->initialize" << std::endl;
+    gphyper->initialize ( confCopy, pf, NULL, confSection );
+  }
+  else
+  {
+    std::cerr << "GPHIKRegression::restore -- InStream not initialized - restoring not possible!" << std::endl;
+    throw;
+  }
+}
+
+void GPHIKRegression::store ( std::ostream & os, int format ) const
+{
+  if (gphyper == NULL)
+     fthrow(Exception, "Regression object not trained yet -- aborting!" );  
+  
+  if (os.good())
+  {
+    // show starting point
+    os << this->createStartTag( "GPHIKRegression" ) << std::endl;    
+    
+    os.precision (numeric_limits<double>::digits10 + 1);
+    
+    os << this->createStartTag( "confSection" ) << std::endl;
+    os << confSection << std::endl;
+    os << this->createEndTag( "confSection" ) << std::endl; 
+    
+    os << this->createStartTag( "pf" ) << std::endl;
+    pf->store(os, format);
+    os << this->createEndTag( "pf" ) << std::endl; 
+
+    os << this->createStartTag( "ConfigCopy" ) << std::endl;
+    //we do not want to read until end of file for restoring    
+    confCopy->setIoUntilEndOfFile(false);
+    confCopy->store(os,format);
+    os << this->createEndTag( "ConfigCopy" ) << std::endl; 
+    
+    os << this->createStartTag( "gphyper" ) << std::endl;
+    //store the underlying data
+    //will be done in gphyper->store(of,format)
+    //store the optimized parameter values and all that stuff
+    gphyper->store(os, format);
+    os << this->createEndTag( "gphyper" ) << std::endl;   
+    
+    
+    // done
+    os << this->createEndTag( "GPHIKRegression" ) << std::endl;    
+  }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }
+}
+
+void GPHIKRegression::clear ()
+{
+  if ( gphyper != NULL )
+  {
+    delete gphyper;
+    gphyper = NULL;
+  }
+  
+  if (pf != NULL)
+  {
+    delete pf;
+    pf = NULL;
+  }
+
+  if ( confCopy != NULL )
+  {
+    delete confCopy; 
+    confCopy = NULL;
+  } 
+}
+
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+// interface specific methods for incremental extensions
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+
+void GPHIKRegression::addExample( const NICE::SparseVector * example, 
+                                  const double & label, 
+                                  const bool & performOptimizationAfterIncrement
+                                )
+{
+  
+  if ( this->gphyper == NULL )
+  {
+    //call train method instead
+    std::cerr << "Regression object not initially trained yet -- run initial training instead of incremental extension!"  << std::endl;
+     
+    std::vector< const NICE::SparseVector *> examplesVec;
+    examplesVec.push_back ( example );
+    
+    NICE::Vector labelsVec ( 1 , label );
+    
+    this->train ( examplesVec, labelsVec );
+  }
+  else
+  {
+    this->gphyper->addExample( example, label, performOptimizationAfterIncrement );  
+  }
+}
+
+void GPHIKRegression::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+                                           const NICE::Vector & newLabels,
+                                           const bool & performOptimizationAfterIncrement
+                                         )
+{
+  //are new examples available? If not, nothing has to be done
+  if ( newExamples.size() < 1)
+    return;
+
+  if ( this->gphyper == NULL )
+  {
+    //call train method instead
+    std::cerr << "Regression object not initially trained yet -- run initial training instead of incremental extension!"  << std::endl;
+    
+    this->train ( newExamples, newLabels );    
+  }
+  else
+  {
+    this->gphyper->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );     
+  }
+}
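
For orientation, here is a minimal usage sketch of the new regression interface (not part of the commit). The toy data, the noise value, and the config section name are illustrative assumptions; note that estimate() falls back to std::numeric_limits<double>::max() for the uncertainty unless uncertainty prediction is enabled in the config.

// sketch: train GPHIKRegression on toy data and query an estimate
#include <iostream>
#include <vector>
#include <core/basics/Config.h>
#include <core/vector/SparseVectorT.h>
#include "gp-hik-core/GPHIKRegression.h"

int main()
{
  NICE::Config conf;
  conf.sD ( "GPHIKRegression", "noise", 0.01 );  // assumed label noise

  NICE::GPHIKRegression regressor ( &conf, "GPHIKRegression" );

  // three sparse 2-d training examples with real-valued labels
  std::vector< const NICE::SparseVector * > examplesTrain;
  NICE::Vector labelsTrain ( 3, 0.0 );
  for ( int i = 0; i < 3; i++ )
  {
    NICE::SparseVector * example = new NICE::SparseVector ( 2 );
    example->insert ( std::pair<int, double> ( 0, 0.1 * ( i + 1 ) ) );
    example->insert ( std::pair<int, double> ( 1, 0.2 * ( i + 1 ) ) );
    examplesTrain.push_back ( example );
    labelsTrain[i] = 2.0 * i;
  }

  regressor.train ( examplesTrain, labelsTrain );

  double result ( 0.0 );
  double uncertainty ( 0.0 );
  regressor.estimate ( examplesTrain[0], result, uncertainty );
  std::cout << "estimate: " << result << " uncertainty: " << uncertainty << std::endl;

  // the caller keeps ownership of the examples (mirrors the usage in the removed GPHIK.cpp)
  for ( size_t i = 0; i < examplesTrain.size(); i++ )
    delete examplesTrain[i];

  return 0;
}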

+ 251 - 0
GPHIKRegression.h

@@ -0,0 +1,251 @@
+/** 
+* @file GPHIKRegression.h
+* @brief Main interface for our GP HIK regression implementation (Interface)
+* @author Alexander Freytag
+* @date 15-01-2014 (dd-mm-yyyy)
+*/
+#ifndef _NICE_GPHIKREGRESSIONINCLUDE
+#define _NICE_GPHIKREGRESSIONINCLUDE
+
+// STL includes
+#include <string>
+#include <limits>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Persistent.h>
+// 
+#include <core/vector/SparseVectorT.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/FMKGPHyperparameterOptimization.h"
+#include "gp-hik-core/OnlineLearnable.h"
+#include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
+
+namespace NICE {
+  
+ /** 
+ * @class GPHIKRegression
+ * @brief Main interface for our GP HIK regression implementation (Interface)
+ * @author Alexander Freytag
+ */
+ 
+class GPHIKRegression : public NICE::Persistent, public NICE::OnlineLearnable
+{
+
+  protected:
+    
+    /////////////////////////
+    /////////////////////////
+    // PROTECTED VARIABLES //
+    /////////////////////////
+    /////////////////////////
+    
+    // output/debug related settings
+    
+    /** verbose flag for useful output*/
+    bool verbose;
+    /** debug flag for several outputs useful for debugging*/
+    bool debug;
+    
+    // general specifications
+    
+    /** Header in configfile where variable settings are stored */
+    std::string confSection;
+    /** Configuration file specifying variable settings */
+    NICE::Config *confCopy; 
+    
+    // internal objects 
+    
+    /** Main object doing all the jobs: training, regression, optimization, ... */
+    NICE::FMKGPHyperparameterOptimization *gphyper;    
+    
+    /** Possibility for transforming feature values, parameters can be optimized */
+    NICE::ParameterizedFunction *pf;    
+    
+    
+    
+    
+    /** Gaussian label noise for model regularization */
+    double noise;
+
+    enum VarianceApproximation{
+      APPROXIMATE_ROUGH,
+      APPROXIMATE_FINE,
+      EXACT,
+      NONE
+    };
+    
+    /** Which technique for variance approximations shall be used */
+    VarianceApproximation varianceApproximation;
+    
+    /** compute the uncertainty prediction during regression? */
+    bool uncertaintyPredictionForRegression;
+    
+    /////////////////////////
+    /////////////////////////
+    //  PROTECTED METHODS  //
+    /////////////////////////
+    /////////////////////////
+    
+    /** 
+    * @brief Setup internal variables and objects used
+    * @author Alexander Freytag
+    * @param conf Config file to specify variable settings
+    * @param s_confSection name of the config section holding the relevant settings
+    */    
+    void init(const NICE::Config *conf, const std::string & s_confSection);
+       
+
+  public:
+
+    /** 
+     * @brief standard constructor
+     * @author Alexander Freytag
+     */
+    GPHIKRegression( const NICE::Config *conf = NULL, const std::string & s_confSection = "GPHIKRegression" );
+      
+    /**
+     * @brief simple destructor
+     * @author Alexander Freytag
+     */
+    ~GPHIKRegression();
+    
+    ///////////////////// ///////////////////// /////////////////////
+    //                         GET / SET
+    ///////////////////// ///////////////////// /////////////////////      
+    
+   
+   
+    ///////////////////// ///////////////////// /////////////////////
+    //                      REGRESSION STUFF
+    ///////////////////// ///////////////////// /////////////////////      
+    
+    /** 
+     * @brief Estimate output of a given example with the previously learnt model
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example (SparseVector) for which regression shall be performed, given in a sparse representation
+     * @param result (double) regression result
+     */        
+    void estimate ( const NICE::SparseVector * example,  double & result ) const;
+    
+    /** 
+     * @brief Estimate output of a given example with the previously learnt model
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example (SparseVector) for which regression shall be performed, given in a sparse representation
+     * @param result (double) regression result
+     * @param uncertainty (double*) predictive variance of the regression result, if computed
+     */    
+    void estimate ( const NICE::SparseVector * example,  double & result, double & uncertainty ) const;
+    
+    /** 
+     * @brief Estimate output of a given example with the previously learnt model
+     * NOTE: whenever possible, you should use the sparse version to obtain significantly smaller computation times
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example (non-sparse Vector) for which regression shall be performed, given in a non-sparse representation
+     * @param result (double) regression result
+     */        
+    void estimate ( const NICE::Vector * example,  double & result ) const;
+    
+    /** 
+     * @brief Estimate output of a given example with the previously learnt model
+     * NOTE: whenever possible, you should use the sparse version to obtain significantly smaller computation times
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example (non-sparse Vector) for which regression shall be performed, given in a non-sparse representation
+     * @param result (double) regression result
+     * @param uncertainty (double*) predictive variance of the regression result, if computed
+     */    
+    void estimate ( const NICE::Vector * example,  double & result, double & uncertainty ) const;    
+
+    /**
+     * @brief train this regression method using a given set of examples and corresponding labels
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param examples (std::vector< const NICE::SparseVector *>) training data given in a sparse representation
+     * @param labels (Vector) labels
+     */
+    void train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels );
+    
+    
+    /**
+     * @brief Clone regression object
+     * @author Alexander Freytag
+     */    
+    GPHIKRegression *clone () const;
+
+    /** 
+     * @brief prediction of regression uncertainty
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example example for which the regression uncertainty shall be predicted, given in a sparse representation
+     * @param uncertainty contains the resulting regression uncertainty
+     */       
+    void predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const;
+    
+    /** 
+     * @brief prediction of regression uncertainty
+     * @date 15-01-2014 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example example for which the regression uncertainty shall be predicted, given in a non-sparse representation
+     * @param uncertainty contains the resulting regression uncertainty
+     */       
+    void predictUncertainty( const NICE::Vector * example, double & uncertainty ) const;    
+    
+
+
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////   
+    
+    /** 
+     * @brief Load regression object from external file (stream)
+     * @author Alexander Freytag
+     */     
+    void restore ( std::istream & is, int format = 0 );
+    
+    /** 
+     * @brief Save regression object to external file (stream)
+     * @author Alexander Freytag
+     */     
+    void store ( std::ostream & os, int format = 0 ) const;
+    
+    /** 
+     * @brief Clear regression object
+     * @author Alexander Freytag
+     */     
+    void clear ();
+    
+    
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    
+    /** 
+     * @brief add a new example
+     * @author Alexander Freytag
+     */    
+    virtual void addExample( const NICE::SparseVector * example, 
+                              const double & label, 
+                              const bool & performOptimizationAfterIncrement = true
+                            );
+                          
+    /** 
+     * @brief add several new examples
+     * @author Alexander Freytag
+     */    
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+                                      const NICE::Vector & newLabels,
+                                      const bool & performOptimizationAfterIncrement = true
+                                    );       
+
+
+
+};
+
+}
+
+#endif
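
Because GPHIKRegression implements the Persistent interface declared above, a trained model can round-trip through a stream. A minimal sketch, assuming a previously trained regressor and an invented file name:

#include <fstream>
#include "gp-hik-core/GPHIKRegression.h"

void saveAndReload ( const NICE::GPHIKRegression & trainedRegressor )
{
  // store() throws if the object has not been trained yet
  std::ofstream ofs ( "regressor.model" );
  trainedRegressor.store ( ofs );
  ofs.close();

  // restore() clears any previous state before reading
  NICE::GPHIKRegression restored;
  std::ifstream ifs ( "regressor.model" );
  restored.restore ( ifs );
  ifs.close();
}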

+ 1 - 1
IKMLinearCombination.cpp

@@ -24,7 +24,7 @@ IKMLinearCombination::~IKMLinearCombination()
 {
   if ( this->matrices.size() != 0)
   {
-    for (int i = 0; i < this->matrices.size(); i++)
+    for (int i = 0; (uint)i < this->matrices.size(); i++)
       delete this->matrices[i];
   }
 }
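
The cast above silences a signed/unsigned comparison warning. An equivalent variant, assuming negative indices are never needed, types the counter unsigned from the start:

// sketch: same loop without the per-iteration cast
for ( uint i = 0; i < this->matrices.size(); i++ )
  delete this->matrices[i];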

+ 33 - 9
OnlineLearnable.h

@@ -21,20 +21,44 @@ class OnlineLearnable {
  
   public:
     // Interface specifications
-    virtual void addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement = true
-			   ) = 0;
-			   
+    
+    /** 
+     * @brief Interface method to add a single example to the current object
+     * @author Alexander Freytag
+     * @param newExample example to be added
+     * @param newLabel corresponding class label
+     * @param performOptimizationAfterIncrement (optional) whether or not to run a hyper parameter optimization after adding new examples
+     */    
+    virtual void addExample( const NICE::SparseVector * newExample, 
+                              const double & newLabel, 
+                              const bool & performOptimizationAfterIncrement = true
+                            ) = 0;
+
+    /** 
+     * @brief Interface method to add multiple examples to the current object
+     * @author Alexander Freytag
+     * @param newExamples vector of example to be added
+     * @param newLabels vector of corresponding class labels
+     * @param performOptimizationAfterIncrement (optional) whether or not to run a hyper parameter optimization after adding new examples
+     */                                       
+                            
     virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement = true
-				    ) = 0;    
+                                      const NICE::Vector & newLabels,
+                                      const bool & performOptimizationAfterIncrement = true
+                                    ) = 0;    
 
 
-    // Provided functions and overloaded stream operators
+    /** 
+     * @brief simple destructor
+     * @author Alexander Freytag
+     */                                       
     virtual ~OnlineLearnable () {};
     
+    
+    /** 
+     * @brief default constructor
+     * @author Alexander Freytag
+     */    
     // just to prevent senseless compiler warnings
     OnlineLearnable() {};   
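
To illustrate the contract documented above, a hypothetical minimal implementer (class name and behavior invented for this sketch) could look like this:

#include "gp-hik-core/OnlineLearnable.h"

// toy learner tracking only the running mean of the labels
class RunningMeanLearner : public NICE::OnlineLearnable
{
  private:
    double labelSum;
    int    exampleCount;

  public:
    RunningMeanLearner() : labelSum ( 0.0 ), exampleCount ( 0 ) {};

    virtual void addExample ( const NICE::SparseVector * newExample,
                              const double & newLabel,
                              const bool & performOptimizationAfterIncrement = true
                            )
    {
      // the toy model ignores the example itself and only updates its label statistics
      labelSum += newLabel;
      exampleCount++;
    }

    virtual void addMultipleExamples ( const std::vector< const NICE::SparseVector * > & newExamples,
                                       const NICE::Vector & newLabels,
                                       const bool & performOptimizationAfterIncrement = true
                                     )
    {
      for ( uint i = 0; i < newLabels.size(); i++ )
        this->addExample ( newExamples[i], newLabels[i], performOptimizationAfterIncrement );
    }
};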
 

+ 242 - 0
matlab/ConverterMatlabToNICE.cpp

@@ -0,0 +1,242 @@
+
+#include "ConverterMatlabToNICE.h"
+
+using namespace NICE;
+
+ConverterMatlabToNICE::ConverterMatlabToNICE()
+{
+}
+
+ConverterMatlabToNICE::~ConverterMatlabToNICE()
+{
+}
+
+/* Pass analyze_sparse a pointer to a sparse mxArray.  A sparse mxArray
+   only stores its nonzero elements.  The values of the nonzero elements 
+   are stored in the pr and pi arrays.  The tricky part of analyzing
+   sparse mxArray's is figuring out the indices where the nonzero
+   elements are stored.  (See the mxSetIr and mxSetJc reference pages
+   for details.) */
+std::vector< const NICE::SparseVector * > ConverterMatlabToNICE::convertSparseMatrixToNice( const mxArray *array_ptr ) const
+{
+  double   *pr;//, *pi;
+  mwIndex  *ir, *jc;
+  mwSize   col, total=0;
+  mwIndex  starting_row_index, stopping_row_index, current_row_index;
+  mwSize   i_numExamples, i_numDim;
+  
+  /* Get the starting positions of all four data arrays. */ 
+  pr = mxGetPr( array_ptr );
+  // no complex data supported here
+  // pi = mxGetPi(array_ptr);
+  ir = mxGetIr( array_ptr );
+  jc = mxGetJc( array_ptr );
+  
+  // dimensions of the matrix -> number of examples and feature dimension
+  i_numExamples = mxGetM( array_ptr );  
+  i_numDim = mxGetN( array_ptr );
+    
+  // initialize output variable -- don't use const pointers here since the content of the vectors will change 
+  // in the following loop. We reinterpret the vector later on into a const version
+  std::vector< NICE::SparseVector * > sparseMatrix;
+  sparseMatrix.resize ( i_numExamples );
+    
+  for ( std::vector< NICE::SparseVector * >::iterator matIt = sparseMatrix.begin(); 
+        matIt != sparseMatrix.end(); matIt++)
+  {
+      *matIt = new NICE::SparseVector( i_numDim );
+  }  
+  
+  // now copy the data
+  for ( col = 0; col < i_numDim; col++ )
+  { 
+    starting_row_index = jc[col]; 
+    stopping_row_index = jc[col+1]; 
+    
+    // empty column?
+    if (starting_row_index == stopping_row_index)
+      continue;
+    else
+    {
+      for ( current_row_index = starting_row_index; 
+            current_row_index < stopping_row_index; 
+            current_row_index++
+          )
+      {
+          // note: no complex data supported here
+          sparseMatrix[ ir[current_row_index] ]->insert( std::pair<int, double>( col, pr[total++] ) );
+      } // for-loop
+      
+    }
+  } // for-loop over columns
+  
+  //NOTE
+  // Compiler doesn't know how to automatically convert
+  // std::vector<T*> to std::vector<T const*> because the way
+  // the template system works means that in theory the two may
+  // be specialised differently.  This is an explicit conversion.
+  return reinterpret_cast< std::vector< const NICE::SparseVector *> &>( sparseMatrix );
+}
+
+// b_adaptIndexMtoC: if true, dim k will be inserted as k, not as k-1 (which would be the default for  M->C)
+NICE::SparseVector ConverterMatlabToNICE::convertSparseVectorToNice(
+               const mxArray* array_ptr,
+               const bool & b_adaptIndexMtoC
+    )  const
+{
+  double   *pr, *pi;
+  mwIndex  *ir, *jc;
+  mwSize   col, total=0;
+  mwIndex  starting_row_index, stopping_row_index, current_row_index;
+  mwSize   dimy, dimx;
+  
+  /* Get the starting positions of all four data arrays. */ 
+  pr = mxGetPr( array_ptr );
+  pi = mxGetPi( array_ptr );
+  ir = mxGetIr( array_ptr );
+  jc = mxGetJc( array_ptr );
+  
+  // dimensions of the matrix -- we expect a vector, i.e., one dimension has to be 1
+  dimy = mxGetM( array_ptr );  
+  dimx = mxGetN( array_ptr );
+  
+  double* ptr = mxGetPr( array_ptr );
+
+  if( (dimx != 1) && (dimy != 1) )
+    mexErrMsgIdAndTxt("mexnice:error","Vector expected");
+  
+
+  NICE::SparseVector svec( std::max(dimx, dimy) );
+   
+  
+  if ( dimx > 1)
+  {
+    for ( mwSize row=0; row < dimx; row++)
+    { 
+        // empty column?
+        if (jc[row] == jc[row+1])
+        {
+          continue;
+        }
+        else
+        {
+          // note: no complex data supported here
+            double value ( pr[total++] );
+            if ( b_adaptIndexMtoC ) 
+                svec.insert( std::pair<int, double>( row+1,  value ) );
+            else
+                svec.insert( std::pair<int, double>( row,  value ) );
+        }
+    } // for loop over cols      
+  }
+  else
+  {
+    mwSize numNonZero = jc[1]-jc[0];
+    
+    for ( mwSize colNonZero=0; colNonZero < numNonZero; colNonZero++)
+    {
+        // note: no complex data supported here
+        double value ( pr[total++] );
+        if ( b_adaptIndexMtoC ) 
+            svec.insert( std::pair<int, double>( ir[colNonZero]+1, value  ) );
+        else
+            svec.insert( std::pair<int, double>( ir[colNonZero], value  ) );
+    }          
+  }
+
+  return svec;
+}
+
+NICE::Matrix ConverterMatlabToNICE::convertDoubleMatrixToNice( const mxArray* matlabMatrix ) const
+{
+  if( !mxIsDouble( matlabMatrix ) )
+    mexErrMsgIdAndTxt( "mexnice:error","Expected double in convertDoubleMatrixToNice" );
+
+  const mwSize *dims;
+  int dimx, dimy, numdims;
+  
+  //figure out dimensions
+  dims = mxGetDimensions( matlabMatrix );
+  numdims = mxGetNumberOfDimensions( matlabMatrix );
+  dimy = (int)dims[0];
+  dimx = (int)dims[1];
+  
+  double* ptr = mxGetPr( matlabMatrix );
+
+  NICE::Matrix niceMatrix(ptr, dimy, dimx, NICE::Matrix::external); 
+
+  return niceMatrix;
+}
+
+
+NICE::Vector ConverterMatlabToNICE::convertDoubleVectorToNice( const mxArray* matlabMatrix ) const
+{
+  if( !mxIsDouble( matlabMatrix ) )
+    mexErrMsgIdAndTxt( "mexnice:error","Expected double in convertDoubleVectorToNice" );
+
+  const mwSize *dims;
+  int dimx, dimy, numdims;
+  
+  //figure out dimensions
+  dims = mxGetDimensions( matlabMatrix );
+  numdims = mxGetNumberOfDimensions( matlabMatrix );
+  dimy = (int)dims[0];
+  dimx = (int)dims[1];
+  
+  double* ptr = mxGetPr( matlabMatrix );
+
+  if( (dimx != 1) && (dimy != 1) )
+    mexErrMsgIdAndTxt("mexnice:error","Vector expected");
+
+  int dim = std::max(dimx, dimy);    
+
+  NICE::Vector niceVector( dim, 0.0 );
+  
+  for( int i = 0; i < dim; i++ )
+  {
+      niceVector(i) = ptr[i];
+  }
+
+  return niceVector;
+}
+
+
+
+std::string ConverterMatlabToNICE::convertMatlabToString( const mxArray *matlabString ) const
+{
+  if( !mxIsChar( matlabString ) )
+    mexErrMsgIdAndTxt("mexnice:error","Expected string");
+
+  char *cstring = mxArrayToString( matlabString );
+  std::string s( cstring );
+  mxFree(cstring);
+  return s;
+}
+
+
+int ConverterMatlabToNICE::convertMatlabToInt32( const mxArray *matlabInt32 ) const
+{
+  if( !mxIsInt32( matlabInt32 ) )
+    mexErrMsgIdAndTxt("mexnice:error","Expected int32");
+
+  int* ptr = (int*) mxGetData( matlabInt32 );
+  return ptr[0];
+}
+
+double ConverterMatlabToNICE::convertMatlabToDouble( const mxArray *matlabDouble ) const
+{
+  if( !mxIsDouble(matlabDouble) )
+    mexErrMsgIdAndTxt("mexnice:error","Expected double");
+
+  double* ptr = (double*) mxGetData( matlabDouble );
+  return ptr[0];
+}
+
+bool ConverterMatlabToNICE::convertMatlabToBool( const mxArray *matlabBool ) const
+{
+  if( !mxIsLogical( matlabBool ) )
+    mexErrMsgIdAndTxt("mexnice:error","Expected bool");
+
+  bool* ptr = (bool*) mxGetData( matlabBool );
+  return ptr[0];
+}
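
A sketch of how these converters are typically driven from a mex entry point; the error message and the assumed input layout (data matrix first, label vector second) are illustrative:

#include "mex.h"
#include "ConverterMatlabToNICE.h"

void mexFunction ( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
  if ( nrhs < 2 )
    mexErrMsgIdAndTxt ( "mexnice:error", "Expected a data matrix and a label vector" );

  NICE::ConverterMatlabToNICE converter;

  // sparse Matlab input becomes a vector of SparseVector pointers;
  // dense input would go through convertDoubleMatrixToNice instead
  std::vector< const NICE::SparseVector * > examples;
  if ( mxIsSparse ( prhs[0] ) )
    examples = converter.convertSparseMatrixToNice ( prhs[0] );

  NICE::Vector labels = converter.convertDoubleVectorToNice ( prhs[1] );

  // ... hand the converted data to a classifier or regressor here ...

  for ( uint i = 0; i < examples.size(); i++ )
    delete examples[i];
}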

+ 118 - 0
matlab/ConverterMatlabToNICE.h

@@ -0,0 +1,118 @@
+/** 
+* @file ConverterMatlabToNICE.h
+* @author Alexander Freytag
+* @brief Several methods for converting Matlab data into NICE containers (Interface)
+* @date 15-01-2014 ( dd-mm-yyyy)
+*/
+#ifndef _NICE_CONVERTERMATLABTONICEINCLUDE
+#define _NICE_CONVERTERMATLABTONICEINCLUDE
+
+// STL includes
+#include "mex.h"
+
+// NICE-core includes
+#include <core/vector/MatrixT.h>
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+namespace NICE {
+
+ /** 
+ * @class ConverterMatlabToNICE
+ * @author Alexander Freytag
+ * @brief Several methods for converting Matlab data into NICE containers
+ */
+
+class ConverterMatlabToNICE
+{
+
+  protected:
+  
+  public:
+
+    /**
+     * @brief Default constructor
+     **/
+    ConverterMatlabToNICE();
+
+    /**
+     * @brief Default destructor
+     **/    
+    ~ConverterMatlabToNICE();
+  
+    /**
+     * @brief Convert a sparse matlab matrix into an std::vector of NICE::SparseVectors *
+     * @TODO could be also converted into VVector!
+     * 
+     * @param array_ptr Sparse MxD Matlab matrix
+     * @return std::vector< const NICE::SparseVector * >
+     **/  
+    std::vector< const NICE::SparseVector * > convertSparseMatrixToNice( const mxArray *array_ptr ) const;
+
+    /**
+     * @brief Convert a sparse 1xD Matlab matrix into a SparseVector
+     *
+     * @param array_ptr Sparse 1xD Matlab matrix
+     * @param b_adaptIndexMtoC if true, dim k will be inserted as k, not as k-1 (which would be the default for  M->C). Defaults to false.
+     * @return NICE::SparseVector
+     **/
+    NICE::SparseVector convertSparseVectorToNice(
+		  const mxArray* array_ptr,
+		  const bool & b_adaptIndexMtoC = false
+	) const;
+
+
+    /**
+     * @brief Convert a MxD Matlab matrix into a NICE::Matrix
+     *
+     * @param matlabMatrix a matlab MxD matrix
+     * @return NICE::Matrix
+     **/
+    NICE::Matrix convertDoubleMatrixToNice( const mxArray* matlabMatrix ) const;
+    
+    /**
+     * @brief Convert a 1xD Matlab matrix into a NICE::Vector
+     *
+     * @param matlabMatrix a matlab 1xD matrix
+     * @return  NICE::Vector
+     **/
+    NICE::Vector convertDoubleVectorToNice( const mxArray* matlabMatrix ) const;
+
+    /**
+     * @brief Convert a Matlab char array into an std::string
+     *
+     * @param matlabString a matlab char array variable
+     * @return std::string
+     **/
+    std::string convertMatlabToString( const mxArray *matlabString ) const;
+
+    /**
+     * @brief Convert a Matlab int32 variable into a C++ int
+     *
+     * @param matlabInt32 a matlab int32 variable
+     * @return int
+     **/
+    int convertMatlabToInt32( const mxArray *matlabInt32 ) const;
+    
+    /**
+     * @brief Convert a Matlab double variable into a C++ double
+     *
+     * @param matlabDouble a matlab double variable
+     * @return double
+     **/
+    double convertMatlabToDouble( const mxArray *matlabDouble ) const;
+    
+    /**
+     * @brief Convert a Matlab bool variable into a C++ bool
+     *
+     * @param matlabBool a matlab bool variable
+     * @return bool
+     **/    
+    bool convertMatlabToBool( const mxArray *matlabBool ) const;
+
+};
+
+}
+
+#endif

+ 78 - 0
matlab/ConverterNICEToMatlab.cpp

@@ -0,0 +1,78 @@
+
+#include "ConverterNICEToMatlab.h"
+
+using namespace NICE;
+
+ConverterNICEToMatlab::ConverterNICEToMatlab()
+{
+}
+
+ConverterNICEToMatlab::~ConverterNICEToMatlab()
+{
+}
+
+
+// b_adaptIndexCtoM: if true, dim k will be inserted as k, not as k+1 (which would be the default for C->M)
+mxArray* ConverterNICEToMatlab::convertSparseVectorFromNice( const NICE::SparseVector & niceSvec, const bool & b_adaptIndexCtoM ) const
+{
+    mxArray * matlabSparseVec = mxCreateSparse( niceSvec.getDim() /*m*/, 1/*n*/, niceSvec.size()/*nzmax*/, mxREAL);
+    
+    // To make the returned sparse mxArray useful, you must initialize the pr, ir, jc, and (if it exists) pi arrays.    
+    // mxCreateSparse allocates space for:
+    // 
+    // A pr array of length nzmax.
+    // A pi array of length nzmax, but only if ComplexFlag is mxCOMPLEX in C (1 in Fortran).
+    // An ir array of length nzmax.
+    // A jc array of length n+1.  
+  
+    double* prPtr = mxGetPr(matlabSparseVec);
+    mwIndex * ir = mxGetIr( matlabSparseVec );
+    
+    mwIndex * jc = mxGetJc( matlabSparseVec );
+    jc[1] = niceSvec.size(); jc[0] = 0; 
+    
+    
+    mwSize cnt = 0;
+        
+    for ( NICE::SparseVector::const_iterator myIt = niceSvec.begin(); myIt != niceSvec.end(); myIt++, cnt++ )
+    {
+        // set index
+        if ( b_adaptIndexCtoM ) 
+            ir[cnt] = myIt->first-1;
+        else
+            ir[cnt] = myIt->first;
+        
+        // set value
+        prPtr[cnt] = myIt->second;
+    }
+    
+    return matlabSparseVec;
+}
+
+mxArray* ConverterNICEToMatlab::convertMatrixFromNice( const NICE::Matrix & niceMatrix ) const
+{
+  mxArray *matlabMatrix = mxCreateDoubleMatrix( niceMatrix.rows(), niceMatrix.cols(), mxREAL );
+  double* matlabMatrixPtr = mxGetPr( matlabMatrix );
+
+  for( uint i = 0; i < niceMatrix.rows(); i++ )
+  {
+    for( uint j = 0; j < niceMatrix.cols(); j++ )
+    {
+      matlabMatrixPtr[i + j*niceMatrix.rows()] = niceMatrix(i,j);
+    }
+  }
+  
+  return matlabMatrix;
+}
+
+mxArray* ConverterNICEToMatlab::convertVectorFromNice( const NICE::Vector & niceVector ) const
+{
+  mxArray *matlabVector = mxCreateDoubleMatrix( niceVector.size(), 1, mxREAL );
+  double* matlabVectorPtr = mxGetPr( matlabVector );
+
+  for( uint i = 0; i < niceVector.size(); i++ )
+  {
+    matlabVectorPtr[i] = niceVector[i];
+  }
+  return matlabVector;
+}
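
The reverse direction then reduces to filling the mex output array; a sketch with invented function and variable names:

#include "mex.h"
#include "ConverterNICEToMatlab.h"

void writeResults ( mxArray *plhs[],
                    const NICE::Vector & resultVec,
                    const NICE::SparseVector & resultSvec )
{
  NICE::ConverterNICEToMatlab converter;

  // a dense NICE::Vector becomes a Dx1 double matrix
  plhs[0] = converter.convertVectorFromNice ( resultVec );

  // a NICE::SparseVector becomes a Dx1 sparse matrix; the flag applies
  // the C-to-Matlab index shift described in the accompanying header
  plhs[1] = converter.convertSparseVectorFromNice ( resultSvec, true /*b_adaptIndexCtoM*/ );
}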

+ 80 - 0
matlab/ConverterNICEToMatlab.h

@@ -0,0 +1,80 @@
+/** 
+* @file ConverterNICEToMatlab.h
+* @author Alexander Freytag
+* @brief Several methods for converting NICE containers into Matlab data (Interface)
+* @date 15-01-2014 ( dd-mm-yyyy)
+*/
+#ifndef _NICE_CONVERTERNICETOMATLABINCLUDE
+#define _NICE_CONVERTERNICETOMATLABINCLUDE
+
+// STL includes
+#include "mex.h"
+
+// NICE-core includes
+#include <core/vector/MatrixT.h>
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+namespace NICE {
+
+ /** 
+ * @class ConverterNICEToMatlab
+ * @author Alexander Freytag
+ * @brief Several methods for converting NICE containers into Matlab data
+ */
+
+class ConverterNICEToMatlab
+{
+
+  protected:
+  
+  public:
+
+    /**
+     * @brief Default constructor
+     **/
+    ConverterNICEToMatlab();
+
+    /**
+     * @brief Default destructor
+     **/    
+    ~ConverterNICEToMatlab();
+  
+
+    /**
+     * @brief Convert a SparseVector into a Matlab Dx1 sparse matrix
+     * @author Alexander Freytag
+     * @date 15-01-2014 ( dd-mm-yyyy)
+     *
+     * @param niceSvec a NICE::SparseVector
+     * @param b_adaptIndexCtoM if true, dim k will be inserted as k, not as k+1 (which would be the default for C->M) Defaults to false.
+     * @return mxArray*
+     **/
+    mxArray* convertSparseVectorFromNice( const NICE::SparseVector & niceSvec, const bool & b_adaptIndexCtoM = false ) const;
+
+    /**
+     * @brief Convert a NICE::Matrix into a full Matlab MxD matrix
+     * @author Alexander Freytag
+     * @date 15-01-2014 ( dd-mm-yyyy)
+     *
+     * @param niceMatrix a NICE::Matrix
+     * @return mxArray*
+     **/
+    mxArray* convertMatrixFromNice( const NICE::Matrix & niceMatrix ) const;
+    
+    /**
+     * @brief Convert a NICE::Vector into a full Matlab 1xD matrix
+     * @author Alexander Freytag
+     * @date 15-01-2014 ( dd-mm-yyyy)
+     *
+     * @param niceVector a NICE::Vector
+     * @return mxArray*
+     **/
+    
+    mxArray* convertVectorFromNice( const NICE::Vector & niceVector ) const;
+
+};
+
+}
+
+#endif

+ 0 - 854
matlab/GPHIK.cpp

@@ -1,854 +0,0 @@
-#include <math.h>
-#include <matrix.h>
-#include "mex.h"
-#include "classHandleMtoC.h"
-
-// NICE-core includes
-#include <core/basics/Config.h>
-#include <core/basics/Timer.h>
-#include <core/vector/MatrixT.h>
-#include <core/vector/VectorT.h>
-
-// gp-hik-core includes
-#include "gp-hik-core/GPHIKClassifier.h"
-
-using namespace std; //C basics
-using namespace NICE;  // nice-core
-
-/* Pass analyze_sparse a pointer to a sparse mxArray.  A sparse mxArray
-   only stores its nonzero elements.  The values of the nonzero elements 
-   are stored in the pr and pi arrays.  The tricky part of analyzing
-   sparse mxArray's is figuring out the indices where the nonzero
-   elements are stored.  (See the mxSetIr and mxSetJc reference pages
-   for details. */  
-std::vector< NICE::SparseVector * > convertSparseMatrixToNice(const mxArray *array_ptr)
-{
-  double  *pr;//, *pi;
-  mwIndex  *ir, *jc;
-  mwSize      col, total=0;
-  mwIndex   starting_row_index, stopping_row_index, current_row_index;
-  mwSize      i_numExamples, i_numDim;
-  
-  /* Get the starting positions of all four data arrays. */ 
-  pr = mxGetPr(array_ptr);
-//   pi = mxGetPi(array_ptr);
-  ir = mxGetIr(array_ptr);
-  jc = mxGetJc(array_ptr);
-  
-  // dimenions of the matrix -> feature dimension and number of examples
-  i_numExamples = mxGetM(array_ptr);  
-  i_numDim = mxGetN(array_ptr);
-    
-  // initialize output variable
-  std::vector< NICE::SparseVector * > sparseMatrix;
-  sparseMatrix.resize ( i_numExamples );
-    
-  for ( std::vector< NICE::SparseVector * >::iterator matIt = sparseMatrix.begin(); 
-        matIt != sparseMatrix.end(); matIt++)
-  {
-      *matIt = new NICE::SparseVector( i_numDim );
-  }  
-  
-  // now copy the data
-  for (col=0; col < i_numDim; col++)
-  { 
-    starting_row_index = jc[col]; 
-    stopping_row_index = jc[col+1]; 
-    
-    // empty column?
-    if (starting_row_index == stopping_row_index)
-      continue;
-    else
-    {
-      for ( current_row_index = starting_row_index; 
-            current_row_index < stopping_row_index; 
-	        current_row_index++)
-      {
-          //note: no complex data supported her
-          sparseMatrix[ ir[current_row_index] ]->insert( std::pair<int, double>( col, pr[total++] ) );
-      } // for-loop
-      
-    }
-  } // for-loop over columns
-  
-  return sparseMatrix;
-}
-
-
-// b_adaptIndexMtoC: if true, dim k will be inserted as k, not as k-1 (which would be the default for  M->C)
-NICE::SparseVector convertSparseVectorToNice(const mxArray* array_ptr, const bool & b_adaptIndexMtoC = false )
-{
-  double  *pr, *pi;
-  mwIndex  *ir, *jc;
-  mwSize      col, total=0;
-  mwIndex   starting_row_index, stopping_row_index, current_row_index;
-  mwSize      dimy, dimx;
-  
-  /* Get the starting positions of all four data arrays. */ 
-  pr = mxGetPr(array_ptr);
-  pi = mxGetPi(array_ptr);
-  ir = mxGetIr(array_ptr);
-  jc = mxGetJc(array_ptr);
-  
-  // dimenions of the matrix -> feature dimension and number of examples
-  dimy = mxGetM(array_ptr);  
-  dimx = mxGetN(array_ptr);
-  
-  double* ptr = mxGetPr(array_ptr);
-
-  if(dimx != 1 && dimy != 1)
-    mexErrMsgIdAndTxt("mexnice:error","Vector expected");
-  
-
-  NICE::SparseVector svec( std::max(dimx, dimy) );
-   
-  
-  if ( dimx > 1)
-  {
-    for ( mwSize row=0; row < dimx; row++)
-    { 
-        // empty column?
-        if (jc[row] == jc[row+1])
-        {
-          continue;
-        }
-        else
-        {
-          //note: no complex data supported her
-            double value ( pr[total++] );
-            if ( b_adaptIndexMtoC ) 
-                svec.insert( std::pair<int, double>( row+1,  value ) );
-            else
-                svec.insert( std::pair<int, double>( row,  value ) );
-        }
-    } // for loop over cols      
-  }
-  else
-  {
-    mwSize numNonZero = jc[1]-jc[0];
-    
-    for ( mwSize colNonZero=0; colNonZero < numNonZero; colNonZero++)
-    {
-        //note: no complex data supported her
-        double value ( pr[total++] );
-        if ( b_adaptIndexMtoC ) 
-            svec.insert( std::pair<int, double>( ir[colNonZero]+1, value  ) );
-        else
-            svec.insert( std::pair<int, double>( ir[colNonZero], value  ) );
-    }          
-  }
-
-  return svec;
-}
-
-// b_adaptIndexCtoM: if true, dim k will be inserted as k, not as k+1 (which would be the default for C->M)
-mxArray* convertSparseVectorFromNice( const NICE::SparseVector & scores, const bool & b_adaptIndexCtoM = false)
-{
-    mxArray * matlabSparseVec = mxCreateSparse( scores.getDim() /*m*/, 1/*n*/, scores.size()/*nzmax*/, mxREAL);
-    
-    // To make the returned sparse mxArray useful, you must initialize the pr, ir, jc, and (if it exists) pi arrays.    
-    // mxCreateSparse allocates space for:
-    // 
-    // A pr array of length nzmax.
-    // A pi array of length nzmax, but only if ComplexFlag is mxCOMPLEX in C (1 in Fortran).
-    // An ir array of length nzmax.
-    // A jc array of length n+1.  
-  
-    double* prPtr = mxGetPr(matlabSparseVec);
-    mwIndex * ir = mxGetIr( matlabSparseVec );
-    
-    mwIndex * jc = mxGetJc( matlabSparseVec );
-    jc[1] = scores.size(); jc[0] = 0; 
-    
-    
-    mwSize cnt = 0;
-        
-    for ( NICE::SparseVector::const_iterator myIt = scores.begin(); myIt != scores.end(); myIt++, cnt++ )
-    {
-        // set index
-        if ( b_adaptIndexCtoM ) 
-            ir[cnt] = myIt->first-1;
-        else
-            ir[cnt] = myIt->first;
-        
-        // set value
-        prPtr[cnt] = myIt->second;
-    }
-    
-    return matlabSparseVec;
-}
-
-
-mxArray* convertMatrixFromNice(NICE::Matrix & niceMatrix)
-{
-	mxArray *matlabMatrix = mxCreateDoubleMatrix(niceMatrix.rows(),niceMatrix.cols(),mxREAL);
-	double* matlabMatrixPtr = mxGetPr(matlabMatrix);
-
-	for(int i=0; i<niceMatrix.rows(); i++)
-    {
-		for(int j=0; j<niceMatrix.cols(); j++)
-		{
-			matlabMatrixPtr[i + j*niceMatrix.rows()] = niceMatrix(i,j);
-		}
-    }
-	return matlabMatrix;
-}
-
-NICE::Matrix convertMatrixToNice(const mxArray* matlabMatrix)
-{
-	//todo: do not assume double
-
-  const mwSize *dims;
-  int dimx, dimy, numdims;
-    //figure out dimensions
-  dims = mxGetDimensions(matlabMatrix);
-  numdims = mxGetNumberOfDimensions(matlabMatrix);
-  dimy = (int)dims[0]; dimx = (int)dims[1];
-  double* ptr = mxGetPr(matlabMatrix);
-
-  NICE::Matrix niceMatrix(ptr, dimy, dimx, NICE::Matrix::external); 
-
-  return niceMatrix;
-}
-
-mxArray* convertVectorFromNice(NICE::Vector & niceVector)
-{
-	//cout << "start convertVectorFromNice" << endl;
-	mxArray *matlabVector = mxCreateDoubleMatrix(niceVector.size(), 1, mxREAL);
-	double* matlabVectorPtr = mxGetPr(matlabVector);
-
-	for(int i=0;i<niceVector.size(); i++)
-    {
-        matlabVectorPtr[i] = niceVector[i];
-    }
-	return matlabVector;
-}
-
-NICE::Vector convertVectorToNice(const mxArray* matlabMatrix)
-{
-	//todo: do not assume double
-
-  const mwSize *dims;
-  int dimx, dimy, numdims;
-    //figure out dimensions
-  dims = mxGetDimensions(matlabMatrix);
-  numdims = mxGetNumberOfDimensions(matlabMatrix);
-  dimy = (int)dims[0]; dimx = (int)dims[1];
-  double* ptr = mxGetPr(matlabMatrix);
-
-  if(dimx != 1 && dimy != 1)
-    mexErrMsgIdAndTxt("mexnice:error","Vector expected");
-
-  int dim = max(dimx, dimy);    
-
-  NICE::Vector niceVector(dim, 0.0);
-  
-  for(int i=0;i<dim;i++)
-  {
-      niceVector(i) = ptr[i];
-  }
-
-  return niceVector;
-}
-
-
-
-std::string convertMatlabToString(const mxArray *matlabString)
-{
-  if(!mxIsChar(matlabString))
-    mexErrMsgIdAndTxt("mexnice:error","Expected string");
-
-  char *cstring = mxArrayToString(matlabString);
-  std::string s(cstring);
-  mxFree(cstring);
-  return s;
-}
-
-
-int convertMatlabToInt32(const mxArray *matlabInt32)
-{
-  if(!mxIsInt32(matlabInt32))
-    mexErrMsgIdAndTxt("mexnice:error","Expected int32");
-
-  int* ptr = (int*)mxGetData(matlabInt32);
-  return ptr[0];
-}
-
-double convertMatlabToDouble(const mxArray *matlabDouble)
-{
-  if(!mxIsDouble(matlabDouble))
-    mexErrMsgIdAndTxt("mexnice:error","Expected double");
-
-  double* ptr = (double*)mxGetData(matlabDouble);
-  return ptr[0];
-}
-
-NICE::Config parseParameters(const mxArray *prhs[], int nrhs)
-{
-  NICE::Config conf;
-  
-  // if first argument is the filename of an existing config file,
-  // read the config accordingly
-  
-  int i_start ( 0 );
-  std::string variable = convertMatlabToString(prhs[i_start]);
-  if(variable == "conf")
-  {
-      conf = NICE::Config ( convertMatlabToString( prhs[i_start+1] )  );
-      i_start = i_start+2;
-  }
-  
-  // now run over all given parameter specifications
-  // and add them to the config
-  for( int i=i_start; i < nrhs; i+=2 )
-  {
-    std::string variable = convertMatlabToString(prhs[i]);
-    if(variable == "ils_verbose")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "true" && value != "false")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_verbose\'. \'true\' or \'false\' expected.");
-      if(value == "true")
-        conf.sB("GPHIKClassifier", variable, true);
-      else
-        conf.sB("GPHIKClassifier", variable, false);
-    }
-
-    if(variable == "ils_max_iterations")
-    {
-      int value = convertMatlabToInt32(prhs[i+1]);
-      if(value < 1)
-        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'ils_max_iterations\'.");
-      conf.sI("GPHIKClassifier", variable, value);
-    }
-
-    if(variable == "ils_method")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "CG" && value != "CGL" && value != "SYMMLQ" && value != "MINRES")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_method\'. \'CG\', \'CGL\', \'SYMMLQ\' or \'MINRES\' expected.");
-        conf.sS("GPHIKClassifier", variable, value);
-    }
-
-    if(variable == "ils_min_delta")
-    {
-      double value = convertMatlabToDouble(prhs[i+1]);
-      if(value < 0.0)
-        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'ils_min_delta\'.");
-      conf.sD("GPHIKClassifier", variable, value);
-    }
-
-    if(variable == "ils_min_residual")
-    {
-      double value = convertMatlabToDouble(prhs[i+1]);
-      if(value < 0.0)
-        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'ils_min_residual\'.");
-      conf.sD("GPHIKClassifier", variable, value);
-    }
-
-
-    if(variable == "optimization_method")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "greedy" && value != "downhillsimplex" && value != "none")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'optimization_method\'. \'greedy\', \'downhillsimplex\' or \'none\' expected.");
-        conf.sS("GPHIKClassifier", variable, value);
-    }
-
-    if(variable == "use_quantization")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "true" && value != "false")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'use_quantization\'. \'true\' or \'false\' expected.");
-      if(value == "true")
-        conf.sB("GPHIKClassifier", variable, true);
-      else
-        conf.sB("GPHIKClassifier", variable, false);
-    }
-
-    if(variable == "num_bins")
-    {
-      int value = convertMatlabToInt32(prhs[i+1]);
-      if(value < 1)
-        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'num_bins\'.");
-      conf.sI("GPHIKClassifier", variable, value);
-    }
-
-    if(variable == "transform")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "absexp" && value != "exp" && value != "MKL" && value != "WeightedDim")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'transform\'. \'absexp\', \'exp\' , \'MKL\' or \'WeightedDim\' expected.");
-        conf.sS("GPHIKClassifier", variable, value);
-    }
-
-    if(variable == "verboseTime")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "true" && value != "false")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'verboseTime\'. \'true\' or \'false\' expected.");
-      if(value == "true")
-        conf.sB("GPHIKClassifier", variable, true);
-      else
-        conf.sB("GPHIKClassifier", variable, false);
-    }
-
-    if(variable == "verbose")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "true" && value != "false")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'verbose\'. \'true\' or \'false\' expected.");
-      if(value == "true")
-        conf.sB("GPHIKClassifier", variable, true);
-      else
-        conf.sB("GPHIKClassifier", variable, false);
-    }
-
-    if(variable == "noise")
-    {
-      double value = convertMatlabToDouble(prhs[i+1]);
-      if(value < 0.0)
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value larger than 0 for \'noise\'.");
-      conf.sD("GPHIKClassifier", variable, value);
-    }
-
-
-    if(variable == "optimize_noise")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "true" && value != "false")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'optimize_noise\'. \'true\' or \'false\' expected.");
-      if(value == "true")
-        conf.sB("GPHIKClassifier", variable, true);
-      else
-        conf.sB("GPHIKClassifier", variable, false);
-    }
-    
-    if(variable == "varianceApproximation")
-    {
-      string value = convertMatlabToString(prhs[i+1]);
-      if(value != "approximate_fine" && value != "approximate_rough" && value != "exact" && value != "none")
-        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'varianceApproximation\'. \'approximate_fine\', \'approximate_rough\', \'none\' or \'exact\' expected.");
-        conf.sS("GPHIKClassifier", variable, value);
-    }
-    
-    if(variable == "nrOfEigenvaluesToConsiderForVarApprox")
-    {
-      double value = convertMatlabToDouble(prhs[i+1]);
-      conf.sI("GPHIKClassifier", variable, (int) value);
-    }    
-    
-  }
-
-
-  return conf;
-}
-
-// MAIN MATLAB FUNCTION
-void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
-{    
-    // get the command string specifying what to do
-    if (nrhs < 1)
-        mexErrMsgTxt("No commands and options passed... Aborting!");        
-    
-    if( !mxIsChar( prhs[0] ) )
-        mexErrMsgTxt("First argument needs to be the command, ie.e, the class method to call... Aborting!");        
-    
-    std::string cmd = convertMatlabToString( prhs[0] );
-      
-        
-    // create object
-    if ( !strcmp("new", cmd.c_str() ) )
-    {
-        // check output variable
-        if (nlhs != 1)
-            mexErrMsgTxt("New: One output expected.");
-        
-        // read config settings
-        NICE::Config conf = parseParameters(prhs+1,nrhs-1);
-        
-        // create class instance
-        NICE::GPHIKClassifier * classifier = new NICE::GPHIKClassifier ( &conf );
-        
-         
-        // handle to the C++ instance
-        plhs[0] = convertPtr2Mat<NICE::GPHIKClassifier>( classifier );
-        return;
-    }
-    
-    // in all other cases, there should be a second input,
-    // which the be the class instance handle
-    if (nrhs < 2)
-      mexErrMsgTxt("Second input should be a class instance handle.");
-    
-    // delete object
-    if ( !strcmp("delete", cmd.c_str() ) )
-    {
-        // Destroy the C++ object
-        destroyObject<NICE::GPHIKClassifier>(prhs[1]);
-        return;
-    }
-    
-    // get the class instance pointer from the second input
-    // every following function needs the classifier object
-    NICE::GPHIKClassifier * classifier = convertMat2Ptr<NICE::GPHIKClassifier>(prhs[1]);
-    
-    
-    ////////////////////////////////////////
-    //  Check which class method to call  //
-    ////////////////////////////////////////
-    
-    
-    // standard train - assumes initialized object
-    if (!strcmp("train", cmd.c_str() ))
-    {
-        // Check parameters
-        if (nlhs < 0 || nrhs < 4)
-        {
-            mexErrMsgTxt("Train: Unexpected arguments.");
-        }
-        
-        //------------- read the data --------------
-          
-        std::vector< NICE::SparseVector *> examplesTrain;
-        NICE::Vector yMultiTrain;  
-
-        if ( mxIsSparse( prhs[2] ) )
-        {
-            examplesTrain = convertSparseMatrixToNice( prhs[2] );
-        }
-        else
-        {
-            NICE::Matrix dataTrain;
-            dataTrain = convertMatrixToNice(prhs[2]);
-            
-            //----------------- convert data to sparse data structures ---------
-            examplesTrain.resize( dataTrain.rows() );
-
-                    
-            std::vector< NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
-            for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
-            {
-                *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
-            }            
-        }
-          
-          yMultiTrain = convertVectorToNice(prhs[3]);
-          
-//           std::cerr << " DATA AFTER CONVERSION: \n" << std::endl;
-//           int lineIdx(0);
-//           for ( std::vector< NICE::SparseVector *>::const_iterator exTrainIt = examplesTrain.begin();
-//                 exTrainIt != examplesTrain.end(); exTrainIt++, lineIdx++)
-//           {
-//               std::cerr << "\n lineIdx: " << lineIdx << std::endl;
-//               (*exTrainIt)->store( std::cerr );
-//               
-//           }
-
-          // test assumption
-          {
-            if( yMultiTrain.Min() < 0)
-              mexErrMsgIdAndTxt("mexnice:error","Class labels smaller 0 are not allowed");
-          }
-
-
-          //----------------- train our classifier -------------
-          classifier->train ( examplesTrain , yMultiTrain );
-
-          //----------------- clean up -------------
-          for(int i=0;i<examplesTrain.size();i++)
-              delete examplesTrain[i];
-        
-        return;
-    }
-    
-    
-    // Classify    
-    if ( !strcmp("classify", cmd.c_str() ) )
-    {
-        // Check parameters
-        if ( (nlhs < 0) || (nrhs < 2) )
-        {
-            mexErrMsgTxt("Test: Unexpected arguments.");
-        }
-        
-        //------------- read the data --------------
-
-        int result;
-        NICE::SparseVector scores;
-        double uncertainty;        
-
-        if ( mxIsSparse( prhs[2] ) )
-        {
-            NICE::SparseVector * example;
-            example = new NICE::SparseVector ( convertSparseVectorToNice( prhs[2] ) );
-            classifier->classify ( example,  result, scores, uncertainty );
-            
-            //----------------- clean up -------------
-            delete example;
-        }
-        else
-        {
-            NICE::Vector * example;
-            example = new NICE::Vector ( convertVectorToNice(prhs[2]) ); 
-            classifier->classify ( example,  result, scores, uncertainty );
-            
-            //----------------- clean up -------------
-            delete example;            
-        }
-          
-          
-
-          // output
-          plhs[0] = mxCreateDoubleScalar( result ); 
-          
-          
-          if(nlhs >= 2)
-          {
-            plhs[1] = convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
-          }
-          if(nlhs >= 3)
-          {
-            plhs[2] = mxCreateDoubleScalar( uncertainty );          
-          }
-          return;
-    }
-    
-    // Classify    
-    if ( !strcmp("uncertainty", cmd.c_str() ) )
-    {
-        // Check parameters
-        if ( (nlhs < 0) || (nrhs < 2) )
-        {
-            mexErrMsgTxt("Test: Unexpected arguments.");
-        }
-        
-        double uncertainty;        
-        
-        //------------- read the data --------------
-
-        if ( mxIsSparse( prhs[2] ) )
-        {
-            NICE::SparseVector * example;
-            example = new NICE::SparseVector ( convertSparseVectorToNice( prhs[2] ) );
-            classifier->predictUncertainty( example, uncertainty );
-            
-            //----------------- clean up -------------
-            delete example;            
-        }
-        else
-        {
-            NICE::Vector * example;
-            example = new NICE::Vector ( convertVectorToNice(prhs[2]) ); 
-            classifier->predictUncertainty( example, uncertainty );
-            
-            //----------------- clean up -------------
-            delete example;            
-        }
-        
-       
-
-          // output
-          plhs[0] = mxCreateDoubleScalar( uncertainty );                    
-          return;
-    }    
-    
-    
-    // Test    
-    if ( !strcmp("test", cmd.c_str() ) )
-    {        
-        // Check parameters
-        if (nlhs < 0 || nrhs < 4)
-            mexErrMsgTxt("Test: Unexpected arguments.");
-        //------------- read the data --------------
-        
-        
-        bool dataIsSparse ( mxIsSparse( prhs[2] ) );
-        
-        std::vector< NICE::SparseVector *> dataTest_sparse;
-        NICE::Matrix dataTest_dense;
-
-        if ( dataIsSparse )
-        {
-            dataTest_sparse = convertSparseMatrixToNice( prhs[2] );
-        }
-        else
-        {    
-            dataTest_dense = convertMatrixToNice(prhs[2]);          
-        }        
-
-          NICE::Vector yMultiTest;
-          yMultiTest = convertVectorToNice(prhs[3]);
-
-          
-          // ------------------------------------------
-          // ------------- PREPARATION --------------
-          // ------------------------------------------   
-          
-          // determine classes known during training and corresponding mapping
-          // thereby allow for non-continous class labels
-          std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
-          
-          int noClassesKnownTraining ( classesKnownTraining.size() );
-          std::map<int,int> mapClNoToIdxTrain;
-          std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-          for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-              mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
-          
-          // determine classes known during testing and corresponding mapping
-          // thereby allow for non-continous class labels
-          std::set<int> classesKnownTest;
-          classesKnownTest.clear();
-          
-  
-          // determine which classes we have in our label vector
-          // -> MATLAB: myClasses = unique(y);
-          for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
-          {
-            if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
-            {
-              classesKnownTest.insert ( *it );
-            }
-          }          
-          
-          int noClassesKnownTest ( classesKnownTest.size() );  
-          std::map<int,int> mapClNoToIdxTest;
-          std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-          for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-              mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );          
-          
-
-
-          int i_numTestSamples;
-          
-          if ( dataIsSparse ) 
-              i_numTestSamples = dataTest_sparse.size();
-          else
-              i_numTestSamples = (int) dataTest_dense.rows();
-          
-          NICE::Matrix confusionMatrix( noClassesKnownTraining, noClassesKnownTest, 0.0);
-          NICE::Matrix scores( i_numTestSamples, noClassesKnownTraining, 0.0);
-          
-          
-
-          // ------------------------------------------
-          // ------------- CLASSIFICATION --------------
-          // ------------------------------------------          
-          
-          NICE::Timer t;
-          double testTime (0.0);
-          
-
-
-          for (int i = 0; i < i_numTestSamples; i++)
-          {
-             //----------------- convert data to sparse data structures ---------
-            
-
-             int result;
-             NICE::SparseVector exampleScoresSparse;
-
-             if ( dataIsSparse )
-             {                
-                // and classify
-                t.start();
-                classifier->classify( dataTest_sparse[ i ], result, exampleScoresSparse );
-                t.stop();
-                testTime += t.getLast();
-             }
-             else
-             {
-                 NICE::Vector example ( dataTest_dense.getRow(i) );
-                // and classify
-                t.start();
-                classifier->classify( &example, result, exampleScoresSparse );
-                t.stop();
-                testTime += t.getLast();                
-             }
-
-             confusionMatrix(  mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
-             int scoreCnt ( 0 );
-             for ( NICE::SparseVector::const_iterator scoreIt = exampleScoresSparse.begin(); scoreIt != exampleScoresSparse.end(); scoreIt++, scoreCnt++ )
-                scores(i,scoreCnt) = scoreIt->second;
-                
-          }
-          
-          std::cerr << "Time for testing: " << testTime << std::endl;          
-          
-          // clean up
-          if ( dataIsSparse )
-          {
-              for ( std::vector<NICE::SparseVector *>::iterator it = dataTest_sparse.begin(); it != dataTest_sparse.end(); it++) 
-                  delete *it;
-          }
-          
-
-
-          confusionMatrix.normalizeColumnsL1();
-          //std::cerr << confusionMatrix << std::endl;
-
-          double recRate = confusionMatrix.trace()/confusionMatrix.rows();
-          //std::cerr << "average recognition rate: " << recRate << std::endl;
-
-          
-          plhs[0] = mxCreateDoubleScalar( recRate );
-
-          if(nlhs >= 2)
-            plhs[1] = convertMatrixFromNice(confusionMatrix);
-          if(nlhs >= 3)
-            plhs[2] = convertMatrixFromNice(scores);          
-          
-          
-        return;
-    }
-    
-    // store the classifier    
-    if ( !strcmp("store", cmd.c_str() ) || !strcmp("save", cmd.c_str() ) )
-    {
-        // Check parameters
-        if ( nrhs < 3 )
-            mexErrMsgTxt("store: no destination given.");        
-               
-        std::string s_destination = convertMatlabToString( prhs[2] );
-          
-        std::filebuf fb;
-        fb.open ( s_destination.c_str(), ios::out );
-        std::ostream os(&fb);
-        //
-        classifier->store( os );
-        //   
-        fb.close();        
-            
-        return;
-    }
-    
-    // load classifier from external file    
-    if ( !strcmp("restore", cmd.c_str() ) || !strcmp("load", cmd.c_str() ) )
-    {
-        // Check parameters
-        if ( nrhs < 3 )
-            mexErrMsgTxt("restore: no destination given.");        
-               
-        std::string s_destination = convertMatlabToString( prhs[2] );
-        
-        std::cerr << " aim at restoring the classifier from " << s_destination << std::endl;
-          
-        std::filebuf fbIn;
-        fbIn.open ( s_destination.c_str(), ios::in );
-        std::istream is (&fbIn);
-        //
-        classifier->restore( is );
-        //   
-        fbIn.close();        
-            
-        return;
-    }    
-    
-    
-    // Got here, so command not recognized
-    
-    std::string errorMsg (cmd.c_str() );
-    errorMsg += " -- command not recognized.";
-    mexErrMsgTxt( errorMsg.c_str() );
-
-}

+ 73 - 0
matlab/GPHIKClassifier.m

@@ -0,0 +1,73 @@
+% brief:    MATLAB class wrapper for the underlying Matlab-C++ Interface (GPHIKClassifierMex.cpp)
+% author:   Alexander Freytag
+% date:     07-01-2014 (dd-mm-yyyy)
+classdef GPHIKClassifier < handle
+    
+    properties (SetAccess = private, Hidden = true)
+        % Handle to the underlying C++ class instance
+        objectHandle; 
+    end
+    
+    methods
+        
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%      Constructor / Destructor    %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%        
+        %% constructor - create object
+        function this = GPHIKClassifier(varargin)
+            this.objectHandle = GPHIKClassifierMex('new', varargin{:});
+        end
+        
+        %% destructor - delete object
+        function delete(this)
+            GPHIKClassifierMex('delete', this.objectHandle);
+        end
+
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%       Classification stuff       %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%         
+        %% train - standard train - assumes initialized object
+        function varargout = train(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('train', this.objectHandle, varargin{:});
+        end
+        
+        %% classify
+        function varargout = classify(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('classify', this.objectHandle, varargin{:});
+        end 
+        
+        %% uncertainty - Uncertainty prediction
+        function varargout = uncertainty(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('uncertainty', this.objectHandle, varargin{:});
+        end        
+
+        %% test - evaluate classifier on whole test set
+        function varargout = test(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('test', this.objectHandle, varargin{:});
+        end
+        
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%       Online Learnable methods   %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %% addExample
+        function varargout = addExample(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('addExample', this.objectHandle, varargin{:});
+        end 
+        %% addMultipleExamples
+        function varargout = addMultipleExamples(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('addMultipleExamples', this.objectHandle, varargin{:});
+        end
+        
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%       Persistent methods         %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %% store - store the classifier to an external file
+        function varargout = store(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('store', this.objectHandle, varargin{:});
+        end
+        %% restore -  load classifier from external file 
+        function varargout = restore(this, varargin)
+            [varargout{1:nargout}] = GPHIKClassifierMex('restore', this.objectHandle, varargin{:});
+        end
+    end
+end
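+
+% Minimal usage sketch (assuming GPHIKClassifierMex is compiled and on the path):
+%   myGphik = GPHIKClassifier ( 'noise', 0.1 );
+%   myGphik.train ( dataTrain, labelsTrain );
+%   [ result, scores ] = myGphik.classify ( exampleTest );
+%   clear myGphik;   % calls the destructor and frees the C++ object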

+ 729 - 0
matlab/GPHIKClassifierMex.cpp

@@ -0,0 +1,729 @@
+/** 
+* @file GPHIKClassifierMex.cpp
+* @author Alexander Freytag
+* @date 07-01-2014 (dd-mm-yyyy)
+* @brief Matlab interface of our GPHIKClassifier, allowing for training, classification, optimization, variance prediction, incremental learning, and storing/re-storing.
+*/
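+//
+// Illustrative call pattern from MATLAB (a sketch, assuming the MEX file is
+// compiled and available on the MATLAB path as GPHIKClassifierMex):
+//   handle = GPHIKClassifierMex ( 'new', 'noise', 0.1 );
+//   GPHIKClassifierMex ( 'train', handle, dataTrain, labelsTrain );
+//   [ result, scores ] = GPHIKClassifierMex ( 'classify', handle, example );
+//   GPHIKClassifierMex ( 'delete', handle );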
+
+// STL includes
+#include <math.h>
+#include <matrix.h>
+#include <mex.h>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+#include <core/vector/MatrixT.h>
+#include <core/vector/VectorT.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKClassifier.h"
+
+
+// Interface for conversion between Matlab and C objects
+#include "gp-hik-core/matlab/classHandleMtoC.h"
+#include "gp-hik-core/matlab/ConverterMatlabToNICE.h"
+#include "gp-hik-core/matlab/ConverterNICEToMatlab.h"
+
+const NICE::ConverterMatlabToNICE converterMtoNICE;
+const NICE::ConverterNICEToMatlab converterNICEtoM;
+
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+
+NICE::Config parseParametersGPHIKClassifier(const mxArray *prhs[], int nrhs)
+{
+  NICE::Config conf;
+  
+  // if first argument is the filename of an existing config file,
+  // read the config accordingly
+  
+  int i_start ( 0 );
+  std::string variable = converterMtoNICE.convertMatlabToString(prhs[i_start]);
+  if(variable == "conf")
+  {
+      conf = NICE::Config ( converterMtoNICE.convertMatlabToString( prhs[i_start+1] )  );
+      i_start = i_start+2;
+  }
+  
+  // now run over all given parameter specifications
+  // and add them to the config
+  for( int i=i_start; i < nrhs; i+=2 )
+  {
+    std::string variable = converterMtoNICE.convertMatlabToString(prhs[i]);
+    
+    /////////////////////////////////////////
+    // READ STANDARD BOOLEAN VARIABLES
+    /////////////////////////////////////////
+    if( (variable == "verboseTime") || (variable == "verbose") ||
+        (variable == "optimize_noise") || (variable == "uncertaintyPredictionForClassification") ||
+        (variable == "use_quantization") || (variable == "ils_verbose")
+      )
+    {
+      if ( mxIsChar( prhs[i+1] ) )
+      {
+        string value = converterMtoNICE.convertMatlabToString( prhs[i+1] );
+        if ( (value != "true") && (value != "false") )
+        {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. In string modus, \'true\' or \'false\' expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+        }
+        
+        if( value == "true" )
+          conf.sB("GPHIKClassifier", variable, true);
+        else
+          conf.sB("GPHIKClassifier", variable, false);
+      }
+      else if ( mxIsLogical( prhs[i+1] ) )
+      {
+        bool value = converterMtoNICE.convertMatlabToBool( prhs[i+1] );
+        conf.sB("GPHIKClassifier", variable, value);
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. \'true\', \'false\', or logical expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );        
+      }
+    }
+    
+    /////////////////////////////////////////
+    // READ STANDARD INT VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "nrOfEigenvaluesToConsiderForVarApprox")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = converterMtoNICE.convertMatlabToDouble(prhs[i+1]);
+        conf.sI("GPHIKClassifier", variable, (int) value);        
+      }
+      else if ( mxIsInt32( prhs[i+1] ) )
+      {
+        int value = converterMtoNICE.convertMatlabToInt32(prhs[i+1]);
+        conf.sI("GPHIKClassifier", variable, value);          
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Int32 or Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );         
+      }     
+    }
+    
+    /////////////////////////////////////////
+    // READ STRICT POSITIVE INT VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "num_bins") || (variable == "ils_max_iterations")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = converterMtoNICE.convertMatlabToDouble(prhs[i+1]);
+        if( value < 1 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );     
+        }
+        conf.sI("GPHIKClassifier", variable, (int) value);        
+      }
+      else if ( mxIsInt32( prhs[i+1] ) )
+      {
+        int value = converterMtoNICE.convertMatlabToInt32(prhs[i+1]);
+        if( value < 1 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );     
+        }        
+        conf.sI("GPHIKClassifier", variable, value);          
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Int32 or Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );         
+      }     
+    }
+    
+    /////////////////////////////////////////
+    // READ POSITIVE DOUBLE VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "ils_min_delta") || (variable == "ils_min_residual") ||
+         (variable == "noise")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = converterMtoNICE.convertMatlabToDouble(prhs[i+1]);
+        if( value < 0.0 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );     
+        }
+        conf.sD("GPHIKClassifier", variable, value);        
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );         
+      }     
+    }    
+    
+    /////////////////////////////////////////
+    // READ REMAINING SPECIFIC VARIABLES
+    /////////////////////////////////////////  
+
+    if(variable == "ils_method")
+    {
+      string value = converterMtoNICE.convertMatlabToString(prhs[i+1]);
+      if(value != "CG" && value != "CGL" && value != "SYMMLQ" && value != "MINRES")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_method\'. \'CG\', \'CGL\', \'SYMMLQ\' or \'MINRES\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+
+
+    if(variable == "optimization_method")
+    {
+      string value = converterMtoNICE.convertMatlabToString(prhs[i+1]);
+      if(value != "greedy" && value != "downhillsimplex" && value != "none")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'optimization_method\'. \'greedy\', \'downhillsimplex\' or \'none\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "transform")
+    {
+      string value = converterMtoNICE.convertMatlabToString( prhs[i+1] );
+      if(value != "absexp" && value != "exp" && value != "MKL" && value != "WeightedDim")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'transform\'. \'absexp\', \'exp\' , \'MKL\' or \'WeightedDim\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+
+  
+    if(variable == "varianceApproximation")
+    {
+      string value = converterMtoNICE.convertMatlabToString(prhs[i+1]);
+      if(value != "approximate_fine" && value != "approximate_rough" && value != "exact" && value != "none")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'varianceApproximation\'. \'approximate_fine\', \'approximate_rough\', \'none\' or \'exact\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+    
+
+    
+  }
+
+
+  return conf;
+}
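+
+// Illustrative mapping performed by parseParametersGPHIKClassifier (a sketch;
+// the parameter names are exactly the ones checked above):
+//   MATLAB call:    GPHIKClassifierMex ( 'new', 'verbose', true, 'num_bins', 100 )
+//   resulting conf: [GPHIKClassifier] verbose=true, num_bins=100
+// A leading pair 'conf', '/path/to/settings.conf' first initializes the config
+// from that file; all subsequent name/value pairs overwrite its entries.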
+
+// MAIN MATLAB FUNCTION
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{    
+    // get the command string specifying what to do
+    if (nrhs < 1)
+        mexErrMsgTxt("No commands and options passed... Aborting!");        
+    
+    if( !mxIsChar( prhs[0] ) )
+        mexErrMsgTxt("First argument needs to be the command, ie.e, the class method to call... Aborting!");        
+    
+    std::string cmd = converterMtoNICE.convertMatlabToString( prhs[0] );
+      
+        
+    // create object
+    if ( !strcmp("new", cmd.c_str() ) )
+    {
+        // check output variable
+        if (nlhs != 1)
+            mexErrMsgTxt("New: One output expected.");
+        
+        // read config settings
+        NICE::Config conf = parseParametersGPHIKClassifier(prhs+1,nrhs-1);
+        
+        // create class instance
+        NICE::GPHIKClassifier * classifier = new NICE::GPHIKClassifier ( &conf, "GPHIKClassifier" /*sectionName in config*/ );
+        
+         
+        // handle to the C++ instance
+        plhs[0] = convertPtr2Mat<NICE::GPHIKClassifier>( classifier );
+        return;
+    }
+    
+    // in all other cases, there should be a second input,
+    // which should be the class instance handle
+    if (nrhs < 2)
+      mexErrMsgTxt("Second input should be a class instance handle.");
+    
+    // delete object
+    if ( !strcmp("delete", cmd.c_str() ) )
+    {
+        // Destroy the C++ object
+        destroyObject<NICE::GPHIKClassifier>(prhs[1]);
+        return;
+    }
+    
+    // get the class instance pointer from the second input
+    // every following function needs the classifier object
+    NICE::GPHIKClassifier * classifier = convertMat2Ptr<NICE::GPHIKClassifier>(prhs[1]);
+    
+    
+    ////////////////////////////////////////
+    //  Check which class method to call  //
+    ////////////////////////////////////////
+    
+    
+    // standard train - assumes initialized object
+    if (!strcmp("train", cmd.c_str() ))
+    {
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+        {
+            mexErrMsgTxt("Train: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+          
+        std::vector< const NICE::SparseVector *> examplesTrain;
+        NICE::Vector yMultiTrain;  
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            examplesTrain = converterMtoNICE.convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            NICE::Matrix dataTrain;
+            dataTrain = converterMtoNICE.convertDoubleMatrixToNice(prhs[2]);
+            
+            //----------------- convert data to sparse data structures ---------
+            examplesTrain.resize( dataTrain.rows() );
+
+                    
+            std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+            for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+            {
+                *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+            }            
+        }
+          
+        yMultiTrain = converterMtoNICE.convertDoubleVectorToNice(prhs[3]);
+
+        //----------------- train our classifier -------------
+        classifier->train ( examplesTrain , yMultiTrain );
+
+        //----------------- clean up -------------
+        for(int i=0;i<examplesTrain.size();i++)
+            delete examplesTrain[i];
+        
+        return;
+    }
+    
+    
+    // Classify    
+    if ( !strcmp("classify", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        int result;
+        NICE::SparseVector scores;
+        double uncertainty;        
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( converterMtoNICE.convertSparseVectorToNice( prhs[2] ) );
+            classifier->classify ( example,  result, scores, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( converterMtoNICE.convertDoubleVectorToNice(prhs[2]) ); 
+            classifier->classify ( example,  result, scores, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+          
+
+        // output
+        plhs[0] = mxCreateDoubleScalar( result );
+
+        if(nlhs >= 2)
+        {
+          plhs[1] = converterNICEtoM.convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
+        }
+        if(nlhs >= 3)
+        {
+          plhs[2] = mxCreateDoubleScalar( uncertainty );
+        }
+        return;
+    
+    // Uncertainty prediction    
+    if ( !strcmp("uncertainty", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+        
+        double uncertainty;        
+        
+        //------------- read the data --------------
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( converterMtoNICE.convertSparseVectorToNice( prhs[2] ) );
+            classifier->predictUncertainty( example, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( converterMtoNICE.convertDoubleVectorToNice(prhs[2]) ); 
+            classifier->predictUncertainty( example, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+        
+
+        // output
+        plhs[0] = mxCreateDoubleScalar( uncertainty );
+        return;
+    
+    
+    // Test - evaluate classifier on whole test set  
+    if ( !strcmp("test", cmd.c_str() ) )
+    {        
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        //------------- read the data --------------
+        
+        
+        bool dataIsSparse ( mxIsSparse( prhs[2] ) );
+        
+        std::vector< const NICE::SparseVector *> dataTest_sparse;
+        NICE::Matrix dataTest_dense;
+
+        if ( dataIsSparse )
+        {
+            dataTest_sparse = converterMtoNICE.convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {    
+            dataTest_dense = converterMtoNICE.convertDoubleMatrixToNice(prhs[2]);          
+        }        
+
+        NICE::Vector yMultiTest;
+        yMultiTest = converterMtoNICE.convertDoubleVectorToNice(prhs[3]);
+
+        
+        // ------------------------------------------
+        // ------------- PREPARATION --------------
+        // ------------------------------------------   
+        
+        // determine classes known during training and corresponding mapping
+        // thereby allowing for non-continuous class labels
+        std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+        
+        int noClassesKnownTraining ( classesKnownTraining.size() );
+        std::map<int,int> mapClNoToIdxTrain;
+        std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+        for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+            mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
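+        // e.g. (illustration): training labels {1,3,7} are mapped to the
+        // consecutive indices {0,1,2} used as row indices of the confusion matrix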
+        
+        // determine classes known during testing and corresponding mapping
+        // thereby allowing for non-continuous class labels
+        std::set<int> classesKnownTest;
+        classesKnownTest.clear();
+        
+
+        // determine which classes we have in our label vector
+        // -> MATLAB: myClasses = unique(y);
+        for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+        {
+          if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+          {
+            classesKnownTest.insert ( *it );
+          }
+        }          
+        
+        int noClassesKnownTest ( classesKnownTest.size() );  
+        std::map<int,int> mapClNoToIdxTest;
+        std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
+        for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
+            mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );          
+        
+
+
+        int i_numTestSamples;
+        
+        if ( dataIsSparse ) 
+            i_numTestSamples = dataTest_sparse.size();
+        else
+            i_numTestSamples = (int) dataTest_dense.rows();
+        
+        NICE::Matrix confusionMatrix( noClassesKnownTraining, noClassesKnownTest, 0.0);
+        NICE::Matrix scores( i_numTestSamples, noClassesKnownTraining, 0.0);
+          
+          
+
+        // ------------------------------------------
+        // ------------- CLASSIFICATION --------------
+        // ------------------------------------------          
+        
+        NICE::Timer t;
+        double testTime (0.0);
+        
+
+
+        for (int i = 0; i < i_numTestSamples; i++)
+        {
+            //----------------- convert data to sparse data structures ---------
+          
+
+            int result;
+            NICE::SparseVector exampleScoresSparse;
+
+            if ( dataIsSparse )
+            {                
+              // and classify
+              t.start();
+              classifier->classify( dataTest_sparse[ i ], result, exampleScoresSparse );
+              t.stop();
+              testTime += t.getLast();
+            }
+            else
+            {
+                NICE::Vector example ( dataTest_dense.getRow(i) );
+              // and classify
+              t.start();
+              classifier->classify( &example, result, exampleScoresSparse );
+              t.stop();
+              testTime += t.getLast();                
+            }
+
+            confusionMatrix(  mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+            int scoreCnt ( 0 );
+            for ( NICE::SparseVector::const_iterator scoreIt = exampleScoresSparse.begin(); scoreIt != exampleScoresSparse.end(); scoreIt++, scoreCnt++ )
+            {
+              scores(i,scoreCnt) = scoreIt->second;
+            }
+              
+        }
+        
+        std::cerr << "Time for testing: " << testTime << std::endl;          
+        
+        // clean up
+        if ( dataIsSparse )
+        {
+            for ( std::vector<const NICE::SparseVector *>::iterator it = dataTest_sparse.begin(); it != dataTest_sparse.end(); it++) 
+                delete *it;
+        }
+        
+
+
+        confusionMatrix.normalizeColumnsL1();
+
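+        // each column (true test class) now sums to one, so the trace adds up the
+        // per-class recognition rates and dividing by the number of test classes
+        // yields the average recognition rate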
+        double recRate = confusionMatrix.trace()/confusionMatrix.cols();
+
+        
+        plhs[0] = mxCreateDoubleScalar( recRate );
+
+        if(nlhs >= 2)
+          plhs[1] = converterNICEtoM.convertMatrixFromNice(confusionMatrix);
+        if(nlhs >= 3)
+          plhs[2] = converterNICEtoM.convertMatrixFromNice(scores);          
+          
+          
+        return;
+    }
+    
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////      
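+    // e.g. from MATLAB (sketch):
+    //   GPHIKClassifierMex ( 'addExample', handle, newExample, newLabel );
+    //   GPHIKClassifierMex ( 'addMultipleExamples', handle, newData, newLabels );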
+    
+    // addExample    
+    if ( !strcmp("addExample", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 4) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        NICE::SparseVector * newExample;
+        double newLabel;        
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            newExample = new NICE::SparseVector ( converterMtoNICE.convertSparseVectorToNice( prhs[2] ) );
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( converterMtoNICE.convertDoubleVectorToNice(prhs[2]) ); 
+            newExample = new NICE::SparseVector ( *example );
+            //----------------- clean up -------------
+            delete example;            
+        }
+        
+        newLabel = converterMtoNICE.convertMatlabToDouble( prhs[3] );
+        
+        // setting performOptimizationAfterIncrement is optional
+        if ( nrhs > 4 )
+        {
+          bool performOptimizationAfterIncrement;          
+          performOptimizationAfterIncrement = converterMtoNICE.convertMatlabToBool( prhs[4] );
+          
+          classifier->addExample ( newExample,  newLabel, performOptimizationAfterIncrement );
+        }
+        else
+        {
+          classifier->addExample ( newExample,  newLabel );
+        }
+          
+        
+        //----------------- clean up -------------
+        delete newExample;        
+
+        return;
+    }
+    
+    // addMultipleExamples    
+    if ( !strcmp("addMultipleExamples", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 4) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        std::vector< const NICE::SparseVector *> newExamples;
+        NICE::Vector newLabels;
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            newExamples = converterMtoNICE.convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            NICE::Matrix newData;
+            newData = converterMtoNICE.convertDoubleMatrixToNice(prhs[2]);
+            
+            //----------------- convert data to sparse data structures ---------
+            newExamples.resize( newData.rows() );
+
+                    
+            std::vector< const NICE::SparseVector *>::iterator exTrainIt = newExamples.begin();
+            for (int i = 0; i < (int)newData.rows(); i++, exTrainIt++)
+            {
+                *exTrainIt =  new NICE::SparseVector( newData.getRow(i) );
+            }            
+        }
+          
+        newLabels = converterMtoNICE.convertDoubleVectorToNice(prhs[3]);
+        
+        // setting performOptimizationAfterIncrement is optional
+        if ( nrhs > 4 )
+        {
+          bool performOptimizationAfterIncrement;          
+          performOptimizationAfterIncrement = converterMtoNICE.convertMatlabToBool( prhs[4] );
+          
+          classifier->addMultipleExamples ( newExamples,  newLabels, performOptimizationAfterIncrement );
+        }
+        else
+        {
+          classifier->addMultipleExamples ( newExamples,  newLabels );
+        }
+          
+        
+        //----------------- clean up -------------
+        for ( std::vector< const NICE::SparseVector *>::iterator exIt = newExamples.begin();
+              exIt != newExamples.end(); exIt++
+            ) 
+        {
+          delete *exIt;
+        }
+
+        return;
+    }    
+    
+
+    
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////    
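+    // e.g. from MATLAB (sketch):
+    //   GPHIKClassifierMex ( 'store', handle, 'myClassifier.txt' );
+    //   GPHIKClassifierMex ( 'restore', handle, 'myClassifier.txt' );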
+    
+  
+    
+    // store the classifier  to an external file
+    if ( !strcmp("store", cmd.c_str() ) || !strcmp("save", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( nrhs < 3 )
+            mexErrMsgTxt("store: no destination given.");        
+               
+        std::string s_destination = converterMtoNICE.convertMatlabToString( prhs[2] );
+          
+        std::filebuf fb;
+        fb.open ( s_destination.c_str(), ios::out );
+        std::ostream os(&fb);
+        //
+        classifier->store( os );
+        //   
+        fb.close();        
+            
+        return;
+    }
+    
+    // load classifier from external file    
+    if ( !strcmp("restore", cmd.c_str() ) || !strcmp("load", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( nrhs < 3 )
+            mexErrMsgTxt("restore: no destination given.");        
+               
+        std::string s_destination = converterMtoNICE.convertMatlabToString( prhs[2] );
+        
+        std::cerr << " aim at restoring the classifier from " << s_destination << std::endl;
+          
+        std::filebuf fbIn;
+        fbIn.open ( s_destination.c_str(), ios::in );
+        std::istream is (&fbIn);
+        //
+        classifier->restore( is );
+        //   
+        fbIn.close();        
+            
+        return;
+    }    
+    
+    
+    // Got here, so command not recognized
+    
+    std::string errorMsg (cmd.c_str() );
+    errorMsg += " -- command not recognized.";
+    mexErrMsgTxt( errorMsg.c_str() );
+
+}

+ 73 - 0
matlab/GPHIKRegression.m

@@ -0,0 +1,73 @@
+% brief:    MATLAB class wrapper for the underlying Matlab-C++ Interface (GPHIKRegressionMex.cpp)
+% author:   Alexander Freytag
+% date:     17-01-2014 (dd-mm-yyyy)
+classdef GPHIKRegression < handle
+    
+    properties (SetAccess = private, Hidden = true)
+        % Handle to the underlying C++ class instance
+        objectHandle; 
+    end
+    
+    methods
+        
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%      Constructor / Destructor    %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%        
+        %% constructor - create object
+        function this = GPHIKRegression(varargin)
+            this.objectHandle = GPHIKRegressionMex('new', varargin{:});
+        end
+        
+        %% destructor - delete object
+        function delete(this)
+            GPHIKRegressionMex('delete', this.objectHandle);
+        end
+
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%          Regression stuff        %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%         
+        %% train - standard train - assumes initialized object
+        function varargout = train(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('train', this.objectHandle, varargin{:});
+        end
+        
+        %% perform regression
+        function varargout = estimate(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('estimate', this.objectHandle, varargin{:});
+        end 
+        
+        %% uncertainty - Uncertainty prediction
+        function varargout = uncertainty(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('uncertainty', this.objectHandle, varargin{:});
+        end        
+
+        %% test - evaluate regression on whole test set using L2 loss
+        function varargout = testL2loss(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('testL2loss', this.objectHandle, varargin{:});
+        end
+        
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%       Online Learnable methods   %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %% addExample
+        function varargout = addExample(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('addExample', this.objectHandle, varargin{:});
+        end 
+        %% addMultipleExamples
+        function varargout = addMultipleExamples(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('addMultipleExamples', this.objectHandle, varargin{:});
+        end
+        
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %%       Persistent methods         %%
+        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+        %% store - store the regression model to an external file
+        function varargout = store(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('store', this.objectHandle, varargin{:});
+        end
+        %% restore - load the regression model from an external file
+        function varargout = restore(this, varargin)
+            [varargout{1:nargout}] = GPHIKRegressionMex('restore', this.objectHandle, varargin{:});
+        end
+    end
+end
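+
+% Minimal usage sketch (assuming GPHIKRegressionMex is compiled and on the path):
+%   myGphikReg = GPHIKRegression ( 'noise', 0.1 );
+%   myGphikReg.train ( dataTrain, yValuesTrain );
+%   [ score, uncertainty ] = myGphikReg.estimate ( exampleTest );
+%   clear myGphikReg;   % calls the destructor and frees the C++ object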

+ 679 - 0
matlab/GPHIKRegressionMex.cpp

@@ -0,0 +1,679 @@
+/** 
+* @file GPHIKRegressionMex.cpp
+* @author Alexander Freytag
+* @date 17-01-2014 (dd-mm-yyyy)
+* @brief Matlab interface of our GPHIKRegression, allowing for training, regression, optimization, variance prediction, incremental learning, and storing/re-storing.
+*/
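+//
+// Illustrative call pattern from MATLAB (a sketch, assuming the MEX file is
+// compiled and available on the MATLAB path as GPHIKRegressionMex):
+//   handle = GPHIKRegressionMex ( 'new', 'noise', 0.1 );
+//   GPHIKRegressionMex ( 'train', handle, dataTrain, yValuesTrain );
+//   [ score, uncertainty ] = GPHIKRegressionMex ( 'estimate', handle, example );
+//   GPHIKRegressionMex ( 'delete', handle );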
+
+// STL includes
+#include <math.h>
+#include <matrix.h>
+#include <mex.h>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+#include <core/vector/MatrixT.h>
+#include <core/vector/VectorT.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKRegression.h"
+
+
+// Interface for conversion between Matlab and C objects
+#include "gp-hik-core/matlab/classHandleMtoC.h"
+#include "gp-hik-core/matlab/ConverterMatlabToNICE.h"
+#include "gp-hik-core/matlab/ConverterNICEToMatlab.h"
+
+const NICE::ConverterMatlabToNICE converterMtoNICE;
+const NICE::ConverterNICEToMatlab converterNICEtoM;
+
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+
+NICE::Config parseParametersGPHIKRegression(const mxArray *prhs[], int nrhs)
+{
+  NICE::Config conf;
+  
+  // if first argument is the filename of an existing config file,
+  // read the config accordingly
+  
+  int i_start ( 0 );
+  std::string variable = converterMtoNICE.convertMatlabToString(prhs[i_start]);
+  if(variable == "conf")
+  {
+      conf = NICE::Config ( converterMtoNICE.convertMatlabToString( prhs[i_start+1] )  );
+      i_start = i_start+2;
+  }
+  
+  // now run over all given parameter specifications
+  // and add them to the config
+  for( int i=i_start; i < nrhs; i+=2 )
+  {
+    std::string variable = converterMtoNICE.convertMatlabToString(prhs[i]);
+    
+    /////////////////////////////////////////
+    // READ STANDARD BOOLEAN VARIABLES
+    /////////////////////////////////////////
+    if( (variable == "verboseTime") || (variable == "verbose") ||
+        (variable == "optimize_noise") || (variable == "uncertaintyPredictionForRegression") ||
+        (variable == "use_quantization") || (variable == "ils_verbose")
+      )
+    {
+      if ( mxIsChar( prhs[i+1] ) )
+      {
+        string value = converterMtoNICE.convertMatlabToString( prhs[i+1] );
+        if ( (value != "true") && (value != "false") )
+        {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. In string modus, \'true\' or \'false\' expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );
+        }
+        
+        if( value == "true" )
+          conf.sB("GPHIKRegression", variable, true);
+        else
+          conf.sB("GPHIKRegression", variable, false);
+      }
+      else if ( mxIsLogical( prhs[i+1] ) )
+      {
+        bool value = converterMtoNICE.convertMatlabToBool( prhs[i+1] );
+        conf.sB("GPHIKRegression", variable, value);
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. \'true\', \'false\', or logical expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );        
+      }
+    }
+    
+    /////////////////////////////////////////
+    // READ STANDARD INT VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "nrOfEigenvaluesToConsiderForVarApprox")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = converterMtoNICE.convertMatlabToDouble(prhs[i+1]);
+        conf.sI("GPHIKRegression", variable, (int) value);        
+      }
+      else if ( mxIsInt32( prhs[i+1] ) )
+      {
+        int value = converterMtoNICE.convertMatlabToInt32(prhs[i+1]);
+        conf.sI("GPHIKRegression", variable, value);          
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Int32 or Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );         
+      }     
+    }
+    
+    /////////////////////////////////////////
+    // READ STRICT POSITIVE INT VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "num_bins") || (variable == "ils_max_iterations")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = converterMtoNICE.convertMatlabToDouble(prhs[i+1]);
+        if( value < 1 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );     
+        }
+        conf.sI("GPHIKRegression", variable, (int) value);        
+      }
+      else if ( mxIsInt32( prhs[i+1] ) )
+      {
+        int value = converterMtoNICE.convertMatlabToInt32(prhs[i+1]);
+        if( value < 1 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );     
+        }        
+        conf.sI("GPHIKRegression", variable, value);          
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Int32 or Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );         
+      }     
+    }
+    
+    /////////////////////////////////////////
+    // READ POSITIVE DOUBLE VARIABLES
+    /////////////////////////////////////////
+    if ( (variable == "ils_min_delta") || (variable == "ils_min_residual") ||
+         (variable == "noise")
+       )
+    {
+      if ( mxIsDouble( prhs[i+1] ) )
+      {
+        double value = converterMtoNICE.convertMatlabToDouble(prhs[i+1]);
+        if( value < 0.0 )
+        {
+          std::string errorMsg = "Expected parameter value larger than 0 for \'" +  variable + "\'.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );     
+        }
+        conf.sD("GPHIKRegression", variable, value);        
+      }
+      else
+      {
+          std::string errorMsg = "Unexpected parameter value for \'" +  variable + "\'. Double expected.";
+          mexErrMsgIdAndTxt( "mexnice:error", errorMsg.c_str() );         
+      }     
+    }    
+    
+    /////////////////////////////////////////
+    // READ REMAINING SPECIFIC VARIABLES
+    /////////////////////////////////////////  
+
+    if(variable == "ils_method")
+    {
+      string value = converterMtoNICE.convertMatlabToString(prhs[i+1]);
+      if(value != "CG" && value != "CGL" && value != "SYMMLQ" && value != "MINRES")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_method\'. \'CG\', \'CGL\', \'SYMMLQ\' or \'MINRES\' expected.");
+        conf.sS("GPHIKRegression", variable, value);
+    }
+
+
+    if(variable == "optimization_method")
+    {
+      string value = converterMtoNICE.convertMatlabToString(prhs[i+1]);
+      if(value != "greedy" && value != "downhillsimplex" && value != "none")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'optimization_method\'. \'greedy\', \'downhillsimplex\' or \'none\' expected.");
+        conf.sS("GPHIKRegression", variable, value);
+    }
+
+    if(variable == "transform")
+    {
+      string value = converterMtoNICE.convertMatlabToString( prhs[i+1] );
+      if(value != "absexp" && value != "exp" && value != "MKL" && value != "WeightedDim")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'transform\'. \'absexp\', \'exp\' , \'MKL\' or \'WeightedDim\' expected.");
+        conf.sS("GPHIKRegression", variable, value);
+    }
+
+  
+    if(variable == "varianceApproximation")
+    {
+      string value = converterMtoNICE.convertMatlabToString(prhs[i+1]);
+      if(value != "approximate_fine" && value != "approximate_rough" && value != "exact" && value != "none")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'varianceApproximation\'. \'approximate_fine\', \'approximate_rough\', \'none\' or \'exact\' expected.");
+        conf.sS("GPHIKRegression", variable, value);
+    }
+    
+
+    
+  }
+
+
+  return conf;
+}
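+
+// The name/value pairs map to the [GPHIKRegression] config section in the same
+// way as in the classifier interface, e.g. (sketch):
+//   GPHIKRegressionMex ( 'new', 'optimization_method', 'downhillsimplex' )
+//   -> conf.sS ( "GPHIKRegression", "optimization_method", "downhillsimplex" )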
+
+// MAIN MATLAB FUNCTION
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{    
+    // get the command string specifying what to do
+    if (nrhs < 1)
+        mexErrMsgTxt("No commands and options passed... Aborting!");        
+    
+    if( !mxIsChar( prhs[0] ) )
+        mexErrMsgTxt("First argument needs to be the command, ie.e, the class method to call... Aborting!");        
+    
+    std::string cmd = converterMtoNICE.convertMatlabToString( prhs[0] );
+      
+        
+    // create object
+    if ( !strcmp("new", cmd.c_str() ) )
+    {
+        // check output variable
+        if (nlhs != 1)
+            mexErrMsgTxt("New: One output expected.");
+        
+        // read config settings
+        NICE::Config conf = parseParametersGPHIKRegression(prhs+1,nrhs-1);
+        
+        // create class instance
+        NICE::GPHIKRegression * regressor = new NICE::GPHIKRegression ( &conf, "GPHIKRegression" /*sectionName in config*/ );
+        
+         
+        // handle to the C++ instance
+        plhs[0] = convertPtr2Mat<NICE::GPHIKRegression>( regressor );
+        return;
+    }
+    
+    // in all other cases, there should be a second input,
+    // which should be the class instance handle
+    if (nrhs < 2)
+      mexErrMsgTxt("Second input should be a class instance handle.");
+    
+    // delete object
+    if ( !strcmp("delete", cmd.c_str() ) )
+    {
+        // Destroy the C++ object
+        destroyObject<NICE::GPHIKRegression>(prhs[1]);
+        return;
+    }
+    
+    // get the class instance pointer from the second input
+    // every following function needs the regressor object
+    NICE::GPHIKRegression * regressor = convertMat2Ptr<NICE::GPHIKRegression>(prhs[1]);
+    
+    
+    ////////////////////////////////////////
+    //  Check which class method to call  //
+    ////////////////////////////////////////
+    
+    
+    // standard train - assumes initialized object
+    if (!strcmp("train", cmd.c_str() ))
+    {
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+        {
+            mexErrMsgTxt("Train: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+          
+        std::vector< const NICE::SparseVector *> examplesTrain;
+        NICE::Vector yValuesTrain;  
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            examplesTrain = converterMtoNICE.convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            NICE::Matrix dataTrain;
+            dataTrain = converterMtoNICE.convertDoubleMatrixToNice(prhs[2]);
+            
+            //----------------- convert data to sparse data structures ---------
+            examplesTrain.resize( dataTrain.rows() );
+
+                    
+            std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+            for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+            {
+                *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+            }            
+        }
+          
+        yValuesTrain = converterMtoNICE.convertDoubleVectorToNice(prhs[3]);
+
+        //----------------- train our regressor -------------
+        regressor->train ( examplesTrain , yValuesTrain );
+
+        //----------------- clean up -------------
+        for(int i=0;i<examplesTrain.size();i++)
+            delete examplesTrain[i];
+        
+        return;
+    }
+    
+    
+    // perform regression    
+    if ( !strcmp("estimate", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        double result;
+        double uncertainty;        
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( converterMtoNICE.convertSparseVectorToNice( prhs[2] ) );
+            regressor->estimate ( example,  result, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( converterMtoNICE.convertDoubleVectorToNice(prhs[2]) ); 
+            regressor->estimate ( example,  result, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+
+        // output
+        plhs[0] = mxCreateDoubleScalar( result );
+
+        if(nlhs >= 2)
+        {
+          plhs[1] = mxCreateDoubleScalar( uncertainty );
+        }
+        return;
+    }
+    
+    // Uncertainty prediction    
+    if ( !strcmp("uncertainty", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        }
+        
+        double uncertainty;        
+        
+        //------------- read the data --------------
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( converterMtoNICE.convertSparseVectorToNice( prhs[2] ) );
+            regressor->predictUncertainty( example, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( converterMtoNICE.convertDoubleVectorToNice(prhs[2]) ); 
+            regressor->predictUncertainty( example, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+        
+
+        // output
+        plhs[0] = mxCreateDoubleScalar( uncertainty );
+        return;
+    }    
+    
+    
+    // Test - evaluate regressor on whole test set  
+    if ( !strcmp("testL2loss", cmd.c_str() ) )
+    {        
+        // Check parameters
+        if (nlhs < 0 || nrhs < 3)
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        //------------- read the data --------------
+        
+        
+        bool dataIsSparse ( mxIsSparse( prhs[2] ) );
+        
+        std::vector< const NICE::SparseVector *> dataTest_sparse;
+        NICE::Matrix dataTest_dense;
+
+        if ( dataIsSparse )
+        {
+            dataTest_sparse = converterMtoNICE.convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {    
+            dataTest_dense = converterMtoNICE.convertDoubleMatrixToNice(prhs[2]);          
+        }        
+
+        NICE::Vector yValuesTest;
+        yValuesTest = converterMtoNICE.convertDoubleVectorToNice(prhs[3]);
+
+        int i_numTestSamples ( yValuesTest.size() );
+
+        double l2loss ( 0.0 );
+
+        NICE::Vector scores;
+        NICE::Vector::iterator itScores;
+        if ( nlhs >= 2 )
+        {
+          scores.resize( i_numTestSamples );
+          itScores = scores.begin();
+        }
+          
+          
+
+        // ------------------------------------------
+        // ------------- REGRESSION --------------
+        // ------------------------------------------          
+        
+        NICE::Timer t;
+        double testTime (0.0);
+        
+
+
+        for (int i = 0; i < i_numTestSamples; i++)
+        {
+            //----------------- convert data to sparse data structures ---------
+          
+
+            double result;
+
+            if ( dataIsSparse )
+            {                
+              // and perform regression
+              t.start();
+              regressor->estimate( dataTest_sparse[ i ], result);
+              t.stop();
+              testTime += t.getLast();
+            }
+            else
+            {
+                NICE::Vector example ( dataTest_dense.getRow(i) );
+              // and perform regression
+              t.start();
+              regressor->estimate( &example, result );
+              t.stop();
+              testTime += t.getLast();                
+            }
+
+            l2loss += pow ( yValuesTest[i] - result, 2 );
+
+            if ( nlhs >= 2 )
+            {
+              *itScores = result;
+              itScores++;
+            }
+        }
+        
+        std::cerr << "Time for testing: " << testTime << std::endl;          
+        
+        // clean up
+        if ( dataIsSparse )
+        {
+            for ( std::vector<const NICE::SparseVector *>::iterator it = dataTest_sparse.begin(); it != dataTest_sparse.end(); it++) 
+                delete *it;
+        }
+        
+        // output
+        plhs[0] = mxCreateDoubleScalar( l2loss );
+        if ( nlhs >= 2 )
+            plhs[1] = converterNICEtoM.convertVectorFromNice( scores );
+        
+        return;
+    }
+    
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////      
+    
+    // addExample    
+    if ( !strcmp("addExample", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 4) )
+        {
+            mexErrMsgTxt("addExample: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        NICE::SparseVector * newExample;
+        double newLabel;        
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            newExample = new NICE::SparseVector ( converterMtoNICE.convertSparseVectorToNice( prhs[2] ) );
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( converterMtoNICE.convertDoubleVectorToNice(prhs[2]) ); 
+            newExample = new NICE::SparseVector ( *example );
+            //----------------- clean up -------------
+            delete example;            
+        }
+        
+        newLabel = converterMtoNICE.convertMatlabToDouble( prhs[3] );
+        
+        // setting performOptimizationAfterIncrement is optional
+        if ( nrhs > 4 )
+        {
+          bool performOptimizationAfterIncrement;          
+          performOptimizationAfterIncrement = converterMtoNICE.convertMatlabToBool( prhs[4] );
+          
+          regressor->addExample ( newExample,  newLabel, performOptimizationAfterIncrement );
+        }
+        else
+        {
+          regressor->addExample ( newExample,  newLabel );
+        }
+        
+        //----------------- clean up -------------
+        delete newExample;        
+
+        return;
+    }
+    
+    // addMultipleExamples    
+    if ( !strcmp("addMultipleExamples", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 4) )
+        {
+            mexErrMsgTxt("addMultipleExamples: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        std::vector< const NICE::SparseVector *> newExamples;
+        NICE::Vector newLabels;
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            newExamples = converterMtoNICE.convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            NICE::Matrix newData;
+            newData = converterMtoNICE.convertDoubleMatrixToNice(prhs[2]);
+            
+            //----------------- convert data to sparse data structures ---------
+            newExamples.resize( newData.rows() );
+
+                    
+            std::vector< const NICE::SparseVector *>::iterator exTrainIt = newExamples.begin();
+            for (int i = 0; i < (int)newData.rows(); i++, exTrainIt++)
+            {
+                *exTrainIt =  new NICE::SparseVector( newData.getRow(i) );
+            }            
+        }
+          
+        newLabels = converterMtoNICE.convertDoubleVectorToNice(prhs[3]);
+        
+        // setting performOptimizationAfterIncrement is optional
+        if ( nrhs > 4 )
+        {
+          bool performOptimizationAfterIncrement;          
+          performOptimizationAfterIncrement = converterMtoNICE.convertMatlabToBool( prhs[4] );
+          
+          regressor->addMultipleExamples ( newExamples,  newLabels, performOptimizationAfterIncrement );
+        }
+        else
+        {
+          regressor->addMultipleExamples ( newExamples,  newLabels );
+        }
+        
+        //----------------- clean up -------------
+        for ( std::vector< const NICE::SparseVector *>::iterator exIt = newExamples.begin();
+              exIt != newExamples.end(); exIt++
+            ) 
+        {
+          delete *exIt;
+        }
+
+        return;
+    }    
+    
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    
+    // store the regressor to an external file
+    if ( !strcmp("store", cmd.c_str() ) || !strcmp("save", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( nrhs < 3 )
+            mexErrMsgTxt("store: no destination given.");        
+               
+        std::string s_destination = converterMtoNICE.convertMatlabToString( prhs[2] );
+          
+        std::filebuf fb;
+        fb.open ( s_destination.c_str(), ios::out );
+        std::ostream os(&fb);
+        //
+        regressor->store( os );
+        //   
+        fb.close();        
+            
+        return;
+    }
+    
+    // load regressor from external file    
+    if ( !strcmp("restore", cmd.c_str() ) || !strcmp("load", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( nrhs < 3 )
+            mexErrMsgTxt("restore: no source given.");
+               
+        std::string s_destination = converterMtoNICE.convertMatlabToString( prhs[2] );
+        
+        std::cerr << "Restoring the regressor from " << s_destination << std::endl;
+          
+        std::filebuf fbIn;
+        fbIn.open ( s_destination.c_str(), ios::in );
+        std::istream is (&fbIn);
+        //
+        regressor->restore( is );
+        //   
+        fbIn.close();        
+            
+        return;
+    }    
+    
+    
+    // Got here, so command not recognized
+    
+    std::string errorMsg (cmd.c_str() );
+    errorMsg += " -- command not recognized.";
+    mexErrMsgTxt( errorMsg.c_str() );
+
+}
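
For reference, a minimal usage sketch of the regression MEX interface implemented above. Only 'uncertainty', 'testL2loss', 'addExample', 'addMultipleExamples', 'store', and 'restore' appear in this hunk; 'new', 'train', 'delete', and all variables are assumptions, made by analogy with testGPHIKClassifierMex.m further below:

    % sketch only: 'new', 'train', 'delete', and the data are assumed here
    regressor = GPHIKRegressionMex ( 'new', 'noise', 0.000001 );
    GPHIKRegressionMex ( 'train', regressor, myData, myValues );
    % predictive uncertainty for a single example
    uncertainty = GPHIKRegressionMex ( 'uncertainty', regressor, myDataTest(1,:) );
    % L2 loss over a whole test set; the per-example scores are optional
    [ l2loss, scores ] = GPHIKRegressionMex ( 'testL2loss', regressor, myDataTest, myValuesTest );
    % persist the regressor, then release the wrapped C++ object
    GPHIKRegressionMex ( 'store', regressor, 'myRegressor.txt' );
    GPHIKRegressionMex ( 'delete', regressor );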

+ 8 - 1
matlab/Makefile

@@ -2,4 +2,11 @@ NICEFLAGS1=$(shell pkg-config libgp-hik-core --cflags --libs)
 NICEFLAGS=$(subst -fopenmp,,$(NICEFLAGS1))
 
 default:
-	/home/matlab/7.14/bin/mex ${NICEFLAGS} -largeArrayDims GPHIK.cpp 
+	/home/matlab/7.14/bin/mex ${NICEFLAGS} -largeArrayDims GPHIKClassifierMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
+	/home/matlab/7.14/bin/mex ${NICEFLAGS} -largeArrayDims GPHIKRegressionMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
+
+classification:
+	/home/matlab/7.14/bin/mex ${NICEFLAGS} -largeArrayDims GPHIKClassifierMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
+
+regression:
+	/home/matlab/7.14/bin/mex ${NICEFLAGS} -largeArrayDims GPHIKRegressionMex.cpp ConverterMatlabToNICE.cpp ConverterNICEToMatlab.cpp
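
A quick sanity check after building, as a sketch; it assumes the matlab/ directory has been added to the MATLAB path:

    % both MEX binaries should resolve after a successful 'make'
    which GPHIKClassifierMex
    which GPHIKRegressionMex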

+ 3 - 2
matlab/classHandleMtoC.h

@@ -8,7 +8,8 @@
 #ifndef _NICE_CLASSHANDLEMTOCINCLUDE
 #define _NICE_CLASSHANDLEMTOCINCLUDE
 
-#include "mex.h"
+// MATLAB includes
+#include <mex.h>
 #include <stdint.h>
 #include <iostream>
 #include <string>
@@ -18,7 +19,7 @@
 #define CLASS_HANDLE_SIGNATURE 0xFF00F0A3
 
   /** 
-  * @class FMKGPHyperparameterOptimization
+  * @class ClassHandle
   * @brief Generic class to pass C++ objects to matlab
   * @author Alexander Freytag
   */
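
In practice the handle works as sketched below, mirroring testGPHIKClassifierMex.m further down; the wrapped C++ object lives until 'delete' is called:

    % 'new' returns an opaque handle wrapping a C++ pointer
    h = GPHIKClassifierMex ( 'new' );
    % every subsequent command receives the handle as its second argument
    GPHIKClassifierMex ( 'train', h, myData, myLabels );
    % releasing the handle frees the C++ object and avoids a memory leak
    GPHIKClassifierMex ( 'delete', h );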

+ 90 - 0
matlab/plot1dExampleClassification.m

@@ -0,0 +1,90 @@
+% BRIEF: Small visualization script using GPHIKClassifier
+% author: Alexander Freytag
+% date: 20-01-2014 (dd-mm-yyyy)
+
+myData = [ 0.2; 0.6; 0.9];
+% create l1-normalized 'histograms'
+myData = cat(2,myData , 1-myData);
+myLabels = [1; 2; 2];
+
+
+% init new GPHIKClassifier object
+myGPHIKClassifier = GPHIKClassifier ( 'verbose', 'false', ...
+    'optimization_method', 'none', 'varianceApproximation', 'approximate_fine',...
+    'nrOfEigenvaluesToConsiderForVarApprox',2,...
+    'uncertaintyPredictionForClassification', true, ...
+    'noise', 0.000001 ...
+    );
+
+% run train method
+myGPHIKClassifier.train( myData, myLabels );
+
+myDataTest = 0:0.01:1;
+% create l1-normalized 'histograms'
+myDataTest = cat(1, myDataTest, 1-myDataTest)';
+
+
+scores = zeros(size(myDataTest,1),1);
+uncertainties = zeros(size(myDataTest,1),1);
+for i=1:size(myDataTest,1)
+    example = myDataTest(i,:);
+    [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( example );
+    scores(i) = score(1);
+end
+
+
+% create figure and set title
+classificationFig = figure;
+set ( classificationFig, 'name', 'Classification with GPHIK');
+
+hold on;
+
+%#initialize x array
+x=0:0.01:1;
+
+%#create first curve
+uncLower=scores-uncertainties;
+%#create second curve
+uncUpper=scores+uncertainties;
+
+
+%#create polygon-like x values for plotting
+X=[x,fliplr(x)];
+%# concatenate y-values accordingly
+Y=[uncLower',fliplr(uncUpper')]; 
+%#plot filled area
+fill(X,Y,'y');                  
+
+% plot mean values
+plot ( x,scores, ...
+       'LineStyle', '--', ...
+       'LineWidth', 2, ...
+       'Color', 'r', ...
+       'Marker','none', ...
+       'MarkerSize',1, ...
+       'MarkerEdgeColor','r', ...
+       'MarkerFaceColor',[0.5,0.5,0.5] ...
+       );
+
+% plot training data
+plot ( myData(:,1), 2*(myLabels==1)-1, ...
+       'LineStyle', 'none', ...
+       'LineWidth', 3, ...
+       'Marker','o', ...
+       'MarkerSize',6, ...
+       'MarkerEdgeColor','b', ...
+       'MarkerFaceColor',[0.5,0.5,0.5] ...
+       );
+
+xlabel('1st Input dimension');
+ylabel('Classification score');   
+
+i_fontSizeAxis = 16;
+set(get(gca,'XLabel'), 'FontSize', i_fontSizeAxis);
+set(get(gca,'YLabel'), 'FontSize', i_fontSizeAxis);
+
+
+% clean up and delete object
+myGPHIKClassifier.delete();
+
+clear ( 'myGPHIKClassifier' );

+ 94 - 0
matlab/plot1dExampleRegression.m

@@ -0,0 +1,94 @@
+% BRIEF: Small visualization script using the GPHIKRegression
+% author: Alexander Freytag
+% date: 20-01-2014 (dd-mm-yyyy)
+
+myData = [ 0.1; 0.3; 0.7; 0.8];
+% create l1-normalized 'histograms'
+myData = cat(2,myData , 1-myData);
+myValues = [0.3; 0.0; 1.0; 1.4];
+
+
+% init new GPHIKRegression object
+myGPHIKRegression = GPHIKRegression ( 'verbose', 'false', ...
+    'optimization_method', 'none', ...
+    'varianceApproximation', 'exact',...
+    'nrOfEigenvaluesToConsiderForVarApprox',1,...
+    'uncertaintyPredictionForRegression', true, ...
+    'noise', 0.000001 ...
+    );
+
+    %'varianceApproximation', 'approximate_fine',...
+    %'varianceApproximation', 'exact',...
+
+% run train method
+myGPHIKRegression.train( myData, myValues );
+
+myDataTest = 0:0.01:1;
+% create l1-normalized 'histograms'
+myDataTest = cat(1, myDataTest, 1-myDataTest)';
+
+
+scores = zeros(size(myDataTest,1),1);
+uncertainties = zeros(size(myDataTest,1),1);
+for i=1:size(myDataTest,1)
+    example = myDataTest(i,:);
+    [ scores(i), uncertainties(i)] = myGPHIKRegression.estimate( example );
+end
+
+
+% create figure and set title
+classificationFig = figure;
+set ( classificationFig, 'name', 'Regression with GPHIK');
+
+hold on;
+
+%#initialize x array
+x=0:0.01:1;
+
+%#create first curve
+uncLower=scores-uncertainties;
+%#create second curve
+uncUpper=scores+uncertainties;
+
+
+%#create polygon-like x values for plotting
+X=[x,fliplr(x)];
+%# concatenate y-values accordingly
+Y=[uncLower',fliplr(uncUpper')]; 
+%#plot filled area for predictive variance ( aka regression uncertainty )
+fill(X,Y,'y');                  
+
+% plot mean values
+plot ( x,scores, ...
+       'LineStyle', '--', ...
+       'LineWidth', 2, ...
+       'Color', 'r', ...
+       'Marker','none', ...
+       'MarkerSize',1, ...
+       'MarkerEdgeColor','r', ...
+       'MarkerFaceColor',[0.5,0.5,0.5] ...
+       );
+
+% plot training data
+plot ( myData(:,1), myValues, ...
+       'LineStyle', 'none', ...
+       'LineWidth', 3, ...
+       'Marker','o', ...
+       'MarkerSize',6, ...
+       'MarkerEdgeColor','b', ...
+       'MarkerFaceColor',[0.5,0.5,0.5] ...
+       );
+
+xlabel('1st Input dimension');
+ylabel('Regression score');   
+
+i_fontSizeAxis = 16;
+set(get(gca,'XLabel'), 'FontSize', i_fontSizeAxis);
+set(get(gca,'YLabel'), 'FontSize', i_fontSizeAxis);
+
+% clean up and delete object
+myGPHIKRegression.delete();
+
+clear ( 'myGPHIKRegression' );
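
Note that the band plotted above spans scores +/- uncertainties directly. If the quantity returned as uncertainty is a predictive variance rather than a standard deviation (an assumption; the script does not say), a one-sigma band would use its square root instead:

    % sketch, assuming 'uncertainties' holds predictive variances
    sigma = sqrt( uncertainties );
    uncLower = scores - sigma;
    uncUpper = scores + sigma;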

+ 65 - 0
matlab/testGPHIKClassifier.m

@@ -0,0 +1,65 @@
+% brief:    Demo-program showing how to use the GPHIKClassifier Interface (including the class wrapper)
+% author:   Alexander Freytag
+% date:     07-01-2014 (dd-mm-yyyy)
+
+myData = [ 0.2 0.3 0.5;
+           0.3 0.2 0.5;
+           0.9 0.0 0.1;
+           0.8 0.1 0.1;
+           0.1 0.1 0.8;
+           0.1 0.0 0.9
+          ];
+myLabels = [1,1,2,2,3,3];
+
+
+% init new GPHIKClassifier object
+myGPHIKClassifier = GPHIKClassifier ( 'verbose', 'false', ...
+    'optimization_method', 'none', 'varianceApproximation', 'approximate_fine',...
+    'nrOfEigenvaluesToConsiderForVarApprox',4,...
+    'uncertaintyPredictionForClassification', false ...
+    );
+
+% run train method
+myGPHIKClassifier.train( myData, myLabels );
+
+% check that reclassification of the training data works
+[ arrReCl, confMatReCl, scoresReCl] = myGPHIKClassifier.test( myData, myLabels )
+uncertainty = myGPHIKClassifier.uncertainty( myData(1,:) )
+
+myDataTest = [ 0.3 0.4 0.3
+             ];
+myLabelsTest = [1];
+
+% run single classification call
+[ classNoEst, score, uncertainty] = myGPHIKClassifier.classify( myDataTest )
+% compute predictive variance
+uncertainty = myGPHIKClassifier.uncertainty( myDataTest )
+% run test method, which can evaluate multiple examples at once
+[ arr, confMat, scores] = myGPHIKClassifier.test( myDataTest, myLabelsTest )
+
+% add a single new example
+newExample = [ 0.5 0.5 0.0
+             ];
+newLabel = [4];
+myGPHIKClassifier.addExample( newExample, newLabel);
+
+% add multiple new examples
+newExamples = [ 0.3 0.3 0.4;
+                0.1, 0.2, 0.7
+             ];
+newLabels = [1,3];
+myGPHIKClassifier.addMultipleExamples( newExamples, newLabels );
+
+% perform evaluation again
+
+% run single classification call
+[ classNoEst, score, uncertainty] = myGPHIKClassifier.classify( myDataTest )
+% compute predictive variance
+uncertainty = myGPHIKClassifier.uncertainty( myDataTest )
+% run test method, which can evaluate multiple examples at once
+[ arr, confMat, scores] = myGPHIKClassifier.test( myDataTest, myLabelsTest )
+
+% clean up and delete object
+myGPHIKClassifier.delete();
+
+clear ( 'myGPHIKClassifier' );

+ 59 - 0
matlab/testGPHIKClassifierMex.m

@@ -0,0 +1,59 @@
+% brief:    Demo-program showing how to use the GPHIK Interface (without a class wrapper)
+% author:   Alexander Freytag
+% date:     07-01-2014 (dd-mm-yyyy)
+
+myData = [ 0.2 0.3 0.5;
+           0.3 0.2 0.5;
+           0.9 0.0 0.1;
+           0.8 0.1 0.1;
+           0.1 0.1 0.8;
+           0.1 0.0 0.9
+          ];
+myLabels = [1,1,2,2,3,3];
+
+
+% init new GPHIKClassifier object
+myGPHIKClassifier = GPHIKClassifierMex ( 'new', 'verbose', 'false', ...
+    'optimization_method', 'none', 'varianceApproximation', 'approximate_rough',...
+    'nrOfEigenvaluesToConsiderForVarApprox',4,...
+    'uncertaintyPredictionForClassification', false ...
+    );
+
+% run train method
+GPHIKClassifierMex ( 'train', myGPHIKClassifier, myData, myLabels);
+
+myDataTest = [ 0.3 0.4 0.3
+             ];
+myLabelsTest = [1];
+
+% run single classification call
+[ classNoEst, score, uncertainty] = GPHIKClassifierMex ( 'classify', myGPHIKClassifier, myDataTest )
+% compute predictive variance
+uncertainty = GPHIKClassifierMex ( 'uncertainty', myGPHIKClassifier, myDataTest )
+% run test method, which can evaluate multiple examples at once
+[ arr, confMat, scores] = GPHIKClassifierMex ( 'test', myGPHIKClassifier, myDataTest, myLabelsTest )
+
+% add a single new example
+newExample = [ 0.5 0.5 0.0
+             ];
+newLabel = [4];
+GPHIKClassifierMex ( 'addExample', myGPHIKClassifier, newExample, newLabel);
+
+% add multiple new examples
+newExamples = [ 0.3 0.3 0.4;
+                0.1, 0.2, 0.7
+             ];
+newLabels = [1,3];
+GPHIKClassifierMex ( 'addMultipleExamples', myGPHIKClassifier, newExamples, newLabels );
+
+% perform evaluation again
+
+% run single classification call
+[ classNoEst, score, uncertainty] = GPHIKClassifierMex ( 'classify', myGPHIKClassifier, myDataTest )
+% compute predictive variance
+uncertainty = GPHIKClassifierMex ( 'uncertainty', myGPHIKClassifier, myDataTest )
+% run test method, which can evaluate multiple examples at once
+[ arr, confMat, scores] = GPHIKClassifierMex ( 'test', myGPHIKClassifier, myDataTest, myLabelsTest )
+
+% clean up and delete object
+GPHIKClassifierMex ( 'delete',myGPHIKClassifier);
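
A regression counterpart using the class wrapper looks similar; in the sketch below, the constructor flags and method names are taken from plot1dExampleRegression.m above, while the toy data is made up:

    % sketch with made-up data; flags mirror plot1dExampleRegression.m
    myGPHIKRegression = GPHIKRegression ( 'uncertaintyPredictionForRegression', true, 'noise', 0.000001 );
    myGPHIKRegression.train( [0.2 0.8; 0.7 0.3], [0.5; 1.5] );
    [ score, uncertainty ] = myGPHIKRegression.estimate( [0.4 0.6] );
    myGPHIKRegression.delete();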

+ 1 - 1
parameterizedFunctions/PFMKL.h

@@ -55,7 +55,7 @@ class PFMKL : public ParameterizedFunction
     int dummyCnt ( 0 );
     for (std::set<int>::const_iterator it = steps.begin(); it != steps.end(); it++, dummyCnt++)
     {
-      if ( index < *it)
+      if ( (int)index < *it)
         return x * m_parameters[dummyCnt];
     }
     //default value, should never be reached

+ 1 - 1
progs/completeEvaluationFastMinkernel.cpp

@@ -223,7 +223,7 @@ int main (int argc, char* argv[])
     
     Vector kstarSlow ( hikSlow.computeKernelVector(rand_feat_transposed, xstar_stl));
     xstar.resize(xstar_stl.size());
-    for ( int i = 0 ; i < xstar.size() ; i++ )
+    for ( int i = 0 ; (uint) i < xstar.size() ; i++ )
       xstar[i] = xstar_stl[i];
     double kSumSlowly = alphas.scalarProduct(kstarSlow);
     

+ 1 - 3
progs/toyExampleStoreRestore.cpp

@@ -157,9 +157,7 @@ int main (int argc, char* argv[])
   
   NICE::Timer t;
   double testTime (0.0);
-  
-  double uncertainty;
-  
+    
   int i_loopEnd  ( (int)dataTest.rows() );
   
   

+ 23 - 19
tests/TestGPHIKOnlineLearnable.cpp

@@ -25,6 +25,7 @@ using namespace NICE;  // nice-core
 
 const bool verboseStartEnd = true;
 const bool verbose = false;
+const bool writeClassifiersForVerification = false;
 
 
 CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKOnlineLearnable );
@@ -557,25 +558,28 @@ void TestGPHIKOnlineLearnable::testOnlineLearningMultiClass()
   
   
   // TEST that both classifiers produce equal store-files
-   std::string s_destination_save_IL ( "myClassifierIL.txt" );
-  
-  std::filebuf fbOut;
-  fbOut.open ( s_destination_save_IL.c_str(), ios::out );
-  std::ostream os (&fbOut);
-  //
-  classifier->store( os );
-  //   
-  fbOut.close(); 
-  
-  std::string s_destination_save_scratch ( "myClassifierScratch.txt" );
-  
-  std::filebuf fbOutScratch;
-  fbOutScratch.open ( s_destination_save_scratch.c_str(), ios::out );
-  std::ostream osScratch (&fbOutScratch);
-  //
-  classifierScratch->store( osScratch );
-  //   
-  fbOutScratch.close(); 
+  if ( writeClassifiersForVerification )
+  {
+    std::string s_destination_save_IL ( "myClassifierIL.txt" );
+    
+    std::filebuf fbOut;
+    fbOut.open ( s_destination_save_IL.c_str(), ios::out );
+    std::ostream os (&fbOut);
+    //
+    classifier->store( os );
+    //   
+    fbOut.close(); 
+    
+    std::string s_destination_save_scratch ( "myClassifierScratch.txt" );
+    
+    std::filebuf fbOutScratch;
+    fbOutScratch.open ( s_destination_save_scratch.c_str(), ios::out );
+    std::ostream osScratch (&fbOutScratch);
+    //
+    classifierScratch->store( osScratch );
+    //   
+    fbOutScratch.close(); 
+  }
   
   
   // TEST both classifiers to produce equal results

+ 1 - 1
tests/TestGPHIKOnlineLearnable.h

@@ -8,7 +8,7 @@
  * CppUnit-Testcase. 
  * @brief CppUnit-Testcase to verify that GPHIKClassifierIL methods herited from OnlineLearnable (addExample and addMultipleExamples) work as desired.
  * @author Alexander Freytag
- * @date 03-11-2014 (dd-mm-yyyy)
+ * @date 03-01-2014 (dd-mm-yyyy)
  */
 class TestGPHIKOnlineLearnable : public CppUnit::TestFixture {
 

+ 530 - 0
tests/TestGPHIKRegression.cpp

@@ -0,0 +1,530 @@
+/** 
+ * @file TestGPHIKRegression.cpp
+ * @brief CppUnit-Testcase to verify that GPHIKRegression works as desired.
+ * @author Alexander Freytag
+ * @date 16-01-2014 (dd-mm-yyyy)
+*/
+
+#ifdef NICE_USELIB_CPPUNIT
+
+// STL includes
+#include <iostream>
+#include <vector>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKRegression.h"
+
+#include "TestGPHIKRegression.h"
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+const bool verboseStartEnd = true;
+const bool verbose = false;
+const bool writeRegressionObjectsForVerification = false;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKRegression );
+
+void TestGPHIKRegression::setUp() {
+}
+
+void TestGPHIKRegression::tearDown() {
+}
+
+
+
+void readData ( const std::string filename, NICE::Matrix & data, NICE::Vector & yValues )
+{
+  std::ifstream ifs ( filename.c_str() , ios::in );
+
+  if ( ifs.good() )
+  {
+    NICE::Vector tmp;
+    ifs >> data;
+    ifs >> tmp; //yBin;
+    ifs >> yValues;
+    ifs.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read data from file " << filename << " -- aborting." << std::endl;
+    CPPUNIT_ASSERT ( ifs.good() );
+  }    
+}
+
+void evaluateRegressionMethod ( double & regressionLoss, 
+                          const NICE::GPHIKRegression * regressionMethod, 
+                          const NICE::Matrix & data,
+                          const NICE::Vector & yValues
+                        ) 
+{
+  regressionLoss = 0.0;
+  
+  int i_loopEnd  ( (int)data.rows() );  
+  
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example ( data.getRow(i) );
+    double result;    
+    
+    // estimate with the previously trained regression method
+    regressionMethod->estimate( &example, result );
+    
+    if ( verbose )
+      std::cerr << "i: " << i << " gt: " << yValues[i] << " result: " << result << std::endl;
+    
+    //use L2-loss for evaluation
+    regressionLoss +=  pow( yValues[i] - result, 2 ); 
+  }
+}
+
+void TestGPHIKRegression::testRegressionHoldInData()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionHoldInData ===================== " << std::endl;  
+  
+  NICE::Config conf;
+  
+  conf.sB ( "GPHIKRegression", "eig_verbose", false);
+  conf.sS ( "GPHIKRegression", "optimization_method", "downhillsimplex");
+  // set pretty low built-in noise for hold-in regression estimation
+  conf.sD ( "GPHIKRegression", "noise", 1e-6 );
+  
+  std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yValues; 
+  
+  readData ( s_trainData, dataTrain, yValues );
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows() );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }
+    
+  //create regressionMethod object
+  NICE::GPHIKRegression * regressionMethod;
+  regressionMethod = new NICE::GPHIKRegression ( &conf );
+  regressionMethod->train ( examplesTrain , yValues );
+  
+  double holdInLoss ( 0.0 );
+  
+    
+  // ------------------------------------------
+  // ------------- REGRESSION --------------
+  // ------------------------------------------  
+  evaluateRegressionMethod ( holdInLoss, regressionMethod, dataTrain, yValues ); 
+  
+  
+  if ( verbose ) 
+  {
+    std::cerr << " holdInLoss: " << holdInLoss << std::endl;
+  }  
+
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( 0.0, holdInLoss, 1e-8);
+  
+  // don't waste memory
+  
+  delete regressionMethod;
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  }
+  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionHoldInData done ===================== " << std::endl;   
+}
+
+void TestGPHIKRegression::testRegressionHoldOutData()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionHoldOutData ===================== " << std::endl;  
+
+  NICE::Config conf;
+  
+  conf.sB ( "GPHIKRegression", "eig_verbose", false);
+  conf.sS ( "GPHIKRegression", "optimization_method", "downhillsimplex");
+  // set higher built-in noise for hold-out regression estimation
+  conf.sD ( "GPHIKRegression", "noise", 1e-4 );
+  
+  std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yValues; 
+  
+  readData ( s_trainData, dataTrain, yValues );
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows() );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }
+    
+  //create regressionMethod object
+  NICE::GPHIKRegression * regressionMethod;
+  regressionMethod = new NICE::GPHIKRegression ( &conf, "GPHIKRegression" );
+  regressionMethod->train ( examplesTrain , yValues );
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yValuesTest; 
+  
+  std::string s_testData = conf.gS( "main", "testData", "toyExampleTest.data" );  
+  
+  readData ( s_testData, dataTest, yValuesTest );  
+  
+  double holdOutLoss ( 0.0 );
+  
+    
+  // ------------------------------------------
+  // ------------- REGRESSION --------------
+  // ------------------------------------------  
+  evaluateRegressionMethod ( holdOutLoss, regressionMethod, dataTest, yValuesTest ); 
+
+  // acceptable difference for every estimated y-value on average
+  double diffOkay ( 0.4 );
+  
+  if ( verbose ) 
+  {
+    std::cerr << " holdOutLoss: " << holdOutLoss << " accepting: " << pow(diffOkay,2)*yValuesTest.size() << std::endl;
+  }  
+  
+  CPPUNIT_ASSERT( pow(diffOkay,2)*yValuesTest.size() - holdOutLoss > 0.0);
+  
+  // don't waste memory
+  
+  delete regressionMethod;
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  }  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionHoldOutData done ===================== " << std::endl;     
+}
+    
+void TestGPHIKRegression::testRegressionOnlineLearnableAdd1Example()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionOnlineLearnableAdd1Example ===================== " << std::endl;  
+
+  NICE::Config conf;
+  
+  conf.sB ( "GPHIKRegression", "eig_verbose", false);
+  conf.sS ( "GPHIKRegression", "optimization_method", "downhillsimplex"); // alternatives: downhillsimplex, greedy
+  // set higher built-in noise for hold-out regression estimation
+  conf.sD ( "GPHIKRegression", "noise", 1e-4 );  
+  
+  std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yValuesTrain; 
+  
+  readData ( s_trainData, dataTrain, yValuesTrain );
+
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows()-1 );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows()-1; i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }  
+  
+  // TRAIN INITIAL REGRESSOR FROM SCRATCH
+  NICE::GPHIKRegression * regressionMethod;
+  regressionMethod = new NICE::GPHIKRegression ( &conf, "GPHIKRegression" );
+
+  // use all but the last example for training and add the last one later on
+  NICE::Vector yValuesRelevantTrain  ( yValuesTrain.getRangeRef( 0, yValuesTrain.size()-2  ) );
+  
+  regressionMethod->train ( examplesTrain , yValuesRelevantTrain );
+  
+  
+  // RUN INCREMENTAL LEARNING
+  
+  bool performOptimizationAfterIncrement ( true );
+  
+  NICE::SparseVector * exampleToAdd = new NICE::SparseVector ( dataTrain.getRow( (int)dataTrain.rows()-1 ) );
+  
+  
+  regressionMethod->addExample ( exampleToAdd, yValuesTrain[ (int)dataTrain.rows()-1 ], performOptimizationAfterIncrement );
+  
+  if ( verbose )
+    std::cerr << "label of example to add: " << yValuesTrain[ (int)dataTrain.rows()-1 ] << std::endl;
+  
+  // TRAIN SECOND REGRESSOR FROM SCRATCH USING THE SAME OVERALL AMOUNT OF EXAMPLES
+  examplesTrain.push_back(  exampleToAdd );
+
+  NICE::GPHIKRegression * regressionMethodScratch = new NICE::GPHIKRegression ( &conf, "GPHIKRegression" );
+  regressionMethodScratch->train ( examplesTrain, yValuesTrain );
+  
+  if ( verbose )
+    std::cerr << "trained both regressionMethods - now start evaluating them" << std::endl;
+  
+  
+  // TEST that both regressionMethods produce equal store-files
+  if ( writeRegressionObjectsForVerification )
+  {
+    std::string s_destination_save_IL ( "myRegressionMethodIL.txt" );
+    
+    std::filebuf fbOut;
+    fbOut.open ( s_destination_save_IL.c_str(), ios::out );
+    std::ostream os (&fbOut);
+    //
+    regressionMethod->store( os );
+    //   
+    fbOut.close(); 
+    
+    std::string s_destination_save_scratch ( "myRegressionMethodScratch.txt" );
+    
+    std::filebuf fbOutScratch;
+    fbOutScratch.open ( s_destination_save_scratch.c_str(), ios::out );
+    std::ostream osScratch (&fbOutScratch);
+    //
+    regressionMethodScratch->store( osScratch );
+    //   
+    fbOutScratch.close(); 
+  }
+  
+  
+  // TEST both regressionMethods to produce equal results
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yValuesTest; 
+  
+  std::string s_testData = conf.gS( "main", "testData", "toyExampleTest.data" );  
+  
+  readData ( s_testData, dataTest, yValuesTest );
+
+  
+  // ------------------------------------------
+  // ------------- REGRESSION --------------
+  // ------------------------------------------  
+
+
+  double holdOutLossIL ( 0.0 );
+  double holdOutLossScratch ( 0.0 );
+  
+  evaluateRegressionMethod ( holdOutLossIL, regressionMethod, dataTest, yValuesTest ); 
+  
+  evaluateRegressionMethod ( holdOutLossScratch, regressionMethodScratch, dataTest, yValuesTest );  
+  
+    
+  if ( verbose ) 
+  {
+    std::cerr << "holdOutLossIL: " << holdOutLossIL  << std::endl;
+  
+    std::cerr << "holdOutLossScratch: " << holdOutLossScratch << std::endl;
+  }
+  
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( holdOutLossIL, holdOutLossScratch, 1e-4);
+  
+  // don't waste memory
+  
+  delete regressionMethod;
+  delete regressionMethodScratch;
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  } 
+
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionOnlineLearnableAdd1Example done ===================== " << std::endl;   
+}
+
+void TestGPHIKRegression::testRegressionOnlineLearnableAddMultipleExamples()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionOnlineLearnableAddMultipleExamples ===================== " << std::endl;  
+
+  NICE::Config conf;
+  
+  conf.sB ( "GPHIKRegression", "eig_verbose", false);
+  conf.sS ( "GPHIKRegression", "optimization_method", "downhillsimplex"); // alternatives: downhillsimplex, greedy
+  // set higher built-in noise for hold-out regression estimation
+  conf.sD ( "GPHIKRegression", "noise", 1e-4 );  
+  
+  std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yValuesTrain; 
+  
+  readData ( s_trainData, dataTrain, yValuesTrain );
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  std::vector< const NICE::SparseVector *> examplesTrainPlus;
+  std::vector< const NICE::SparseVector *> examplesTrainMinus;
+  
+  examplesTrain.resize( dataTrain.rows() );
+  NICE::Vector yValuesPlus( dataTrain.rows() );
+  NICE::Vector yValuesMinus( dataTrain.rows() );  
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  
+  int cntPlus ( 0 );
+  int cntMinus ( 0 );
+  // note: we also slightly shuffle the order in which examples are added compared to the scratch regressor... 
+  // this should not result in any difference in behaviour...
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+    
+    if ( ( yValuesTrain[i] == 1 ) || ( yValuesTrain[i] == 2 ) )
+    {
+      examplesTrainPlus.push_back ( *exTrainIt );
+      yValuesPlus[cntPlus] = yValuesTrain[i];
+      cntPlus++;
+    }
+    else
+    {
+      examplesTrainMinus.push_back ( *exTrainIt );
+      yValuesMinus[cntMinus] = yValuesTrain[i];
+      cntMinus++;      
+    }
+  }
+  
+  yValuesPlus.resize ( examplesTrainPlus.size()  ) ;
+  yValuesMinus.resize( examplesTrainMinus.size() );  
+
+  
+  // TRAIN INITIAL REGRESSOR FROM SCRATCH
+  NICE::GPHIKRegression * regressionMethod;
+  regressionMethod = new NICE::GPHIKRegression ( &conf, "GPHIKRegression" );
+  
+  regressionMethod->train ( examplesTrainPlus , yValuesPlus );
+  
+  if ( verbose ) 
+  {
+    std::cerr << "Initial values: " << yValuesPlus << std::endl;
+    std::cerr << "Values to add: " << yValuesMinus << std::endl;
+  }
+  
+  
+  // RUN INCREMENTAL LEARNING
+  
+  bool performOptimizationAfterIncrement ( true );
+  
+  regressionMethod->addMultipleExamples ( examplesTrainMinus, yValuesMinus, performOptimizationAfterIncrement );
+  
+  
+  // TRAIN SECOND REGRESSOR FROM SCRATCH USING THE SAME OVERALL AMOUNT OF EXAMPLES
+
+  NICE::GPHIKRegression * regressionMethodScratch = new NICE::GPHIKRegression ( &conf, "GPHIKRegression" );
+  regressionMethodScratch->train ( examplesTrain, yValuesTrain );
+  
+  if ( verbose )
+    std::cerr << "trained both regressionMethods - now start evaluating them" << std::endl;
+  
+  
+  // TEST that both regressionMethods produce equal store-files
+  if ( writeRegressionObjectsForVerification )
+  {
+    std::string s_destination_save_IL ( "myRegressionMethodIL.txt" );
+    
+    std::filebuf fbOut;
+    fbOut.open ( s_destination_save_IL.c_str(), ios::out );
+    std::ostream os (&fbOut);
+    //
+    regressionMethod->store( os );
+    //   
+    fbOut.close(); 
+    
+    std::string s_destination_save_scratch ( "myRegressionMethodScratch.txt" );
+    
+    std::filebuf fbOutScratch;
+    fbOutScratch.open ( s_destination_save_scratch.c_str(), ios::out );
+    std::ostream osScratch (&fbOutScratch);
+    //
+    regressionMethodScratch->store( osScratch );
+    //   
+    fbOutScratch.close(); 
+  }
+  
+  
+  // TEST both regressionMethods to produce equal results
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yValuesTest; 
+  
+  std::string s_testData = conf.gS( "main", "testData", "toyExampleTest.data" );  
+  
+  readData ( s_testData, dataTest, yValuesTest );
+  
+
+  // ------------------------------------------
+  // ------------- REGRESSION --------------
+  // ------------------------------------------  
+
+
+  double holdOutLossIL ( 0.0 );
+  double holdOutLossScratch ( 0.0 );
+  
+  evaluateRegressionMethod ( holdOutLossIL, regressionMethod, dataTest, yValuesTest ); 
+  
+  evaluateRegressionMethod ( holdOutLossScratch, regressionMethodScratch, dataTest, yValuesTest );  
+  
+    
+  if ( verbose ) 
+  {
+    std::cerr << "holdOutLossIL: " << holdOutLossIL  << std::endl;
+  
+    std::cerr << "holdOutLossScratch: " << holdOutLossScratch << std::endl;
+  }
+  
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( holdOutLossIL, holdOutLossScratch, 1e-4);
+  
+  // don't waste memory
+  
+  delete regressionMethod;
+  delete regressionMethodScratch;
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  }   
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKRegression::testRegressionOnlineLearnableAddMultipleExamples done ===================== " << std::endl;   
+}    
+
+#endif

+ 37 - 0
tests/TestGPHIKRegression.h

@@ -0,0 +1,37 @@
+#ifndef _TESTGPHIKREGRESSION_H
+#define _TESTGPHIKREGRESSION_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <gp-hik-core/GPHIKRegression.h>
+
+/**
+ * CppUnit-Testcase. 
+ * @brief CppUnit-Testcase to verify that GPHIKRegression works as desired.
+ * @author Alexander Freytag
+ * @date 16-01-2014 (dd-mm-yyyy)
+ */
+class TestGPHIKRegression : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestGPHIKRegression );
+      CPPUNIT_TEST(testRegressionHoldInData);
+      CPPUNIT_TEST(testRegressionHoldOutData);
+      
+      CPPUNIT_TEST(testRegressionOnlineLearnableAdd1Example);
+      CPPUNIT_TEST(testRegressionOnlineLearnableAddMultipleExamples);
+      
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    void testRegressionHoldInData();
+    void testRegressionHoldOutData();    
+    
+    void testRegressionOnlineLearnableAdd1Example();
+    void testRegressionOnlineLearnableAddMultipleExamples();    
+};
+
+#endif // _TESTGPHIKREGRESSION_H

+ 0 - 15
todo

@@ -1,15 +0,0 @@
-[current crashs / bugs]
-- fix crash if labels +1 and -1 (instead of +1 and 0 ) are given
-- fix crash when balanced learning is activated in a binary scenario
-- fix crash when optimize noise is activated in binary scenario without DHS
-- allow to add a new class when current setting is binary (i.e., we need to add two alpha vecs instead of one)
-
-[nice to have]
-
-- separate verbose-flags (optimization, training, classification, ...) needed?
-
-- tutorials, numbers, demo-programs, ...
-
-- consistency checks (dimensions etc.)
-
-- more documentation (e.g., train of GPHIKClassifierNice train methods - what is binLabels)