
class labels are now uints

Alexander Freytag · 9 years ago · commit 49997136fc

File diff suppressed because it is too large
+ 236 - 173
FMKGPHyperparameterOptimization.cpp


+ 73 - 34
FMKGPHyperparameterOptimization.h

@@ -92,12 +92,12 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     typedef VVector PrecomputedType;
 
     /** precomputed arrays A (1 per class) needed for classification without quantization  */
-    std::map< int, PrecomputedType > precomputedA;    
+    std::map< uint, PrecomputedType > precomputedA;    
     /** precomputed arrays B (1 per class) needed for classification without quantization  */
-    std::map< int, PrecomputedType > precomputedB;
+    std::map< uint, PrecomputedType > precomputedB;
     
     /** precomputed LUTs (1 per class) needed for classification with quantization  */
-    std::map< int, double * > precomputedT;  
+    std::map< uint, double * > precomputedT;  
     
     //! storing the labels is needed for Incremental Learning (re-optimization)
     NICE::Vector labels; 
@@ -108,7 +108,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     int i_binaryLabelNegative;
     
     //! contains all class numbers of the currently known classes
-    std::set<int> knownClasses;
+    std::set<uint> knownClasses;
     
     //! container for multiple kernel matrices (e.g., a data-containing kernel matrix (GMHIKernel) and a noise matrix (IKMNoise) )
     NICE::IKMLinearCombination * ikmsum;    
@@ -197,7 +197,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     bool b_usePreviousAlphas;
     
     //! store alpha vectors for good initializations in the IL setting, if activated
-    std::map<int, NICE::Vector> previousAlphas;     
+    std::map<uint, NICE::Vector> previousAlphas;     
 
     
     /////////////////////////
@@ -211,37 +211,47 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @brief calculate binary label vectors using a multi-class label vector
     * @author Alexander Freytag
     */    
-    int prepareBinaryLabels ( std::map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses);     
+    uint prepareBinaryLabels ( std::map<uint, NICE::Vector> & _binaryLabels, 
+                              const NICE::Vector & _y , 
+                              std::set<uint> & _myClasses
+                            );     
     
     /**
     * @brief prepare the GPLike object for given binary labels and already given ikmsum-object
     * @author Alexander Freytag
     */
-    inline void setupGPLikelihoodApprox( GPLikelihoodApprox * & gplike, const std::map<int, NICE::Vector> & binaryLabels, uint & parameterVectorSize);    
+    inline void setupGPLikelihoodApprox( GPLikelihoodApprox * & _gplike, 
+                                         const std::map<uint, NICE::Vector> & _binaryLabels,
+                                         uint & _parameterVectorSize
+                                       );    
     
     /**
     * @brief update eigenvectors and eigenvalues for given ikmsum-objects and a method to compute eigenvalues
     * @author Alexander Freytag
     */
-    inline void updateEigenDecomposition( const int & i_noEigenValues );
+    inline void updateEigenDecomposition( const int & _noEigenValues );
     
     /**
     * @brief core of the optimize-functions
     * @author Alexander Freytag
     */
-    inline void performOptimization( GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
+    inline void performOptimization( GPLikelihoodApprox & gplike, 
+                                     const uint & parameterVectorSize
+                                   );
     
     /**
     * @brief apply the optimized transformation values to the underlying features
     * @author Alexander Freytag
     */    
-    inline void transformFeaturesWithOptimalParameters(const GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
+    inline void transformFeaturesWithOptimalParameters(const GPLikelihoodApprox & _gplike, 
+                                                       const uint & _parameterVectorSize
+                                                      );
     
     /**
     * @brief build the resulting matrices A and B as well as lookup tables T for fast evaluations using the optimized parameter settings
     * @author Alexander Freytag
     */
-    inline void computeMatricesAndLUTs( const GPLikelihoodApprox & gplike);
+    inline void computeMatricesAndLUTs( const GPLikelihoodApprox & _gplike);
     
      
 
@@ -250,8 +260,8 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @author Alexander Freytag
     */           
     void updateAfterIncrement (
-      const std::set<int> newClasses,
-      const bool & performOptimizationAfterIncrement = false
+      const std::set<uint> _newClasses,
+      const bool & _performOptimizationAfterIncrement = false
     );    
   
 
@@ -330,7 +340,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @brief Get the currently known class numbers
     * @author Alexander Freytag
     */    
-    std::set<int> getKnownClassNumbers ( ) const;
+    std::set<uint> getKnownClassNumbers ( ) const;
     
     /**
      * @brief Change between classification and regression, only allowed if not trained. Otherwise, exceptions will be thrown...
@@ -351,7 +361,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @author Alexander Freytag
      * @date 06-02-2014 (dd-mm-yyyy)
      */        
-    void setNrOfEigenvaluesToConsiderForVarApprox ( const int & i_nrOfEigenvaluesToConsiderForVarApprox );
+    void setNrOfEigenvaluesToConsiderForVarApprox ( const int & _nrOfEigenvaluesToConsiderForVarApprox );
     
     ///////////////////// ///////////////////// /////////////////////
     //                      CLASSIFIER STUFF
@@ -368,7 +378,12 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param positives set of positive examples (indices)
     * @param negatives set of negative examples (indices)
     */
-    void optimizeBinary ( const sparse_t & data, const NICE::Vector & y, const std::set<int> & positives, const std::set<int> & negatives, double noise );
+    void optimizeBinary ( const sparse_t & data, 
+                          const NICE::Vector & y, 
+                          const std::set<uint> & positives, 
+                          const std::set<uint> & negatives, 
+                          double noise 
+                        );
 
     /**
     * @brief Perform hyperparameter optimization for GP multi-class or binary problems
@@ -378,7 +393,11 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param y label vector with multi-class labels
     * @param examples mapping of example index to new index
     */
-    void optimize ( const sparse_t & data, const NICE::Vector & y, const std::map<int, int> & examples, double noise );
+    void optimize ( const sparse_t & data, 
+                    const NICE::Vector & y, 
+                    const std::map<uint, uint> & examples, 
+                    double noise 
+                  );
 #endif
 
     /**
@@ -394,7 +413,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @param binLabels vector of binary label vectors (1,-1) and corresponding class no.
     */
-    void optimize ( std::map<int, NICE::Vector> & binaryLabels );  
+    void optimize ( std::map<uint, NICE::Vector> & _binaryLabels );  
    
     /**
    * @brief Compute the necessary variables for approximations of predictive variance (LUTs), assuming an already initialized fmk object
@@ -418,7 +437,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @return class number achieving the best score
     */
-    int classify ( const NICE::SparseVector & x, SparseVector & scores ) const;
+    uint classify ( const NICE::SparseVector & _x, 
+                   SparseVector & _scores 
+                 ) const;
     
     /**
     * @brief classify an example that is given as non-sparse vector
@@ -432,7 +453,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @return class number achieving the best score
     */
-    int classify ( const NICE::Vector & x, SparseVector & scores ) const;    
+    uint classify ( const NICE::Vector & _x, 
+                    SparseVector & _scores 
+                  ) const;    
 
     //////////////////////////////////////////
     // variance computation: sparse inputs
@@ -446,7 +469,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & x, double & predVariance ) const;
+    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & _x, 
+                                                   double & _predVariance 
+                                                  ) const;
     
     /**
     * @brief compute predictive variance for a given test example using a fine approximation  (k eigenvalues and eigenvectors to approximate the quadratic term)
@@ -456,7 +481,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & x, double & predVariance ) const; 
+    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & _x, 
+                                                  double & _predVariance 
+                                                 ) const; 
     
     /**
     * @brief compute exact predictive variance for a given test example using ILS methods (exact, but more time consuming than approx versions)
@@ -466,7 +493,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceExact(const NICE::SparseVector & x, double & predVariance ) const; 
+    void computePredictiveVarianceExact(const NICE::SparseVector & _x, 
+                                        double & _predVariance 
+                                       ) const; 
     
     
     //////////////////////////////////////////
@@ -481,7 +510,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateRough(const NICE::Vector & x, double & predVariance ) const;    
+    void computePredictiveVarianceApproximateRough(const NICE::Vector & _x, 
+                                                   double & _predVariance 
+                                                  ) const;    
 
    
     
@@ -493,7 +524,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateFine(const NICE::Vector & x, double & predVariance ) const;      
+    void computePredictiveVarianceApproximateFine(const NICE::Vector & _x, 
+                                                  double & _predVariance 
+                                                 ) const;      
     
 
     
@@ -505,7 +538,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceExact(const NICE::Vector & x, double & predVariance ) const;  
+    void computePredictiveVarianceExact(const NICE::Vector & _x, 
+                                        double & _predVariance 
+                                       ) const;  
     
     
     
@@ -519,13 +554,17 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @brief Load current object from external file (stream)
      * @author Alexander Freytag
      */     
-    void restore ( std::istream & is, int format = 0 );
+    void restore ( std::istream & _is, 
+                   int _format = 0 
+                 );
     
     /** 
      * @brief Save current object to external file (stream)
      * @author Alexander Freytag
      */      
-    void store ( std::ostream & os, int format = 0 ) const;
+    void store ( std::ostream & _os,
+                 int _format = 0 
+               ) const;
     
     /** 
      * @brief Clear current object
@@ -541,18 +580,18 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @brief add a new example
      * @author Alexander Freytag
      */       
-    virtual void addExample( const NICE::SparseVector * example, 
-                             const double & label, 
-                             const bool & performOptimizationAfterIncrement = true
+    virtual void addExample( const NICE::SparseVector * _example, 
+                             const double & _label, 
+                             const bool & _performOptimizationAfterIncrement = true
                            );
 
     /** 
      * @brief add several new examples
      * @author Alexander Freytag
      */    
-    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-                                      const NICE::Vector & newLabels,
-                                      const bool & performOptimizationAfterIncrement = true
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                      const NICE::Vector & _newLabels,
+                                      const bool & _performOptimizationAfterIncrement = true
                                     );         
 };
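
Note: the header above exposes store / restore via the Persistent interface (default _format = 0). A minimal round-trip sketch, assuming the default constructor used elsewhere in this commit; file name and include path are hypothetical:

```cpp
// Persistence round-trip sketch (file name and include path assumed).
#include <fstream>
#include "gp-hik-core/FMKGPHyperparameterOptimization.h"

void saveAndReload ( const NICE::FMKGPHyperparameterOptimization & gphyper )
{
  std::ofstream ofs ( "gphyper.model" );
  gphyper.store ( ofs );              // uses the default _format = 0
  ofs.close();                        // flush before re-reading

  NICE::FMKGPHyperparameterOptimization restored;
  std::ifstream ifs ( "gphyper.model" );
  restored.restore ( ifs );           // rebuilds matrices, LUTs, eigenvalues, ...
}
```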
 

+ 202 - 139
GPHIKClassifier.cpp

@@ -46,7 +46,9 @@ GPHIKClassifier::GPHIKClassifier( )
   
 }
 
-GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSection )
+GPHIKClassifier::GPHIKClassifier( const Config *_conf, 
+                                  const string & _confSection 
+                                )
 {
   ///////////
   // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
@@ -61,13 +63,13 @@ GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSecti
   // here comes the new code part different from the empty constructor
   ///////////
   
-  this->confSection = s_confSection;  
+  this->confSection = _confSection;  
   
   // if no config file was given, we either restore the classifier from an external file, or run ::init with 
  // an empty config (thereby using default values) when calling the train-method
-  if ( conf != NULL )
+  if ( _conf != NULL )
   {
-    this->initFromConfig( conf, confSection );
+    this->initFromConfig( _conf, _confSection );
   }
   else
   {
@@ -83,19 +85,22 @@ GPHIKClassifier::~GPHIKClassifier()
     delete gphyper;
 }
 
-void GPHIKClassifier::initFromConfig(const Config *conf, const string & s_confSection)
+void GPHIKClassifier::initFromConfig(const Config *_conf, 
+                                     const string & _confSection
+                                    )
 { 
-  this->noise = conf->gD(confSection, "noise", 0.01);
+  this->noise       = _conf->gD(confSection, "noise", 0.01);
 
-  this->confSection = confSection;
-  this->verbose = conf->gB(confSection, "verbose", false);
-  this->debug = conf->gB(confSection, "debug", false);
-  this->uncertaintyPredictionForClassification = conf->gB( confSection, "uncertaintyPredictionForClassification", false );
+  this->confSection = _confSection;
+  this->verbose     = _conf->gB(confSection, "verbose", false);
+  this->debug       = _conf->gB(confSection, "debug", false);
+  this->uncertaintyPredictionForClassification 
+                    = _conf->gB( confSection, "uncertaintyPredictionForClassification", false );
   
 
    
   //how do we approximate the predictive variance for classification uncertainty?
-  string s_varianceApproximation = conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
+  string s_varianceApproximation = _conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
   if ( (s_varianceApproximation.compare("approximate_rough") == 0) || ((s_varianceApproximation.compare("1") == 0)) )
   {
     this->varianceApproximation = APPROXIMATE_ROUGH;
@@ -108,7 +113,7 @@ void GPHIKClassifier::initFromConfig(const Config *conf, const string & s_confSe
     this->varianceApproximation = APPROXIMATE_FINE;    
     
     //security check - compute at least one eigenvalue for this approximation strategy
-    this->gphyper->setNrOfEigenvaluesToConsiderForVarApprox ( std::max( conf->gI(confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 ), 1) );
+    this->gphyper->setNrOfEigenvaluesToConsiderForVarApprox ( std::max( _conf->gI(confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 ), 1) );
   }
   else if ( (s_varianceApproximation.compare("exact") == 0)  || ((s_varianceApproximation.compare("3") == 0)) )
   {
@@ -129,14 +134,14 @@ void GPHIKClassifier::initFromConfig(const Config *conf, const string & s_confSe
     std::cerr << "varianceApproximationStrategy: " << s_varianceApproximation  << std::endl;
   
   //NOTE init all member pointer variables here as well
-  this->gphyper->initFromConfig ( conf, confSection /*possibly delete the handing of confSection*/);
+  this->gphyper->initFromConfig ( _conf, _confSection /*possibly delete the handling of confSection*/);
 }
 
 ///////////////////// ///////////////////// /////////////////////
 //                         GET / SET
 ///////////////////// ///////////////////// ///////////////////// 
 
-std::set<int> GPHIKClassifier::getKnownClassNumbers ( ) const
+std::set<uint> GPHIKClassifier::getKnownClassNumbers ( ) const
 {
   if ( ! this->b_isTrained )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
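
Note: for reference, a hedged configuration sketch matching the keys parsed in initFromConfig above. It assumes the usual NICE::Config setters (sD/sS/sI/sB) and include paths, neither of which is part of this diff:

```cpp
#include "core/basics/Config.h"          // include paths assumed
#include "gp-hik-core/GPHIKClassifier.h"

NICE::GPHIKClassifier * buildClassifier ()
{
  NICE::Config conf;
  conf.sD ( "GPHIKClassifier", "noise", 0.01 );
  conf.sS ( "GPHIKClassifier", "varianceApproximation", "approximate_fine" ); // or "approximate_rough", "exact", "none"
  conf.sI ( "GPHIKClassifier", "nrOfEigenvaluesToConsiderForVarApprox", 2 );
  conf.sB ( "GPHIKClassifier", "uncertaintyPredictionForClassification", true );

  // second ctor argument defaults to "GPHIKClassifier"
  return new NICE::GPHIKClassifier ( &conf );
}
```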
@@ -149,52 +154,84 @@ std::set<int> GPHIKClassifier::getKnownClassNumbers ( ) const
 //                      CLASSIFIER STUFF
 ///////////////////// ///////////////////// /////////////////////
 
-void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores ) const
+void GPHIKClassifier::classify ( const SparseVector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores 
+                               ) const
 {
   double tmpUncertainty;
-  this->classify( example, result, scores, tmpUncertainty );
+  this->classify( _example, _result, _scores, tmpUncertainty );
 }
 
-void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores ) const
+void GPHIKClassifier::classify ( const NICE::Vector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores 
+                               ) const
 {
   double tmpUncertainty;
-  this->classify( example, result, scores, tmpUncertainty );
+  this->classify( _example, _result, _scores, tmpUncertainty );
 }
 
-void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores, double & uncertainty ) const
+void GPHIKClassifier::classify ( const SparseVector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores, 
+                                 double & _uncertainty 
+                               ) const
 {
   if ( ! this->b_isTrained )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );
     
-  scores.clear(); 
+  _scores.clear(); 
+  
+  if ( this->debug )
+  {
+    std::cerr << "GPHIKClassifier::classify (sparse)" << std::endl;
+    _example->store( std::cerr );  
+  }
  
-  result = gphyper->classify ( *example, scores );
+  _result = gphyper->classify ( *_example, _scores );
 
-  if ( scores.size() == 0 ) {
-    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  if ( this->debug )
+  {  
+    _scores.store ( std::cerr ); 
+    std::cerr << "_result: " << _result << std::endl;
+  }
+
+  if ( _scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << _example->size() );
   }
   
-  if (uncertaintyPredictionForClassification)
+  if ( this->uncertaintyPredictionForClassification )
   {
-    if (varianceApproximation != NONE)
+    if ( this->debug )
+    {
+      std::cerr << "GPHIKClassifier::classify -- uncertaintyPredictionForClassification is true"  << std::endl;
+    }
+    
+    if ( this->varianceApproximation != NONE)
     {
-      this->predictUncertainty( example, uncertainty );
+      this->predictUncertainty( _example, _uncertainty );
     }  
     else
     {
-      //do nothing
-      uncertainty = std::numeric_limits<double>::max();
+//       //do nothing
+      _uncertainty = std::numeric_limits<double>::max();
     }
   }
   else
   {
+    if ( this->debug )
+    {
+      std::cerr << "GPHIKClassifier::classify -- uncertaintyPredictionForClassification is false"  << std::endl;
+    }    
+    
     //do nothing
-    uncertainty = std::numeric_limits<double>::max();
+    _uncertainty = std::numeric_limits<double>::max();
   }    
 }
 
 void GPHIKClassifier::classify ( const NICE::Vector * _example,  
-                                 int & _result, 
+                                 uint & _result, 
                                  SparseVector & _scores, 
                                  double & _uncertainty 
                                ) const
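
Note: when uncertainty prediction is disabled, or varianceApproximation is NONE, _uncertainty is set to std::numeric_limits<double>::max() as a sentinel. A small caller-side sketch (helper name hypothetical) for treating that value as "not computed":

```cpp
#include <limits>

// classify(...) above reports DBL_MAX when no predictive variance was computed
bool uncertaintyWasComputed ( double uncertainty )
{
  return uncertainty != std::numeric_limits<double>::max();
}
```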
@@ -205,16 +242,27 @@ void GPHIKClassifier::classify ( const NICE::Vector * _example,
   
   _scores.clear();
   
-  _result = gphyper->classify ( *_example, _scores );
+  if ( this->debug )
+  {  
+    std::cerr << "GPHIKClassifier::classify (non-sparse)" << std::endl;
+    std::cerr << *_example << std::endl;
+  }
+    
+  _result = this->gphyper->classify ( *_example, _scores );
+  
+  if ( this->debug )
+  {  
+    std::cerr << "GPHIKClassifier::classify (non-sparse) -- classification done " << std::endl;
+  }
  
 
   if ( _scores.size() == 0 ) {
     fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << _example->size() );
   }
     
-  if (uncertaintyPredictionForClassification)
+  if ( this->uncertaintyPredictionForClassification )
   {
-    if (varianceApproximation != NONE)
+    if ( this->varianceApproximation != NONE)
     {
       this->predictUncertainty( _example, _uncertainty );
     }  
@@ -232,13 +280,15 @@ void GPHIKClassifier::classify ( const NICE::Vector * _example,
 }
 
 /** training process */
-void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels )
+void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                              const NICE::Vector & _labels 
+                            )
 {
   
  //FIXME add check whether the classifier has been trained already. If so, discard all previous results.
     
   // security-check: examples and labels have to be of same size
-  if ( examples.size() != labels.size() ) 
+  if ( _examples.size() != _labels.size() ) 
   {
     fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
   }  
@@ -251,9 +301,9 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
   Timer t;
   t.start();
   
-  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
+  FastMinKernel *fmk = new FastMinKernel ( _examples, noise, this->debug );
 
-  gphyper->setFastMinKernel ( fmk ); 
+  this->gphyper->setFastMinKernel ( fmk ); 
   
   t.stop();
   if (verbose)
@@ -261,25 +311,25 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
  
 
   if (verbose)
-    cerr << "Learning ..." << endl;
+    std::cerr << "Learning ..." << endl;
 
   // go go go
-  gphyper->optimize ( labels );
+  this->gphyper->optimize ( _labels );
   if (verbose)
     std::cerr << "optimization done" << std::endl;
   
-  if ( ( varianceApproximation != NONE ) )
+  if ( ( this->varianceApproximation != NONE ) )
   {    
-    switch (varianceApproximation)    
+    switch ( this->varianceApproximation )    
     {
       case APPROXIMATE_ROUGH:
       {
-        gphyper->prepareVarianceApproximationRough();
+        this->gphyper->prepareVarianceApproximationRough();
         break;
       }
       case APPROXIMATE_FINE:
       {
-        gphyper->prepareVarianceApproximationFine();
+        this->gphyper->prepareVarianceApproximationFine();
         break;
       }    
       case EXACT:
@@ -303,45 +353,49 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
 }
 
 /** training process */
-void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
+void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                              std::map<uint, NICE::Vector> & _binLabels 
+                            )
 { 
   // security-check: examples and labels have to be of same size
-  for ( std::map< int, NICE::Vector >::const_iterator binLabIt = binLabels.begin();
-        binLabIt != binLabels.end();
+  for ( std::map< uint, NICE::Vector >::const_iterator binLabIt = _binLabels.begin();
+        binLabIt != _binLabels.end();
         binLabIt++ 
       )
   {
-    if ( examples.size() != binLabIt->second.size() ) 
+    if ( _examples.size() != binLabIt->second.size() ) 
     {
       fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
     }
   }
   
-  if (verbose)
+  if ( this->verbose )
     std::cerr << "GPHIKClassifier::train" << std::endl;
  
   Timer t;
   t.start();
   
-  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
-  gphyper->setFastMinKernel ( fmk );  
+  FastMinKernel *fmk = new FastMinKernel ( _examples, noise, this->debug );
+  this->gphyper->setFastMinKernel ( fmk );  
   
   t.stop();
-  if (verbose)
+  if ( this->verbose )
     std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
 
 
 
-  if (verbose)
-    cerr << "Learning ..." << endl;
+  if ( this->verbose )
+    std::cerr << "Learning ..." << std::endl;
+  
   // go go go
-  gphyper->optimize ( binLabels );
-  if (verbose)
+  this->gphyper->optimize ( _binLabels );
+  
+  if ( this->verbose )
     std::cerr << "optimization done, now prepare for the uncertainty prediction" << std::endl;
   
-  if ( ( varianceApproximation != NONE ) )
+  if ( ( this->varianceApproximation != NONE ) )
   {    
-    switch (varianceApproximation)    
+    switch ( this->varianceApproximation )    
     {
       case APPROXIMATE_ROUGH:
       {
@@ -369,7 +423,7 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
   this->b_isTrained = true;
 
   // clean up all examples ??
-  if (verbose)
+  if ( this->verbose )
     std::cerr << "Learning finished" << std::endl;
 }
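
Note: the overload above consumes per-class (1,-1) label vectors directly, now keyed by uint class numbers. A hedged sketch of building such a map from a multi-class label vector (helper name and include path are hypothetical; uint is the library's unsigned int, and the NICE::Vector fill constructor is assumed):

```cpp
#include <map>
#include <vector>
#include "gp-hik-core/GPHIKClassifier.h" // include path assumed

void trainOneVsRest ( NICE::GPHIKClassifier & classifier,
                      const std::vector< const NICE::SparseVector * > & examples,
                      const NICE::Vector & labels )
{
  std::map<uint, NICE::Vector> binLabels;

  // create one (1,-1) vector per occurring class number
  for ( uint i = 0; i < labels.size(); i++ )
  {
    uint c = static_cast<uint>( labels[i] );
    if ( binLabels.find ( c ) == binLabels.end() )
      binLabels.insert ( std::make_pair ( c, NICE::Vector ( labels.size(), -1.0 ) ) );
  }

  // flip the entries belonging to each class to +1
  for ( uint i = 0; i < labels.size(); i++ )
    binLabels[ static_cast<uint>( labels[i] ) ][i] = 1.0;

  classifier.train ( examples, binLabels );
}
```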
 
@@ -380,27 +434,30 @@ GPHIKClassifier *GPHIKClassifier::clone () const
   return NULL;
 }
   
-void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const
+void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * _example, 
+                                          double & _uncertainty 
+                                        ) const
 {  
-  if (gphyper == NULL)
+  if ( this->gphyper == NULL )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
   
  // we directly store the predictive variances in the vector that will later contain the classification uncertainties, to save storage
-  switch (varianceApproximation)    
+  switch ( this->varianceApproximation )    
   {
     case APPROXIMATE_ROUGH:
     {
-      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceApproximateRough( *_example, _uncertainty );
       break;
     }
     case APPROXIMATE_FINE:
     {
-      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      std::cerr << "gphyper->computePredictiveVarianceApproximateFine" << std::endl;
+      this->gphyper->computePredictiveVarianceApproximateFine( *_example, _uncertainty );
       break;
     }    
     case EXACT:
     {
-      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceExact( *_example, _uncertainty );
       break;
     }
     default:
@@ -410,27 +467,29 @@ void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, do
   }
 }
 
-void GPHIKClassifier::predictUncertainty( const NICE::Vector * example, double & uncertainty ) const
+void GPHIKClassifier::predictUncertainty( const NICE::Vector * _example, 
+                                          double & _uncertainty 
+                                        ) const
 {  
-  if (gphyper == NULL)
+  if ( this->gphyper == NULL )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
   
  // we directly store the predictive variances in the vector that will later contain the classification uncertainties, to save storage
-  switch (varianceApproximation)    
+  switch ( this->varianceApproximation )    
   {
     case APPROXIMATE_ROUGH:
     {
-      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceApproximateRough( *_example, _uncertainty );
       break;
     }
     case APPROXIMATE_FINE:
     {
-      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceApproximateFine( *_example, _uncertainty );
       break;
     }    
     case EXACT:
     {
-      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceExact( *_example, _uncertainty );
       break;
     }
     default:
@@ -444,7 +503,9 @@ void GPHIKClassifier::predictUncertainty( const NICE::Vector * example, double &
 // interface specific methods for store and restore
 ///////////////////// INTERFACE PERSISTENT ///////////////////// 
 
-void GPHIKClassifier::restore ( std::istream & is, int format )
+void GPHIKClassifier::restore ( std::istream & _is, 
+                                int _format 
+                              )
 {
   //delete everything we knew so far...
   this->clear();
@@ -454,13 +515,13 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
   b_restoreVerbose = true;
 #endif  
   
-  if ( is.good() )
+  if ( _is.good() )
   {
     if ( b_restoreVerbose ) 
       std::cerr << " restore GPHIKClassifier" << std::endl;
     
     std::string tmp;
-    is >> tmp; //class name 
+    _is >> tmp; //class name 
     
     if ( ! this->isStartTag( tmp, "GPHIKClassifier" ) )
     {
@@ -474,13 +535,13 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
       gphyper = NULL;
     }    
     
-    is.precision (numeric_limits<double>::digits10 + 1);
+    _is.precision (numeric_limits<double>::digits10 + 1);
     
     bool b_endOfBlock ( false ) ;
     
     while ( !b_endOfBlock )
     {
-      is >> tmp; // start of block 
+      _is >> tmp; // start of block 
       
       if ( this->isEndTag( tmp, "GPHIKClassifier" ) )
       {
@@ -495,58 +556,58 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
       
       if ( tmp.compare("confSection") == 0 )
       {
-        is >> confSection;        
-        is >> tmp; // end of block 
+        _is >> confSection;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("gphyper") == 0 )
       {
-        if ( gphyper == NULL )
-          gphyper = new NICE::FMKGPHyperparameterOptimization();
+        if ( this->gphyper == NULL )
+          this->gphyper = new NICE::FMKGPHyperparameterOptimization();
         
        // then, load everything that we stored explicitly,
         // including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
-        gphyper->restore(is, format);  
+        this->gphyper->restore( _is, _format );  
           
-        is >> tmp; // end of block 
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }   
       else if ( tmp.compare("b_isTrained") == 0 )
       {
-        is >> b_isTrained;        
-        is >> tmp; // end of block 
+        _is >> b_isTrained;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("noise") == 0 )
       {
-        is >> noise;        
-        is >> tmp; // end of block 
+        _is >> noise;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }      
       else if ( tmp.compare("verbose") == 0 )
       {
-        is >> verbose;        
-        is >> tmp; // end of block 
+        _is >> verbose;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }      
       else if ( tmp.compare("debug") == 0 )
       {
-        is >> debug;        
-        is >> tmp; // end of block 
+        _is >> debug;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }      
       else if ( tmp.compare("uncertaintyPredictionForClassification") == 0 )
       {
-        is >> uncertaintyPredictionForClassification;        
-        is >> tmp; // end of block 
+        _is >> uncertaintyPredictionForClassification;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("varianceApproximation") == 0 )
       {
         unsigned int ui_varianceApproximation;
-        is >> ui_varianceApproximation;        
+        _is >> ui_varianceApproximation;        
         varianceApproximation = static_cast<VarianceApproximation> ( ui_varianceApproximation );
-        is >> tmp; // end of block 
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else
@@ -563,59 +624,61 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
   }
 }
 
-void GPHIKClassifier::store ( std::ostream & os, int format ) const
+void GPHIKClassifier::store ( std::ostream & _os, 
+                              int _format 
+                            ) const
 { 
-  if (os.good())
+  if ( _os.good() )
   {
     // show starting point
-    os << this->createStartTag( "GPHIKClassifier" ) << std::endl;    
+    _os << this->createStartTag( "GPHIKClassifier" ) << std::endl;    
     
-    os.precision (numeric_limits<double>::digits10 + 1);
+    _os.precision (numeric_limits<double>::digits10 + 1);
     
-    os << this->createStartTag( "confSection" ) << std::endl;
-    os << confSection << std::endl;
-    os << this->createEndTag( "confSection" ) << std::endl; 
+    _os << this->createStartTag( "confSection" ) << std::endl;
+    _os << confSection << std::endl;
+    _os << this->createEndTag( "confSection" ) << std::endl; 
    
-    os << this->createStartTag( "gphyper" ) << std::endl;
+    _os << this->createStartTag( "gphyper" ) << std::endl;
     //store the underlying data
     //will be done in gphyper->store(of,format)
     //store the optimized parameter values and all that stuff
-    gphyper->store(os, format);
-    os << this->createEndTag( "gphyper" ) << std::endl; 
+    this->gphyper->store( _os, _format );
+    _os << this->createEndTag( "gphyper" ) << std::endl; 
     
     
     /////////////////////////////////////////////////////////
     // store variables which we previously set via config    
     /////////////////////////////////////////////////////////
-    os << this->createStartTag( "b_isTrained" ) << std::endl;
-    os << b_isTrained << std::endl;
-    os << this->createEndTag( "b_isTrained" ) << std::endl; 
+    _os << this->createStartTag( "b_isTrained" ) << std::endl;
+    _os << b_isTrained << std::endl;
+    _os << this->createEndTag( "b_isTrained" ) << std::endl; 
     
-    os << this->createStartTag( "noise" ) << std::endl;
-    os << noise << std::endl;
-    os << this->createEndTag( "noise" ) << std::endl;
+    _os << this->createStartTag( "noise" ) << std::endl;
+    _os << noise << std::endl;
+    _os << this->createEndTag( "noise" ) << std::endl;
     
     
-    os << this->createStartTag( "verbose" ) << std::endl;
-    os << verbose << std::endl;
-    os << this->createEndTag( "verbose" ) << std::endl; 
+    _os << this->createStartTag( "verbose" ) << std::endl;
+    _os << verbose << std::endl;
+    _os << this->createEndTag( "verbose" ) << std::endl; 
     
-    os << this->createStartTag( "debug" ) << std::endl;
-    os << debug << std::endl;
-    os << this->createEndTag( "debug" ) << std::endl; 
+    _os << this->createStartTag( "debug" ) << std::endl;
+    _os << debug << std::endl;
+    _os << this->createEndTag( "debug" ) << std::endl; 
     
-    os << this->createStartTag( "uncertaintyPredictionForClassification" ) << std::endl;
-    os << uncertaintyPredictionForClassification << std::endl;
-    os << this->createEndTag( "uncertaintyPredictionForClassification" ) << std::endl;
+    _os << this->createStartTag( "uncertaintyPredictionForClassification" ) << std::endl;
+    _os << uncertaintyPredictionForClassification << std::endl;
+    _os << this->createEndTag( "uncertaintyPredictionForClassification" ) << std::endl;
     
-    os << this->createStartTag( "varianceApproximation" ) << std::endl;
-    os << varianceApproximation << std::endl;
-    os << this->createEndTag( "varianceApproximation" ) << std::endl;     
+    _os << this->createStartTag( "varianceApproximation" ) << std::endl;
+    _os << varianceApproximation << std::endl;
+    _os << this->createEndTag( "varianceApproximation" ) << std::endl;     
   
     
     
     // done
-    os << this->createEndTag( "GPHIKClassifier" ) << std::endl;    
+    _os << this->createEndTag( "GPHIKClassifier" ) << std::endl;    
   }
   else
   {
@@ -625,10 +688,10 @@ void GPHIKClassifier::store ( std::ostream & os, int format ) const
 
 void GPHIKClassifier::clear ()
 {
-  if ( gphyper != NULL )
+  if ( this->gphyper != NULL )
   {
-    delete gphyper;
-    gphyper = NULL;
+    delete this->gphyper;
+    this->gphyper = NULL;
   }
 }
 
@@ -636,10 +699,10 @@ void GPHIKClassifier::clear ()
 // interface specific methods for incremental extensions
 ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
 
-void GPHIKClassifier::addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement
-			   )
+void GPHIKClassifier::addExample( const NICE::SparseVector * _example, 
+                                  const double & _label, 
+                                  const bool & _performOptimizationAfterIncrement
+                                )
 {
   
   if ( ! this->b_isTrained )
@@ -648,25 +711,25 @@ void GPHIKClassifier::addExample( const NICE::SparseVector * example,
     std::cerr << "Classifier not initially trained yet -- run initial training instead of incremental extension!"  << std::endl;
      
     std::vector< const NICE::SparseVector *> examplesVec;
-    examplesVec.push_back ( example );
+    examplesVec.push_back ( _example );
     
-    NICE::Vector labelsVec ( 1 , label );
+    NICE::Vector labelsVec ( 1 , _label );
     
     this->train ( examplesVec, labelsVec );
   }
   else
   {
-    this->gphyper->addExample( example, label, performOptimizationAfterIncrement );  
+    this->gphyper->addExample( _example, _label, _performOptimizationAfterIncrement );  
   }
 }
 
-void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement
-				    )
+void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                           const NICE::Vector & _newLabels,
+                                           const bool & _performOptimizationAfterIncrement
+                                         )
 {
   //are new examples available? If not, nothing has to be done
-  if ( newExamples.size() < 1)
+  if ( _newExamples.size() < 1)
     return;
 
   if ( ! this->b_isTrained )
@@ -674,10 +737,10 @@ void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::Sparse
     //call train method instead
     std::cerr << "Classifier not initially trained yet -- run initial training instead of incremental extension!"  << std::endl;
     
-    this->train ( newExamples, newLabels );    
+    this->train ( _newExamples, _newLabels );    
   }
   else
   {
-    this->gphyper->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );     
+    this->gphyper->addMultipleExamples( _newExamples, _newLabels, _performOptimizationAfterIncrement );     
   }
 }
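
Note: to tie the pieces together, a hedged end-to-end sketch against the signatures introduced in this commit; data acquisition and the include path are assumptions:

```cpp
#include <vector>
#include "gp-hik-core/GPHIKClassifier.h" // include path assumed

void demo ( const std::vector< const NICE::SparseVector * > & examples,
            const NICE::Vector & labels,
            const NICE::SparseVector * testExample )
{
  NICE::Config conf; // empty config -> default settings
  NICE::GPHIKClassifier classifier ( &conf, "GPHIKClassifier" );

  // multi-class training; binary label vectors are derived internally
  classifier.train ( examples, labels );

  uint result;                 // class numbers are uints after this commit
  NICE::SparseVector scores;
  double uncertainty;
  classifier.classify ( testExample, result, scores, uncertainty );
}
```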

+ 51 - 21
GPHIKClassifier.h

@@ -105,7 +105,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief standard constructor
      * @author Alexander Freytag
      */
-    GPHIKClassifier( const NICE::Config *conf , const std::string & s_confSection = "GPHIKClassifier" );
+    GPHIKClassifier( const NICE::Config *_conf , 
+                     const std::string & s_confSection = "GPHIKClassifier" 
+                   );
       
     /**
      * @brief simple destructor
@@ -119,7 +121,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param conf Config file to specify variable settings
     * @param s_confSection
     */    
-    void initFromConfig(const NICE::Config *conf, const std::string & s_confSection);    
+    void initFromConfig(const NICE::Config *_conf, 
+                        const std::string & s_confSection
+                       );    
     
     ///////////////////// ///////////////////// /////////////////////
     //                         GET / SET
@@ -129,7 +133,7 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief Return currently known class numbers
      * @author Alexander Freytag
      */    
-    std::set<int> getKnownClassNumbers ( ) const;    
+    std::set<uint> getKnownClassNumbers ( ) const;    
    
     ///////////////////// ///////////////////// /////////////////////
     //                      CLASSIFIER STUFF
@@ -143,18 +147,25 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      */        
-    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores ) const;
+    void classify ( const NICE::SparseVector * _example, 
+                    uint & _result, 
+                    NICE::SparseVector & _scores 
+                  ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
      * @date 19-06-2012 (dd-mm-yyyy)
      * @author Alexander Freytag
      * @param example (SparseVector) to be classified given in a sparse representation
-     * @param result (int) class number of most likely class
+     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      * @param uncertainty (double*) predictive variance of the classification result, if computed
      */    
-    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores, double & uncertainty ) const;
+    void classify ( const NICE::SparseVector * _example,  
+                    uint & _result, 
+                    NICE::SparseVector & _scores, 
+                    double & _uncertainty 
+                  ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -165,7 +176,10 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      */        
-    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores ) const;
+    void classify ( const NICE::Vector * _example,  
+                    uint & _result, 
+                    NICE::SparseVector & _scores 
+                  ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -173,11 +187,15 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @date 18-06-2013 (dd-mm-yyyy)
      * @author Alexander Freytag
      * @param example (non-sparse Vector) to be classified given in a non-sparse representation
-     * @param result (int) class number of most likely class
+     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      * @param uncertainty (double) predictive variance of the classification result, if computed
      */    
-    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores, double & uncertainty ) const;    
+    void classify ( const NICE::Vector * _example,  
+                    uint & _result, 
+                    NICE::SparseVector & _scores, 
+                    double & _uncertainty 
+                  ) const;    
 
     /**
      * @brief train this classifier using a given set of examples and a given set of binary label vectors 
@@ -186,7 +204,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @param examples (std::vector< NICE::SparseVector *>) training data given in a sparse representation
      * @param labels (Vector) class labels (multi-class)
      */
-    void train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels );
+    void train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                 const NICE::Vector & _labels 
+               );
     
     /** 
      * @brief train this classifier using a given set of examples and a given set of binary label vectors 
@@ -195,7 +215,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @param examples examples to use given in a sparse data structure
     * @param binLabels corresponding binary labels with class number. There is no need here for every example to have exactly one positive entry in this set (1,-1)
      */
-    void train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
+    void train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                 std::map<uint, NICE::Vector> & _binLabels 
+               );
     
     /**
      * @brief Clone classifier object
@@ -210,7 +232,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @param examples example for which the classification uncertainty shall be predicted, given in a sparse representation
      * @param uncertainty contains the resulting classification uncertainty
      */       
-    void predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const;
+    void predictUncertainty( const NICE::SparseVector * _example, 
+                             double & _uncertainty 
+                           ) const;
     
     /** 
      * @brief prediction of classification uncertainty
@@ -219,7 +243,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @param examples example for which the classification uncertainty shall be predicted, given in a non-sparse representation
      * @param uncertainty contains the resulting classification uncertainty
      */       
-    void predictUncertainty( const NICE::Vector * example, double & uncertainty ) const;    
+    void predictUncertainty( const NICE::Vector * _example, 
+                             double & _uncertainty 
+                           ) const;    
     
 
 
@@ -231,13 +257,17 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief Load classifier from external file (stream)
      * @author Alexander Freytag
      */     
-    void restore ( std::istream & is, int format = 0 );
+    void restore ( std::istream & _is, 
+                   int _format = 0 
+                 );
     
     /** 
      * @brief Save classifier to external file (stream)
      * @author Alexander Freytag
      */     
-    void store ( std::ostream & os, int format = 0 ) const;
+    void store ( std::ostream & _os, 
+                 int _format = 0 
+               ) const;
     
     /** 
      * @brief Clear classifier object
@@ -254,18 +284,18 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief add a new example
      * @author Alexander Freytag
      */    
-    virtual void addExample( const NICE::SparseVector * example, 
-                              const double & label, 
-                              const bool & performOptimizationAfterIncrement = true
+    virtual void addExample( const NICE::SparseVector * _example, 
+                             const double & _label, 
+                             const bool & _performOptimizationAfterIncrement = true
                             );
                           
     /** 
      * @brief add several new examples
      * @author Alexander Freytag
      */    
-    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-                                      const NICE::Vector & newLabels,
-                                      const bool & performOptimizationAfterIncrement = true
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                      const NICE::Vector & _newLabels,
+                                      const bool & _performOptimizationAfterIncrement = true
                                     );       
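
Note: the OnlineLearnable interface above folds new data into an already trained model. A short hedged sketch (label value illustrative; helper name hypothetical):

```cpp
// labels are doubles at the interface, but should carry non-negative
// integer class numbers now that labels are uints internally
void addOne ( NICE::GPHIKClassifier & classifier,
              const NICE::SparseVector * newExample )
{
  double newLabel   = 3.0;   // class number 3, illustrative
  bool   reoptimize = false; // skip hyperparameter re-optimization for speed
  classifier.addExample ( newExample, newLabel, reoptimize );
}
```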
 
 

+ 50 - 43
GPLikelihoodApprox.cpp

@@ -34,28 +34,28 @@ using namespace NICE;
 using namespace OPTIMIZATION;
 
 
-GPLikelihoodApprox::GPLikelihoodApprox( const std::map<int, NICE::Vector> & binaryLabels,
-                                        ImplicitKernelMatrix *ikm,
-                                        IterativeLinearSolver *linsolver, 
-                                        EigValues *eig,
-                                        bool verifyApproximation,
+GPLikelihoodApprox::GPLikelihoodApprox( const std::map<uint, NICE::Vector> & _binaryLabels,
+                                        ImplicitKernelMatrix *_ikm,
+                                        IterativeLinearSolver *_linsolver, 
+                                        EigValues *_eig,
+                                        bool _verifyApproximation,
                                         int _nrOfEigenvaluesToConsider
                                       ) 
 
-      : CostFunction( ikm->getNumParameters() )
+      : CostFunction( _ikm->getNumParameters() )
 {
-  this->binaryLabels = binaryLabels;
-  this->ikm = ikm;
-  this->linsolver = linsolver;
-  this->eig = eig;
+  this->binaryLabels = _binaryLabels;
+  this->ikm = _ikm;
+  this->linsolver = _linsolver;
+  this->eig = _eig;
 
-  if ( binaryLabels.size() == 1 )
+  if ( _binaryLabels.size() == 1 )
     this->nrOfClasses = 2;
   else
-    this->nrOfClasses = binaryLabels.size();
+    this->nrOfClasses = _binaryLabels.size();
 
   this->min_nlikelihood = std::numeric_limits<double>::max();
-  this->verifyApproximation = verifyApproximation;
+  this->verifyApproximation = _verifyApproximation;
   
   this->nrOfEigenvaluesToConsider = _nrOfEigenvaluesToConsider;
     
@@ -73,7 +73,7 @@ GPLikelihoodApprox::~GPLikelihoodApprox()
     this->initialAlphaGuess = NULL;
 }
 
-const std::map<int, Vector> & GPLikelihoodApprox::getBestAlphas () const
+const std::map<uint, Vector> & GPLikelihoodApprox::getBestAlphas () const
 {
   if ( this->min_alphas.size() > 0 )
   {
@@ -91,7 +91,12 @@ const std::map<int, Vector> & GPLikelihoodApprox::getBestAlphas () const
   return this->min_alphas;
 }
 
-void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax )
+void GPLikelihoodApprox::calculateLikelihood ( double _mypara, 
+                                               const FeatureMatrix & _f, 
+                                               const std::map< uint, NICE::Vector > & _yset, 
+                                               double _noise, 
+                                               double lambdaMax 
+                                             )
 {
   // robust cholesky routine without noise !!
   CholeskyRobust cr ( true /*verbose*/, 0.0, false /*useCuda*/ );
@@ -102,8 +107,8 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   Matrix K;
   IntersectionKernelFunction<double> hik;
  //old version, not needed anymore - we exploit sparsity
-//   K = hik.computeKernelMatrix(data_matrix, noise); // = K + sigma^2 I
-  K = hik.computeKernelMatrix(f, noise);
+//   K = hik.computeKernelMatrix(data_matrix, _noise); // = K + sigma^2 I
+  K = hik.computeKernelMatrix(_f, _noise);
   t.stop();
   cerr << "VERIFY: Time used for calculating kernel matrix is: " << t.getLast() << endl;
 
@@ -116,11 +121,11 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   t.start();
   Matrix choleskyMatrix; 
   cr.robustChol ( K, choleskyMatrix ); // K = choleskyMatrix^T * choleskyMatrix
-  double gt_logdet = (yset.size()) * cr.getLastLogDet();
+  double gt_logdet = (_yset.size()) * cr.getLastLogDet();
   cerr << "chol * chol^T: " << ( choleskyMatrix * choleskyMatrix.transpose() )(0,0,4,4) << endl;
 
   double gt_dataterm = 0.0;
-  for ( std::map< int, NICE::Vector >::const_iterator i = yset.begin(); i != yset.end(); i++ )
+  for ( std::map< uint, NICE::Vector >::const_iterator i = _yset.begin(); i != _yset.end(); i++ )
   {
     const NICE::Vector & y = i->second;
     Vector gt_alpha;
@@ -137,10 +142,12 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   
   
   double gt_nlikelihood = gt_logdet + gt_dataterm;
-  cerr << "OPTGT: " << mypara << " " << gt_nlikelihood << " " << gt_logdet << " " << gt_dataterm << endl;
+  cerr << "OPTGT: " << _mypara << " " << gt_nlikelihood << " " << gt_logdet << " " << gt_dataterm << endl;
 }
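
Note: gt_logdet and gt_dataterm above assemble the usual GP negative log marginal likelihood, summed over the binary subproblems. Up to the usual 1/2 factors and additive constants, this is

$$ -\log p(Y \mid X, \theta) \;\propto\; \sum_{c} y_c^\top \left( K_\theta + \sigma^2 I \right)^{-1} y_c \;+\; m \, \log\det \left( K_\theta + \sigma^2 I \right), $$

where the y_c are the per-class (1,-1) label vectors in _yset and m = _yset.size(); the log-determinant is shared across classes because the kernel does not depend on the class, hence the factor (_yset.size()) in gt_logdet.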
 
-void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x, const NICE::Vector & eigenValues )
+void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & _x, 
+                                            const NICE::Vector & _eigenValues 
+                                           )
 {
   Timer t;
   
@@ -155,15 +162,15 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
   
 
   // all alpha vectors will be stored!
-  std::map<int, NICE::Vector> alphas;
+  std::map<uint, NICE::Vector> alphas;
 
   // This has to be done m times for the multi-class case
   if ( this->verbose )
     std::cerr << "run ILS for every bin label. binaryLabels.size(): " << binaryLabels.size() << std::endl;
-  for ( std::map<int, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
+  for ( std::map<uint, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
   {
     // (b) y^T (K+sI)^{-1} y
-    int classCnt = j->first;
+    uint classCnt = j->first;
     if ( this->verbose )
     {
       std::cerr << "Solving linear equation system for class " << classCnt << " ..." << std::endl;
@@ -185,7 +192,7 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
      */
     NICE::Vector alpha;
     
-    alpha = (binaryLabels[classCnt] * (1.0 / eigenValues[0]) );
+    alpha = (binaryLabels[classCnt] * (1.0 / _eigenValues[0]) );
     
     if ( verbose )
       std::cerr << "Using the standard solver ..." << std::endl;
@@ -194,7 +201,7 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
     linsolver->solveLin ( *ikm, binaryLabels[classCnt], alpha );
     t.stop();
    
-    alphas.insert( std::pair<int, NICE::Vector> ( classCnt, alpha) );
+    alphas.insert( std::pair<uint, NICE::Vector> ( classCnt, alpha) );
   }  
   
   // save the parameter value and alpha vectors
@@ -202,13 +209,13 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
   this->min_alphas = alphas;
 }
 
-double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
+double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & _x)
 {
   NICE::Vector xv;
    
-  xv.resize ( x.rows() );
-  for ( uint i = 0 ; i < x.rows(); i++ )
-    xv[i] = x(i,0);
+  xv.resize ( _x.rows() );
+  for ( uint i = 0 ; i < _x.rows(); i++ )
+    xv[i] = _x(i,0);
 
   // check whether we have been here before
   unsigned long hashValue = xv.getHashValue();
@@ -275,16 +282,16 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   
 
   // all alpha vectors will be stored!
-  std::map<int, NICE::Vector> alphas;
+  std::map<uint, NICE::Vector> alphas;
 
   // This has to be done m times for the multi-class case
   if ( this->verbose )
     std::cerr << "run ILS for every bin label. binaryLabels.size(): " << binaryLabels.size() << std::endl;
   
-  for ( std::map<int, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
+  for ( std::map<uint, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
   {
     // (b) y^T (K+sI)^{-1} y
-    int classCnt = j->first;
+    uint classCnt = j->first;
     if ( this->verbose )
     {
       std::cerr << "Solving linear equation system for class " << classCnt << " ..." << std::endl;
@@ -308,7 +315,7 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
     NICE::Vector alpha;
     if ( this->initialAlphaGuess != NULL )
     {
-      std::map<int, NICE::Vector>::iterator myIt = this->initialAlphaGuess->find(classCnt);
+      std::map<uint, NICE::Vector>::iterator myIt = this->initialAlphaGuess->find(classCnt);
       if ( myIt != this->initialAlphaGuess->end() )
         alpha = myIt->second;
       else
@@ -375,8 +382,8 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
     cerr << "Time used for approximating logdet(K): " << t.getLast() << endl;
 
   // (c) adding the two terms
-  double nlikelihood = nrOfClasses*logdet;
-  double dataterm = binaryDataterms.sum();
+  double nlikelihood = this->nrOfClasses*logdet;
+  double dataterm    = binaryDataterms.sum();
   nlikelihood += dataterm;
 
   if ( this->verbose )
@@ -386,32 +393,32 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   {
     min_nlikelihood = nlikelihood;
     ikm->getParameters ( min_parameter );
-    min_alphas = alphas;
+    this->min_alphas = alphas;
   }
 
-  alreadyVisited.insert ( pair<int, double> ( hashValue, nlikelihood ) );
+  this->alreadyVisited.insert ( std::pair<unsigned long, double> ( hashValue, nlikelihood ) );
   return nlikelihood;
 }
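
Aside: the guard at the top of evaluate() and the insert at its end form a simple memoization. Each candidate hyperparameter vector is hashed (xv.getHashValue()) and looked up in alreadyVisited before any expensive solver run; results are stored under their hash, now correctly keyed as std::pair<unsigned long, double>. A minimal self-contained sketch of the pattern, with an illustrative toy hash standing in for NICE's:

    #include <cstddef>
    #include <map>
    #include <vector>

    // toy stand-in for NICE::Vector::getHashValue(); the real hash differs
    unsigned long hashVector ( const std::vector<double> & x )
    {
      unsigned long h = 5381UL;                              // djb2-style seed
      for ( size_t i = 0; i < x.size(); i++ )
        h = 33UL * h + (unsigned long)(long)( 1e6 * x[i] );  // quantize entries
      return h;
    }

    // memoized wrapper around an arbitrary expensive cost function
    double evaluateMemoized ( const std::vector<double> & x,
                              double (*costFunction)( const std::vector<double> & ),
                              std::map<unsigned long, double> & alreadyVisited )
    {
      const unsigned long hashValue = hashVector ( x );
      std::map<unsigned long, double>::const_iterator it = alreadyVisited.find ( hashValue );
      if ( it != alreadyVisited.end() )
        return it->second;                            // seen before: skip the solver

      const double nlikelihood = costFunction ( x );  // expensive part (ILS runs etc.)
      alreadyVisited.insert ( std::pair<unsigned long, double> ( hashValue, nlikelihood ) );
      return nlikelihood;
    }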
 
 void GPLikelihoodApprox::setParameterLowerBound(const double & _parameterLowerBound)
 {
-  parameterLowerBound = _parameterLowerBound;
+  this->parameterLowerBound = _parameterLowerBound;
 }
   
 void GPLikelihoodApprox::setParameterUpperBound(const double & _parameterUpperBound)
 {
-  parameterUpperBound = _parameterUpperBound;
+  this->parameterUpperBound = _parameterUpperBound;
 }
 
-void GPLikelihoodApprox::setInitialAlphaGuess(std::map< int, NICE::Vector >* _initialAlphaGuess)
+void GPLikelihoodApprox::setInitialAlphaGuess(std::map< uint, NICE::Vector >* _initialAlphaGuess)
 {
   this->initialAlphaGuess = _initialAlphaGuess;
 }
 
 
-void GPLikelihoodApprox::setBinaryLabels(const std::map<int, Vector> & _binaryLabels)
+void GPLikelihoodApprox::setBinaryLabels(const std::map<uint, Vector> & _binaryLabels)
 {
-  binaryLabels = _binaryLabels;
+  this->binaryLabels = _binaryLabels;
 }
 
 void GPLikelihoodApprox::setVerbose( const bool & _verbose )

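For reference, the debug-only routine calculateLikelihood above verifies the fast approximation against an exact computation: it assembles the full HIK kernel matrix, factorizes K + sigma^2 I with a robust Cholesky decomposition, and prints the resulting negative log-likelihood as OPTGT. Up to the usual factor 1/2 and an additive constant, neither of which moves the minimizer, the printed quantity is

    -\log p(Y \mid X, \theta) \;\propto\; m \cdot \log\det\!\left( K + \sigma^2 I \right) \;+\; \sum_{c=1}^{m} y_c^\top \left( K + \sigma^2 I \right)^{-1} y_c

with m = _yset.size() binary label vectors: gt_logdet is the first term, and gt_dataterm accumulates y_c^T alpha_c, where alpha_c = (K + sigma^2 I)^{-1} y_c is obtained from the Cholesky factor.
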
+ 20 - 14
GPLikelihoodApprox.h

@@ -48,22 +48,26 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     ImplicitKernelMatrix *ikm;
 
     /** set of binary label vectors */
-    std::map<int, Vector> binaryLabels;
+    std::map<uint, Vector> binaryLabels;
    
     /** number of classes */
-    int nrOfClasses;
+    uint nrOfClasses;
     
     /** To define how fine the approximation of the squared frobenius norm will be*/
     int nrOfEigenvaluesToConsider;
     
     //! only for debugging purposes, printing some statistics
-    void calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax );
+    void calculateLikelihood ( double _mypara, 
+                               const FeatureMatrix & _f, 
+                               const std::map< uint, NICE::Vector > & _yset, 
+                               double _noise, 
+                               double _lambdaMax );
 
     //! last alpha vectors computed (from previous IL-step)
-    std::map<int, NICE::Vector> * initialAlphaGuess;
+    std::map<uint, NICE::Vector> * initialAlphaGuess;
     
     //! alpha vectors of the best solution
-    std::map<int, Vector> min_alphas;
+    std::map<uint, Vector> min_alphas;
 
     //! minimal value of the likelihood
     double min_nlikelihood;
@@ -92,11 +96,11 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
 
     // ------ constructors and destructors ------
     /** simple constructor */
-    GPLikelihoodApprox( const std::map<int, Vector> & binaryLabels, 
-                        ImplicitKernelMatrix *ikm,
-                        IterativeLinearSolver *linsolver,
-                        EigValues *eig,
-                        bool verifyApproximation = false,
+    GPLikelihoodApprox( const std::map<uint, Vector> & _binaryLabels, 
+                        ImplicitKernelMatrix *_ikm,
+                        IterativeLinearSolver *_linsolver,
+                        EigValues *_eig,
+                        bool _verifyApproximation = false,
                         int _nrOfEigenvaluesToConsider = 1
                       );
       
@@ -112,7 +116,9 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     *
     * @return void
     */    
-    void computeAlphaDirect(const OPTIMIZATION::matrix_type & x, const NICE::Vector & eigenValues);
+    void computeAlphaDirect(const OPTIMIZATION::matrix_type & _x, 
+                            const NICE::Vector & _eigenValues
+                           );
     
     /**
     * @brief Evaluate the likelihood for given hyperparameters
@@ -126,13 +132,13 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     
     // ------ get and set methods ------
     const NICE::Vector & getBestParameters () const { return min_parameter; };
-    const std::map<int, Vector> & getBestAlphas () const;
+    const std::map<uint, Vector> & getBestAlphas () const;
     
     void setParameterLowerBound(const double & _parameterLowerBound);
     void setParameterUpperBound(const double & _parameterUpperBound);
     
-    void setInitialAlphaGuess(std::map<int, NICE::Vector> * _initialAlphaGuess);
-    void setBinaryLabels(const std::map<int, Vector> & _binaryLabels);
+    void setInitialAlphaGuess(std::map<uint, NICE::Vector> * _initialAlphaGuess);
+    void setBinaryLabels(const std::map<uint, Vector> & _binaryLabels);
     
     void setVerbose( const bool & _verbose );
     void setDebug( const bool & _debug );

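The core of this commit is visible in the header above: every class-indexed container switches its key type from int to uint. A minimal sketch of what producing such uint-keyed binary label vectors can look like, assuming a one-vs-all +1/-1 encoding (an assumption; the exact NICE encoding may differ), with std::vector<double> standing in for NICE::Vector and buildBinaryLabels as an illustrative name:

    #include <cstddef>
    #include <map>
    #include <set>
    #include <vector>

    typedef unsigned int uint;

    std::map<uint, std::vector<double> > buildBinaryLabels (
          const std::vector<uint> & y,      // multi-class labels, e.g. {0, 3, 7, 3}
          std::set<uint> & knownClasses )   // filled with the distinct class numbers
    {
      knownClasses.clear();
      for ( size_t i = 0; i < y.size(); i++ )
        knownClasses.insert ( y[i] );

      // one binary task per known class: +1 for the class, -1 for all others
      std::map<uint, std::vector<double> > binaryLabels;
      for ( std::set<uint>::const_iterator it = knownClasses.begin();
            it != knownClasses.end(); it++ )
      {
        std::vector<double> yBin ( y.size(), -1.0 );
        for ( size_t i = 0; i < y.size(); i++ )
          if ( y[i] == *it )
            yBin[i] = 1.0;
        binaryLabels.insert ( std::pair<uint, std::vector<double> > ( *it, yBin ) );
      }
      return binaryLabels;
    }
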
+ 1 - 1
matlab/ConverterMatlabToNICE.cpp

@@ -100,7 +100,7 @@ NICE::SparseVector MatlabConversion::convertSparseVectorToNice(
   
 
   NICE::SparseVector svec( std::max(dimx, dimy) );
-   
+  
   
   if ( dimx > 1)
   {

+ 1 - 1
matlab/ConverterNICEToMatlab.cpp

@@ -14,7 +14,7 @@ mxArray* MatlabConversion::convertSparseVectorFromNice( const NICE::SparseVector
        matlabSparseVec = mxCreateSparse( niceSvec.getDim() -1 /*m*/, 1/*n*/, niceSvec.size() -1 /*nzmax*/, mxREAL);
     else
       matlabSparseVec = mxCreateSparse( niceSvec.getDim() /*m*/, 1/*n*/, niceSvec.size() /*nzmax*/, mxREAL);
-
+    
     
     // To make the returned sparse mxArray useful, you must initialize the pr, ir, jc, and (if it exists) pi arrays.    
     // mxCreateSparse allocates space for:

+ 16 - 16
matlab/GPHIKClassifierMex.cpp

@@ -56,6 +56,7 @@ NICE::Config parseParametersGPHIKClassifier(const mxArray *prhs[], int nrhs)
     /////////////////////////////////////////
     if( (variable == "verboseTime") || 
         (variable == "verbose") ||
+        (variable == "debug") ||            
         (variable == "optimize_noise") || 
         (variable == "uncertaintyPredictionForClassification") ||
         (variable == "use_quantization") || 
@@ -349,7 +350,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
         
         //------------- read the data --------------
 
-        int result;
+        uint result;
         NICE::SparseVector scores;
         double uncertainty;        
 
@@ -376,8 +377,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
 
           // output
           plhs[0] = mxCreateDoubleScalar( result ); 
-          
-          
+                    
           if(nlhs >= 2)
           {
             plhs[1] = MatlabConversion::convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
@@ -462,17 +462,17 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
         
         // determine classes known during training and corresponding mapping
        // thereby allowing for non-continuous class labels
-        std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+        std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
         
-        int noClassesKnownTraining ( classesKnownTraining.size() );
-        std::map<int,int> mapClNoToIdxTrain;
-        std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-        for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-            mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+        uint noClassesKnownTraining ( classesKnownTraining.size() );
+        std::map< uint, uint > mapClNoToIdxTrain;
+        std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+        for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+            mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
         
         // determine classes known during testing and corresponding mapping
        // thereby allowing for non-continuous class labels
-        std::set<int> classesKnownTest;
+        std::set< uint > classesKnownTest;
         classesKnownTest.clear();
         
 
@@ -487,10 +487,10 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
         }          
         
-        int noClassesKnownTest ( classesKnownTest.size() );  
+        uint noClassesKnownTest ( classesKnownTest.size() );
-        std::map<int,int> mapClNoToIdxTest;
-        std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-        for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-            mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );          
+        std::map< uint, uint> mapClNoToIdxTest;
+        std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+        for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+            mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  );          
         
 
 
@@ -520,7 +520,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
             //----------------- convert data to sparse data structures ---------
           
 
-            int result;
+            uint result;
             NICE::SparseVector exampleScoresSparse;
 
             if ( dataIsSparse )

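The wrapper above, like the tests further down, repeatedly builds a std::map<uint, uint> that sends possibly non-continuous class numbers (e.g. {2, 5, 9}) to contiguous confusion-matrix indices ({0, 1, 2}). A small self-contained sketch of that pattern; buildClassToIndexMap is an illustrative name, not part of NICE:

    #include <map>
    #include <set>

    typedef unsigned int uint;

    // std::set iterates in ascending order, so indices follow sorted class numbers
    std::map<uint, uint> buildClassToIndexMap ( const std::set<uint> & knownClasses )
    {
      std::map<uint, uint> mapClNoToIdx;
      uint idx = 0;
      for ( std::set<uint>::const_iterator it = knownClasses.begin();
            it != knownClasses.end(); it++, idx++ )
        mapClNoToIdx.insert ( std::pair<uint, uint> ( *it, idx ) );
      return mapClNoToIdx;
    }
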
+ 8 - 3
matlab/plot1dExampleClassification.m

@@ -15,6 +15,8 @@ b_verboseTime                       = false;
 
 %interested in outputs?
 b_verbose                           = false;  
+b_debug                             = false;  
+
 
 % important for plotting!
 b_uncertaintyPredictionForClassification ...
@@ -26,7 +28,7 @@ b_ils_verbose                       = false;
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%
 %% integer
 i_nrOfEigenvaluesToConsiderForVarApprox ...
-                                    = 0;
+                                    = 1;
 i_num_bins                          = 100; % default
 i_ils_max_iterations                = 1000; % default
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -59,6 +61,7 @@ myGPHIKClassifier = ...
         GPHIKClassifier ( ...
                           'verboseTime',                               b_verboseTime, ...
                           'verbose',                                   b_verbose, ...
+                          'debug',                                     b_debug, ...                          
                           'uncertaintyPredictionForClassification',    b_uncertaintyPredictionForClassification, ...
                           'optimize_noise',                            b_optimize_noise, ...
                           'use_quantization',                          b_use_quantization, ...
@@ -96,8 +99,10 @@ uncertainties = zeros(size(myDataTest,1),1);
 for i=1:size(myDataTest,1)
     example = myDataTest(i,:);
 
-    [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( example );
-    %[ classNoEst, score] = myGPHIKClassifier.classify( example );
+%     [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( sparse(example) );
+    [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( example );    
+%      [ classNoEst, score] = myGPHIKClassifier.classify( example );
+%     [ classNoEst, score] = myGPHIKClassifier.classify( sparse(  example ) );    
     scores(i) = score(1);
 end
 

+ 2 - 2
matlab/testMatlabConversionFunctionsMex.cpp

@@ -117,8 +117,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
 
         NICE::SparseVector t_vecTest = MatlabConversion::convertSparseVectorToNice( t_pData );
 	
-	NICE::Vector t_fullVector;
-	t_vecTest.convertToVectorT( t_fullVector );
+        NICE::Vector t_fullVector;
+        t_vecTest.convertToVectorT( t_fullVector );
         std::cerr << "convertDoubleSparseVector: full version:" << t_fullVector << std::endl;
 
         // output

+ 28 - 24
tests/TestGPHIKOnlineLearnable.cpp

@@ -56,20 +56,24 @@ void readData ( const std::string filename, NICE::Matrix & data, NICE::Vector &
   }    
 }
 
-void prepareLabelMappings (std::map<int,int> & mapClNoToIdxTrain, const GPHIKClassifier * classifier, std::map<int,int> & mapClNoToIdxTest, const NICE::Vector & yMultiTest)
+void prepareLabelMappings (std::map< uint, uint > & mapClNoToIdxTrain, 
+                           const GPHIKClassifier * classifier, 
+                           std::map< uint,uint > & mapClNoToIdxTest, 
+                           const NICE::Vector & yMultiTest
+                          )
 {
   // determine classes known during training and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
   
-  int noClassesKnownTraining ( classesKnownTraining.size() );
-  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  uint noClassesKnownTraining ( classesKnownTraining.size() );
+  std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
   
   // determine classes known during testing and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::set<int> classesKnownTest;
+  std::set< uint> classesKnownTest;
   classesKnownTest.clear();
   
 
@@ -83,18 +87,18 @@ void prepareLabelMappings (std::map<int,int> & mapClNoToIdxTrain, const GPHIKCla
     }
   }          
   
-  int noClassesKnownTest ( classesKnownTest.size() );  
-  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );   
+  uint noClassesKnownTest ( classesKnownTest.size() );  
+  std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+  for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair< uint,uint > ( *clTestIt, i )  );   
 }
 
 void evaluateClassifier ( NICE::Matrix & confusionMatrix, 
                           const NICE::GPHIKClassifier * classifier, 
                           const NICE::Matrix & data,
                           const NICE::Vector & yMulti,
-                          const std::map<int,int> & mapClNoToIdxTrain,
-                          const std::map<int,int> & mapClNoToIdxTest
+                          const std::map< uint,uint > & mapClNoToIdxTrain,
+                          const std::map< uint,uint > & mapClNoToIdxTest
                         ) 
 {
   int i_loopEnd  ( (int)data.rows() );  
@@ -103,7 +107,7 @@ void evaluateClassifier ( NICE::Matrix & confusionMatrix,
   {
     NICE::Vector example ( data.getRow(i) );
     NICE::SparseVector scores;
-    int result;    
+    uint result;    
     
     // classify with incrementally trained classifier 
     classifier->classify( &example, result, scores );
@@ -124,14 +128,14 @@ void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
     NICE::Vector example ( data.getRow(i) );
     
     NICE::SparseVector scores;
-    int result;    
+    uint result;    
     
     // classify with incrementally trained classifier 
     classifier->classify( &example, result, scores );
 
     
     NICE::SparseVector scoresScratch;
-    int resultScratch;
+    uint resultScratch;
     classifierScratch->classify( &example, resultScratch, scoresScratch );
     
     
@@ -215,8 +219,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint,uint > mapClNoToIdxTrain;
+  std::map< uint,uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   
@@ -352,8 +356,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint,uint > mapClNoToIdxTrain;
+  std::map< uint,uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   
@@ -492,8 +496,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningBinarytoMultiClass()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint,uint > mapClNoToIdxTrain;
+  std::map< uint,uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   
@@ -651,8 +655,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningMultiClass()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint,uint > mapClNoToIdxTrain;
+  std::map< uint,uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   

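Given the two mappings prepared above, evaluateClassifier classifies every test row (result is now a uint, matching the changed classify signature) and tallies the outcome into a confusion matrix. A rough self-contained sketch of the tallying step, with predictions precomputed instead of calling the classifier, and assuming rows index ground-truth classes and columns predicted classes:

    #include <cstddef>
    #include <map>
    #include <vector>

    typedef unsigned int uint;

    void tallyConfusionMatrix ( std::vector< std::vector<double> > & confusionMatrix,
                                const std::vector<uint> & yTrue,  // ground-truth labels
                                const std::vector<uint> & yPred,  // classifier outputs
                                const std::map<uint, uint> & mapClNoToIdxTest,
                                const std::map<uint, uint> & mapClNoToIdxTrain )
    {
      for ( size_t i = 0; i < yTrue.size(); i++ )
      {
        const uint row = mapClNoToIdxTest.find  ( yTrue[i] )->second;
        const uint col = mapClNoToIdxTrain.find ( yPred[i] )->second;
        confusionMatrix[row][col] += 1.0;  // assumes both labels occur in the maps
      }
    }
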
+ 15 - 15
tests/TestGPHIKPersistent.cpp

@@ -152,17 +152,17 @@ void TestGPHIKPersistent::testPersistentMethods()
   
   // determine classes known during training and corresponding mapping
  // thereby allowing for non-continuous class labels
-  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
   
-  int noClassesKnownTraining ( classesKnownTraining.size() );
-  std::map<int,int> mapClNoToIdxTrain;
-  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  uint noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
   
   // determine classes known during testing and corresponding mapping
   // thereby allow for non-continous class labels
-  std::set<int> classesKnownTest;
+  std::set< uint > classesKnownTest;
   classesKnownTest.clear();
   
 
@@ -176,23 +176,23 @@ void TestGPHIKPersistent::testPersistentMethods()
     }
   }          
   
-  int noClassesKnownTest ( classesKnownTest.size() );  
-  std::map<int,int> mapClNoToIdxTest;
-  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+  uint noClassesKnownTest ( classesKnownTest.size() );  
+  std::map< uint, uint > mapClNoToIdxTest;
+  std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+  for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  ); 
           
   
   if ( verbose )
   {
     std::cout << "Train data mapping: " << std::endl;
-    for ( std::map<int,int>::const_iterator clTrainIt = mapClNoToIdxTrain.begin(); clTrainIt != mapClNoToIdxTrain.end(); clTrainIt++ )
+    for ( std::map< uint, uint >::const_iterator clTrainIt = mapClNoToIdxTrain.begin(); clTrainIt != mapClNoToIdxTrain.end(); clTrainIt++ )
     {
       std::cout << " " << clTrainIt->first << " " << clTrainIt->second << std::endl;
     }
 
     std::cout << "Test data mapping: " << std::endl;
-    for ( std::map<int,int>::const_iterator clTestIt = mapClNoToIdxTest.begin(); clTestIt != mapClNoToIdxTest.end(); clTestIt++ )
+    for ( std::map< uint, uint >::const_iterator clTestIt = mapClNoToIdxTest.begin(); clTestIt != mapClNoToIdxTest.end(); clTestIt++ )
     {
       std::cout << " " << clTestIt->first << " " << clTestIt->second << std::endl;
     }    
@@ -208,7 +208,7 @@ void TestGPHIKPersistent::testPersistentMethods()
   {
     NICE::Vector example ( dataTest.getRow(i) );
     NICE::SparseVector scores;
-    int result;
+    uint result;
     
     // classify with trained classifier 
     classifier->classify( &example, result, scores );

Some files were not shown because too many files changed in this diff