
stable support of new functionality, uint for indexing, clean-up, etc.

Alexander Freytag 9 years ago
parent
commit
3ddc557830
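
The commit message is terse; in the headers below it boils down to two conventions: uint instead of int for everything that indexes examples, dimensions, or class numbers, and type prefixes on members (b_ for bool, d_ for double, i_/ui_ for integers) with a leading underscore on function arguments. A minimal sketch of the pattern with invented names (NICE is assumed to provide the uint typedef):

    #include <set>

    // assumed: the NICE headers provide this typedef
    typedef unsigned int uint;

    class SomeModel
    {
      protected:
        bool   b_verbose;            // was: bool verbose;
        double d_noise;              // was: double noise;
        uint   ui_n;                 // was: int n; an example count is never negative
        std::set<uint> knownClasses; // was: std::set<int> knownClasses;

      public:
        // arguments carry a leading underscore to set them apart from members
        void set_n ( const uint & _n ) { this->ui_n = _n; };
    };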

File diff suppressed because it is too large
+ 234 - 198
FMKGPHyperparameterOptimization.cpp


+ 92 - 46
FMKGPHyperparameterOptimization.h

@@ -56,11 +56,11 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     ///////////////////////////////////
     
     /** verbose flag */
-    bool verbose;    
+    bool b_verbose;    
     /** verbose flag for time measurement outputs */
-    bool verboseTime;        
+    bool b_verboseTime;        
     /** debug flag for several outputs useful for debugging*/
-    bool debug;    
+    bool b_debug;    
     
     //////////////////////////////////////
     // classification related variables //
@@ -77,10 +77,10 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     
     
     /** upper bound for hyper parameters (ParameterizedFunction) to optimize */
-    double parameterUpperBound;
+    double d_parameterUpperBound;
     
     /** lower bound for hyper parameters (ParameterizedFunction) to optimize */
-    double parameterLowerBound;
+    double d_parameterLowerBound;
     
     /** the parameterized function we use within the minimum kernel */
     NICE::ParameterizedFunction *pf;
@@ -92,23 +92,23 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     typedef VVector PrecomputedType;
 
     /** precomputed arrays A (1 per class) needed for classification without quantization  */
-    std::map< int, PrecomputedType > precomputedA;    
+    std::map< uint, PrecomputedType > precomputedA;    
     /** precomputed arrays B (1 per class) needed for classification without quantization  */
-    std::map< int, PrecomputedType > precomputedB;
+    std::map< uint, PrecomputedType > precomputedB;
     
     /** precomputed LUTs (1 per class) needed for classification with quantization  */
-    std::map< int, double * > precomputedT;  
+    std::map< uint, double * > precomputedT;  
     
     //! storing the labels is needed for Incremental Learning (re-optimization)
     NICE::Vector labels; 
     
     //! store the class number of the positive class (i.e., larger class no), only used in binary settings
-    int binaryLabelPositive;
+    int i_binaryLabelPositive;
     //! store the class number of the negative class (i.e., smaller class no), only used in binary settings
-    int binaryLabelNegative;
+    int i_binaryLabelNegative;
     
     //! contains all class numbers of the currently known classes
-    std::set<int> knownClasses;
+    std::set<uint> knownClasses;
     
     //! container for multiple kernel matrices (e.g., a data-containing kernel matrix (GMHIKernel) and a noise matrix (IKMNoise) )
     NICE::IKMLinearCombination * ikmsum;    
@@ -197,7 +197,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     bool b_usePreviousAlphas;
     
     //! store alpha vectors for good initializations in the IL setting, if activated
-    std::map<int, NICE::Vector> previousAlphas;     
+    std::map<uint, NICE::Vector> previousAlphas;     
 
     
     /////////////////////////
@@ -211,37 +211,47 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @brief calculate binary label vectors using a multi-class label vector
     * @author Alexander Freytag
     */    
-    int prepareBinaryLabels ( std::map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses);     
+    uint prepareBinaryLabels ( std::map<uint, NICE::Vector> & _binaryLabels, 
+                              const NICE::Vector & _y , 
+                              std::set<uint> & _myClasses
+                            );     
     
     /**
     * @brief prepare the GPLike object for given binary labels and already given ikmsum-object
     * @author Alexander Freytag
     */
-    inline void setupGPLikelihoodApprox( GPLikelihoodApprox * & gplike, const std::map<int, NICE::Vector> & binaryLabels, uint & parameterVectorSize);    
+    inline void setupGPLikelihoodApprox( GPLikelihoodApprox * & _gplike, 
+                                         const std::map<uint, NICE::Vector> & _binaryLabels,
+                                         uint & _parameterVectorSize
+                                       );    
     
     /**
     * @brief update eigenvectors and eigenvalues for given ikmsum-objects and a method to compute eigenvalues
     * @author Alexander Freytag
     */
-    inline void updateEigenDecomposition( const int & i_noEigenValues );
+    inline void updateEigenDecomposition( const int & _noEigenValues );
     
     /**
     * @brief core of the optimize-functions
     * @author Alexander Freytag
     */
-    inline void performOptimization( GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
+    inline void performOptimization( GPLikelihoodApprox & gplike, 
+                                     const uint & parameterVectorSize
+                                   );
     
     /**
     * @brief apply the optimized transformation values to the underlying features
     * @author Alexander Freytag
     */    
-    inline void transformFeaturesWithOptimalParameters(const GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
+    inline void transformFeaturesWithOptimalParameters(const GPLikelihoodApprox & _gplike, 
+                                                       const uint & _parameterVectorSize
+                                                      );
     
     /**
     * @brief build the resulting matrices A and B as well as lookup tables T for fast evaluations using the optimized parameter settings
     * @author Alexander Freytag
     */
-    inline void computeMatricesAndLUTs( const GPLikelihoodApprox & gplike);
+    inline void computeMatricesAndLUTs( const GPLikelihoodApprox & _gplike);
     
      
 
@@ -250,8 +260,8 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @author Alexander Freytag
     */           
     void updateAfterIncrement (
-      const std::set<int> newClasses,
-      const bool & performOptimizationAfterIncrement = false
+      const std::set<uint> _newClasses,
+      const bool & _performOptimizationAfterIncrement = false
     );    
   
 
@@ -269,7 +279,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @author Alexander Freytag
     * @param b_performRegression
     */
-    FMKGPHyperparameterOptimization( const bool & b_performRegression );
+    FMKGPHyperparameterOptimization( const bool & _performRegression );
 
     /**
     * @brief recommended constructor, only calls this->initialize with same input arguments
@@ -278,7 +288,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param confSection
     *
     */
-    FMKGPHyperparameterOptimization( const Config *conf, const std::string & confSection = "FMKGPHyperparameterOptimization" );
+    FMKGPHyperparameterOptimization( const Config *_conf, 
+                                     const std::string & _confSection = "FMKGPHyperparameterOptimization" 
+                                   );
     
     
     /**
@@ -289,7 +301,10 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param fmk pointer to a pre-initialized structure (will be deleted)
     * @param confSection
     */
-    FMKGPHyperparameterOptimization( const Config *conf, FastMinKernel *_fmk, const std::string & confSection = "FMKGPHyperparameterOptimization" );
+    FMKGPHyperparameterOptimization( const Config *_conf, 
+                                     FastMinKernel *_fmk, 
+                                     const std::string & _confSection = "FMKGPHyperparameterOptimization" 
+                                   );
       
     /**
     * @brief standard destructor
@@ -301,7 +316,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @brief Set variables and parameters to default or config-specified values
     * @author Alexander Freytag
     */       
-    void initFromConfig( const Config *conf, const std::string & confSection = "FMKGPHyperparameterOptimization" );
+    void initFromConfig( const Config *_conf, 
+                         const std::string & _confSection = "FMKGPHyperparameterOptimization" 
+                       );
     
     
     ///////////////////// ///////////////////// /////////////////////
@@ -323,14 +340,14 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @brief Get the currently known class numbers
     * @author Alexander Freytag
     */    
-    std::set<int> getKnownClassNumbers ( ) const;
+    std::set<uint> getKnownClassNumbers ( ) const;
     
     /**
      * @brief Change between classification and regression, only allowed if not trained. Otherwise, exceptions will be thrown...
      * @author Alexander Freytag
      * @date 05-02-2014 (dd-mm-yyyy)
      */
-    void setPerformRegression ( const bool & b_performRegression );
+    void setPerformRegression ( const bool & _performRegression );
     
     /**
      * @brief Set the FastMinKernel object. Only allowed if not trained. Otherwise, exceptions will be thrown...
@@ -344,7 +361,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @author Alexander Freytag
      * @date 06-02-2014 (dd-mm-yyyy)
      */        
-    void setNrOfEigenvaluesToConsiderForVarApprox ( const int & i_nrOfEigenvaluesToConsiderForVarApprox );
+    void setNrOfEigenvaluesToConsiderForVarApprox ( const int & _nrOfEigenvaluesToConsiderForVarApprox );
     
     ///////////////////// ///////////////////// /////////////////////
     //                      CLASSIFIER STUFF
@@ -361,7 +378,12 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param positives set of positive examples (indices)
     * @param negatives set of negative examples (indices)
     */
-    void optimizeBinary ( const sparse_t & data, const NICE::Vector & y, const std::set<int> & positives, const std::set<int> & negatives, double noise );
+    void optimizeBinary ( const sparse_t & data, 
+                          const NICE::Vector & y, 
+                          const std::set<uint> & positives, 
+                          const std::set<uint> & negatives, 
+                          double noise 
+                        );
 
     /**
     * @brief Perform hyperparameter optimization for GP multi-class or binary problems
@@ -371,7 +393,11 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param y label vector with multi-class labels
     * @param examples mapping of example index to new index
     */
-    void optimize ( const sparse_t & data, const NICE::Vector & y, const std::map<int, int> & examples, double noise );
+    void optimize ( const sparse_t & data, 
+                    const NICE::Vector & y, 
+                    const std::map<uint, uint> & examples, 
+                    double noise 
+                  );
 #endif
 
     /**
@@ -387,7 +413,7 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @param binLabels vector of binary label vectors (1,-1) and corresponding class no.
     */
-    void optimize ( std::map<int, NICE::Vector> & binaryLabels );  
+    void optimize ( std::map<uint, NICE::Vector> & _binaryLabels );  
    
     /**
    * @brief Compute the necessary variables for approximations of predictive variance (LUTs), assuming an already initialized fmk object
@@ -411,7 +437,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @return class number achieving the best score
     */
-    int classify ( const NICE::SparseVector & x, SparseVector & scores ) const;
+    uint classify ( const NICE::SparseVector & _x, 
+                   SparseVector & _scores 
+                 ) const;
     
     /**
     * @brief classify an example that is given as non-sparse vector
@@ -425,7 +453,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     *
     * @return class number achieving the best score
     */
-    int classify ( const NICE::Vector & x, SparseVector & scores ) const;    
+    uint classify ( const NICE::Vector & _x, 
+                    SparseVector & _scores 
+                  ) const;    
 
     //////////////////////////////////////////
     // variance computation: sparse inputs
@@ -439,7 +469,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & x, double & predVariance ) const;
+    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & _x, 
+                                                   double & _predVariance 
+                                                  ) const;
     
     /**
     * @brief compute predictive variance for a given test example using a fine approximation  (k eigenvalues and eigenvectors to approximate the quadratic term)
@@ -449,7 +481,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & x, double & predVariance ) const; 
+    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & _x, 
+                                                  double & _predVariance 
+                                                 ) const; 
     
     /**
     * @brief compute exact predictive variance for a given test example using ILS methods (exact, but more time consuming than approx versions)
@@ -459,7 +493,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceExact(const NICE::SparseVector & x, double & predVariance ) const; 
+    void computePredictiveVarianceExact(const NICE::SparseVector & _x, 
+                                        double & _predVariance 
+                                       ) const; 
     
     
     //////////////////////////////////////////
@@ -474,7 +510,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateRough(const NICE::Vector & x, double & predVariance ) const;    
+    void computePredictiveVarianceApproximateRough(const NICE::Vector & _x, 
+                                                   double & _predVariance 
+                                                  ) const;    
 
    
     
@@ -486,7 +524,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateFine(const NICE::Vector & x, double & predVariance ) const;      
+    void computePredictiveVarianceApproximateFine(const NICE::Vector & _x, 
+                                                  double & _predVariance 
+                                                 ) const;      
     
 
     
@@ -498,7 +538,9 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
     * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceExact(const NICE::Vector & x, double & predVariance ) const;  
+    void computePredictiveVarianceExact(const NICE::Vector & _x, 
+                                        double & _predVariance 
+                                       ) const;  
     
     
     
@@ -512,13 +554,17 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @brief Load current object from external file (stream)
      * @author Alexander Freytag
      */     
-    void restore ( std::istream & is, int format = 0 );
+    void restore ( std::istream & _is, 
+                   int _format = 0 
+                 );
     
     /** 
      * @brief Save current object to external file (stream)
      * @author Alexander Freytag
      */      
-    void store ( std::ostream & os, int format = 0 ) const;
+    void store ( std::ostream & _os,
+                 int _format = 0 
+               ) const;
     
     /** 
      * @brief Clear current object
@@ -534,18 +580,18 @@ class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::On
      * @brief add a new example
      * @author Alexander Freytag
      */       
-    virtual void addExample( const NICE::SparseVector * example, 
-                             const double & label, 
-                             const bool & performOptimizationAfterIncrement = true
+    virtual void addExample( const NICE::SparseVector * _example, 
+                             const double & _label, 
+                             const bool & _performOptimizationAfterIncrement = true
                            );
 
     /** 
      * @brief add several new examples
      * @author Alexander Freytag
      */    
-    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-                                      const NICE::Vector & newLabels,
-                                      const bool & performOptimizationAfterIncrement = true
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                      const NICE::Vector & _newLabels,
+                                      const bool & _performOptimizationAfterIncrement = true
                                     );         
 };
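
Taken together, the renamed interface keeps the usual train-then-classify flow; mainly the index types changed from int to uint. A rough usage sketch, assuming the NICE namespace, a gp-hik-core include path, and a pre-built FastMinKernel object (illustrative only, not a verified build):

    #include <map>
    #include <gp-hik-core/FMKGPHyperparameterOptimization.h> // assumed path

    void trainAndClassify ( const NICE::Config * conf,
                            NICE::FastMinKernel * fmk, // pre-initialized; deleted by the object
                            std::map<uint, NICE::Vector> & binaryLabels,
                            const NICE::SparseVector & testExample )
    {
      NICE::FMKGPHyperparameterOptimization fmkgp ( conf, fmk );

      // hyperparameter optimization on (1,-1) label vectors, one per class number
      fmkgp.optimize ( binaryLabels );

      // classification now returns the winning class number as uint
      NICE::SparseVector scores;
      uint bestClass = fmkgp.classify ( testExample, scores );

      // rough predictive-variance approximation; assumes the variance
      // LUTs were prepared beforehand
      double predVariance = 0.0;
      fmkgp.computePredictiveVarianceApproximateRough ( testExample, predVariance );

      (void) bestClass; // suppress unused-variable warning in this sketch
    }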
 

File diff suppressed because it is too large
+ 329 - 225
FastMinKernel.cpp


+ 127 - 53
FastMinKernel.h

@@ -42,46 +42,56 @@ namespace NICE {
 
     protected:
       /** number of examples */
-      int n;
+      uint ui_n;
 
       /** dimension of feature vectors */
-      int d; 
+      uint ui_d; 
 
       /** noise added to the diagonal of the kernel matrix */
-      double noise;
+      double d_noise;
       
       /** sorted matrix of features (sorted along each dimension) */
       NICE::FeatureMatrixT<double> X_sorted;
       
       //! verbose flag for output after calling the restore-function
-      bool verbose;
+      bool b_verbose;
       //! debug flag for output during debugging
-      bool debug;      
+      bool b_debug;      
 
       /** 
       * @brief Set number of examples
       * @author Alexander Freytag
       * @date 07-12-2011 (dd-mm-yyyy)
       */
-      void set_n(const int & _n){n = _n;};
+      void set_n(const uint & _n){this->ui_n = _n;};
       
       /** 
       * @brief Set number of dimensions
       * @author Alexander Freytag
       * @date 07-12-2011 (dd-mm-yyyy)
       */
-      void set_d(const int & _d){d = _d;};     
+      void set_d(const uint & _d){this->ui_d = _d;};     
 
       /** 
      * @brief Prepare the efficient HIK-computations part 1: order the features in each dimension and save the permutation. Pay attention: X is of dim n x d, whereas X_sorted is of dimensionality d x n!
       * @author Alexander Freytag
       * @date 07-12-2011 (dd-mm-yyyy)
       */
-      void hik_prepare_kernel_multiplications(const std::vector<std::vector<double> > & X, NICE::FeatureMatrixT<double> & X_sorted, const int & _dim = -1);
-      
-      void hik_prepare_kernel_multiplications ( const std::vector< const NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim = -1);
-      
-      void randomPermutation(NICE::Vector & permutation, const std::vector<int> & oldIndices, const int & newSize) const;
+      void hik_prepare_kernel_multiplications(const std::vector<std::vector<double> > & _X, 
+                                              NICE::FeatureMatrixT<double> & _X_sorted, 
+                                              const uint & _dim = 0
+                                             );
+      
+      void hik_prepare_kernel_multiplications ( const std::vector< const NICE::SparseVector * > & _X, 
+                                                NICE::FeatureMatrixT<double> & _X_sorted, 
+                                                const bool & _dimensionsOverExamples, 
+                                                const uint & _dim = 0
+                                              );
+      
+      void randomPermutation(NICE::Vector & _permutation, 
+                             const std::vector<uint> & _oldIndices, 
+                             const uint & _newSize
+                            ) const;
       
       enum ApproximationScheme{ MEDIAN = 0, EXPECTATION=1};
       ApproximationScheme approxScheme;
@@ -104,7 +114,11 @@ namespace NICE {
       * @author Alexander Freytag
       * @date 06-12-2011 (dd-mm-yyyy)
       */
-      FastMinKernel( const std::vector<std::vector<double> > & X, const double noise , const bool _debug = false, const int & _dim = -1);
+      FastMinKernel( const std::vector<std::vector<double> > & _X, 
+                     const double _noise ,
+                     const bool _debug = false, 
+                     const uint & _dim = 0
+                   );
 
       
       /**
@@ -113,7 +127,12 @@ namespace NICE {
       * @param X vector of sparse vector pointers
       * @param noise GP noise
       */
-      FastMinKernel( const std::vector< const NICE::SparseVector * > & X, const double noise, const bool _debug = false, const bool & dimensionsOverExamples=false, const int & _dim = -1);
+      FastMinKernel( const std::vector< const NICE::SparseVector * > & _X, 
+                     const double _noise, 
+                     const bool _debug = false, 
+                     const bool & _dimensionsOverExamples = false, 
+                     const uint & _dim = 0
+                   );
 
 #ifdef NICE_USELIB_MATIO
       /**
@@ -123,7 +142,11 @@ namespace NICE {
       * @param noise additional noise variance of the labels
       * @param examples set of indices to include
       */
-      FastMinKernel ( const sparse_t & X, const double noise, const std::map<int, int> & examples, const bool _debug = false , const int & _dim = -1);
+      FastMinKernel ( const sparse_t & _X, 
+                      const double _noise, 
+                      const std::map<uint, uint> & _examples, 
+                      const bool _debug = false , 
+                      const uint & _dim = 0);
 #endif
 
       /** 
@@ -148,14 +171,14 @@ namespace NICE {
       * @author Alexander Freytag
       * @date 07-12-2011 (dd-mm-yyyy)
       */
-      int get_n() const;
+      uint get_n() const;
       
       /** 
       * @brief Get number of dimensions
       * @author Alexander Freytag
       * @date 07-12-2011 (dd-mm-yyyy)
       */
-      int get_d() const;
+      uint get_d() const;
 
       /** 
       * @brief Computes the ratio of sparsity across the matrix
@@ -183,22 +206,33 @@ namespace NICE {
       *
       * @param pf the parameterized function (optional), if not given, nothing will be done
       */         
-      void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf = NULL );
+      void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *_pf = NULL );
           
       /** 
      * @brief  Prepare the efficient HIK-computations part 2: calculate the partial sum for each dimension. Explicitly exploiting sparsity!!! Pay attention: X_sorted is of dimensionality d x n!
       * @author Alexander Freytag
       * @date 17-01-2012 (dd-mm-yyyy)
       */
-      void hik_prepare_alpha_multiplications(const NICE::Vector & alpha, NICE::VVector & A, NICE::VVector & B) const;
+      void hik_prepare_alpha_multiplications(const NICE::Vector & _alpha, 
+                                             NICE::VVector & _A, 
+                                             NICE::VVector & _B
+                                            ) const;
             
       /**
      * @brief Computing K*alpha with the minimum kernel trick, explicitly exploiting sparsity!!!
       * @author Alexander Freytag
       * @date 17-01-2012 (dd-mm-yyyy)
       */
-      void hik_kernel_multiply(const NICE::VVector & A, const NICE::VVector & B, const NICE::Vector & alpha, NICE::Vector & beta) const;
-      void hik_kernel_multiply_fast(const double *Tlookup, const Quantization & q, const NICE::Vector & alpha, NICE::Vector & beta) const;
+      void hik_kernel_multiply(const NICE::VVector & _A, 
+                               const NICE::VVector & _B, 
+                               const NICE::Vector & _alpha, 
+                               NICE::Vector & _beta
+                              ) const;
+      void hik_kernel_multiply_fast(const double *_Tlookup, 
+                                    const Quantization & _q, 
+                                    const NICE::Vector & _alpha, 
+                                    NICE::Vector & _beta
+                                   ) const;
 
       /**
       * @brief Computing k_{*}*alpha using the minimum kernel trick and exploiting sparsity of the feature vector given
@@ -211,7 +245,12 @@ namespace NICE {
       * @param beta result of the scalar product
       * @param pf optional feature transformation
       */
-      void hik_kernel_sum(const NICE::VVector & A, const NICE::VVector & B, const NICE::SparseVector & xstar, double & beta, const ParameterizedFunction *pf = NULL ) const;
+      void hik_kernel_sum(const NICE::VVector & _A, 
+                          const NICE::VVector & _B, 
+                          const NICE::SparseVector & _xstar, 
+                          double & _beta, 
+                          const ParameterizedFunction *_pf = NULL 
+                         ) const;
       
       /**
       * @brief Computing k_{*}*alpha using the minimum kernel trick and exploiting sparsity of the feature vector given
@@ -225,7 +264,12 @@ namespace NICE {
       * @param beta result of the scalar product
       * @param pf optional feature transformation
       */
-      void hik_kernel_sum(const NICE::VVector & A, const NICE::VVector & B, const NICE::Vector & xstar, double & beta, const ParameterizedFunction *pf = NULL ) const;      
+      void hik_kernel_sum(const NICE::VVector & _A, 
+                          const NICE::VVector & _B, 
+                          const NICE::Vector & _xstar, 
+                          double & _beta, 
+                          const ParameterizedFunction *_pf = NULL 
+                         ) const;      
       
       /**
       * @brief compute beta = k_*^T * alpha by using a large lookup table created by hik_prepare_alpha_multiplications_fast
@@ -238,7 +282,11 @@ namespace NICE {
       * @param xstar feature vector (indirect k_*)
       * @param beta result of the calculation
       */
-      void hik_kernel_sum_fast(const double* Tlookup, const Quantization & q, const NICE::Vector & xstar, double & beta) const;
+      void hik_kernel_sum_fast(const double* _Tlookup, 
+                               const Quantization & _q, 
+                               const NICE::Vector & _xstar, 
+                               double & _beta
+                              ) const;
       /**
       * @brief compute beta = k_*^T * alpha by using a large lookup table created by hik_prepare_alpha_multiplications_fast
       * NOTE: Whenever possible, you should use sparse features to obtain significantly smaller computation times!
@@ -250,7 +298,11 @@ namespace NICE {
       * @param beta result of the calculation
       */      
 
-      void hik_kernel_sum_fast(const double *Tlookup, const Quantization & q, const NICE::SparseVector & xstar, double & beta) const;
+      void hik_kernel_sum_fast(const double *_Tlookup, 
+                               const Quantization & _q, 
+                               const NICE::SparseVector & _xstar, 
+                               double & _beta
+                              ) const;
 
       /**
       * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
@@ -264,7 +316,11 @@ namespace NICE {
       * @return C standard vector representing a q.size()*n double matrix and the lookup table T. Elements can be accessed with
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
-      double *hik_prepare_alpha_multiplications_fast(const NICE::VVector & A, const NICE::VVector & B, const Quantization & q, const ParameterizedFunction *pf = NULL ) const;
+      double *hik_prepare_alpha_multiplications_fast(const NICE::VVector & _A, 
+                                                     const NICE::VVector & _B, 
+                                                     const Quantization & _q, 
+                                                     const ParameterizedFunction *_pf = NULL 
+                                                    ) const;
       
       /**
       * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
@@ -277,7 +333,10 @@ namespace NICE {
       * @return C standard vector representing a q.size()*n double matrix and the lookup table T. Elements can be accessed with
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
-      double* hikPrepareLookupTable(const NICE::Vector & alpha, const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+      double* hikPrepareLookupTable(const NICE::Vector & _alpha, 
+                                    const Quantization & _q, 
+                                    const ParameterizedFunction *_pf = NULL
+                                   ) const;
 
       /**
       * @brief update the lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
@@ -290,7 +349,13 @@ namespace NICE {
       * @param q Quantization
       * @param pf ParameterizedFunction to change the original feature values
       */
-      void hikUpdateLookupTable(double * T, const double & alphaNew, const double & alphaOld, const int & idx, const Quantization & q, const ParameterizedFunction *pf ) const;
+      void hikUpdateLookupTable(double * _T, 
+                                const double & _alphaNew, 
+                                const double & _alphaOld, 
+                                const uint & _idx, 
+                                const Quantization & _q, 
+                                const ParameterizedFunction *_pf 
+                               ) const;
 
       /**
       * @brief return a reference to the sorted feature matrix
@@ -315,14 +380,23 @@ namespace NICE {
        * @return C standard vector representing a q.size()*n double matrix and the lookup table T. Elements can be accessed with
        * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
        **/
-      double *solveLin(const NICE::Vector & y, NICE::Vector & alpha, const Quantization & q, const ParameterizedFunction *pf = NULL, const bool & useRandomSubsets = true, uint maxIterations = 10000, const int & _sizeOfRandomSubset = (-1), double minDelta = 1e-7, bool timeAnalysis = false) const;
+      double *solveLin(const NICE::Vector & _y, 
+                       NICE::Vector & _alpha, 
+                       const Quantization & _q, 
+                       const ParameterizedFunction *_pf = NULL, 
+                       const bool & _useRandomSubsets = true, 
+                       uint _maxIterations = 10000, 
+                       const uint & _sizeOfRandomSubset = 0, 
+                       double _minDelta = 1e-7, 
+                       bool _timeAnalysis = false
+                      ) const;
 
 
       //! set the noise parameter
-      void setNoise ( double noise ) { this->noise = noise; }
+      void setNoise ( double _noise ) { this->d_noise = _noise; }
 
       //! get the current noise parameter
-      double getNoise (void) const { return noise; }
+      double getNoise (void) const { return this->d_noise; }
       
       double getFrobNormApprox();
       
@@ -332,7 +406,7 @@ namespace NICE {
       * @author Alexander Freytag
       * @date 10-04-2012 (dd-mm-yyyy)
       */
-      void hikPrepareKVNApproximation(NICE::VVector & A) const;
+      void hikPrepareKVNApproximation(NICE::VVector & _A) const;
       
       /** 
       * @brief  Compute lookup table for HIK calculation of |k_*|^2 assuming quantized test samples. You have to run hikPrepareSquaredKernelVector before
@@ -346,7 +420,7 @@ namespace NICE {
       * @return C standard vector representing a q.size()*d double matrix and the lookup table T. Elements can be accessed with
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
-      double * hikPrepareKVNApproximationFast(NICE::VVector & A, const Quantization & q, const ParameterizedFunction *pf = NULL ) const;
+      double * hikPrepareKVNApproximationFast(NICE::VVector & _A, const Quantization & _q, const ParameterizedFunction *_pf = NULL ) const;
       
       /**
      * @brief Compute lookup table for HIK calculation of |k_*|^2 assuming quantized test samples (equals hikPrepareSquaredKernelVector + hikPrepareSquaredKernelVectorFast, but is faster). Approximation does not consider mixed terms between dimensions.
@@ -359,7 +433,7 @@ namespace NICE {
       * @return C standard vector representing a q.size()*d double matrix and the lookup table T. Elements can be accessed with
       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
       */
-      double* hikPrepareLookupTableForKVNApproximation(const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+      double* hikPrepareLookupTableForKVNApproximation(const Quantization & _q, const ParameterizedFunction *_pf = NULL) const;
       
     //////////////////////////////////////////
     // variance computation: sparse inputs
@@ -375,7 +449,7 @@ namespace NICE {
       * @param norm result of the squared norm approximation
       * @param pf optional feature transformation
       */
-      void hikComputeKVNApproximation(const NICE::VVector & A, const NICE::SparseVector & xstar, double & norm, const ParameterizedFunction *pf = NULL ) ;
+      void hikComputeKVNApproximation(const NICE::VVector & _A, const NICE::SparseVector & _xstar, double & _norm, const ParameterizedFunction *_pf = NULL ) ;
       
       /**
      * @brief Approximate norm = |k_*|^2 using a large lookup table created by hikPrepareSquaredKernelVector and hikPrepareSquaredKernelVectorFast or directly using hikPrepareLookupTableForSquaredKernelVector. Approximation does not consider mixed terms between dimensions.
@@ -387,7 +461,7 @@ namespace NICE {
       * @param xstar feature vector (indirect k_*)
       * @param norm result of the calculation
       */
-      void hikComputeKVNApproximationFast(const double *Tlookup, const Quantization & q, const NICE::SparseVector & xstar, double & norm ) const;
+      void hikComputeKVNApproximationFast(const double *_Tlookup, const Quantization & _q, const NICE::SparseVector & _xstar, double & _norm ) const;
 
       /**
      * @brief Compute the kernel vector k_* between training examples and test example. Runtime: O(n \times D). Exploiting sparsity
@@ -397,7 +471,7 @@ namespace NICE {
       * @param xstar feature vector
       * @param kstar kernel vector
       */      
-      void hikComputeKernelVector( const NICE::SparseVector & xstar, NICE::Vector & kstar) const;
+      void hikComputeKernelVector( const NICE::SparseVector & _xstar, NICE::Vector & _kstar) const;
       
     //////////////////////////////////////////
     // variance computation: non-sparse inputs
@@ -413,7 +487,7 @@ namespace NICE {
       * @param norm result of the squared norm approximation
       * @param pf optional feature transformation
       */
-      void hikComputeKVNApproximation(const NICE::VVector & A, const NICE::Vector & xstar, double & norm, const ParameterizedFunction *pf = NULL ) ;
+      void hikComputeKVNApproximation(const NICE::VVector & _A, const NICE::Vector & _xstar, double & _norm, const ParameterizedFunction *_pf = NULL ) ;
       
       /**
      * @brief Approximate norm = |k_*|^2 using a large lookup table created by hikPrepareSquaredKernelVector and hikPrepareSquaredKernelVectorFast or directly using hikPrepareLookupTableForSquaredKernelVector. Approximation does not consider mixed terms between dimensions.
@@ -425,7 +499,7 @@ namespace NICE {
       * @param xstar feature vector (indirect k_*)
       * @param norm result of the calculation
       */
-      void hikComputeKVNApproximationFast(const double *Tlookup, const Quantization & q, const NICE::Vector & xstar, double & norm ) const;      
+      void hikComputeKVNApproximationFast(const double *_Tlookup, const Quantization & _q, const NICE::Vector & _xstar, double & _norm ) const;      
       
       /**
      * @brief Compute the kernel vector k_* between training examples and test example. Runtime: O(n \times D). Does not exploit sparsity - deprecated!
@@ -435,26 +509,26 @@ namespace NICE {
       * @param xstar feature vector
       * @param kstar kernel vector
       */      
-      void hikComputeKernelVector( const NICE::Vector & xstar, NICE::Vector & kstar) const;      
+      void hikComputeKernelVector( const NICE::Vector & _xstar, NICE::Vector & _kstar) const;      
       
       /** Persistent interface */
-      virtual void restore ( std::istream & is, int format = 0 );
-      virtual void store ( std::ostream & os, int format = 0 ) const; 
+      virtual void restore ( std::istream & _is, int _format = 0 );
+      virtual void store ( std::ostream & _os, int _format = 0 ) const; 
       virtual void clear ();
       
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
     // interface specific methods for incremental extensions
     ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
       
-    virtual void addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement = true
-			   );
-			   
-    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement = true
-				    );  
+    virtual void addExample( const NICE::SparseVector * _example, 
+                             const double & _label, 
+                             const bool & _performOptimizationAfterIncrement = true
+                           );
+
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                      const NICE::Vector & _newLabels,
+                                      const bool & _performOptimizationAfterIncrement = true
+                                    );  
     
 
       /**
@@ -464,7 +538,7 @@ namespace NICE {
       *
       * @param example new feature vector
       */       
-      void addExample(const NICE::SparseVector * example, const NICE::ParameterizedFunction *pf = NULL);
+      void addExample(const NICE::SparseVector * _example, const NICE::ParameterizedFunction *_pf = NULL);
       
       /**
      * @brief Add multiple new examples to the feature storage. You have to update the corresponding variables explicitly after that.
@@ -473,7 +547,7 @@ namespace NICE {
       *
       * @param newExamples new feature vectors
       */       
-      void addMultipleExamples(const std::vector<const NICE::SparseVector * > & newExamples, const NICE::ParameterizedFunction *pf = NULL);        
+      void addMultipleExamples(const std::vector<const NICE::SparseVector * > & _newExamples, const NICE::ParameterizedFunction *_pf = NULL);        
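
All the *_fast methods above rely on the documented table layout T[dim*q.size() + j]. A self-contained sketch of that access pattern, assuming uniform quantization of feature values into [0,1) (the real NICE Quantization object may map values differently):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // beta = k_*^T * alpha assembled in O(D): one lookup per dimension,
    // using the documented layout T[dim * q.size() + j].
    double hikScoreFromLUT ( const double * Tlookup,
                             std::size_t numBins,                // q.size()
                             const std::vector<double> & xstar ) // test example
    {
      double beta = 0.0;
      for ( std::size_t dim = 0; dim < xstar.size(); dim++ )
      {
        // bin index j of the quantized feature value, clamped to the last bin
        std::size_t j = std::min ( numBins - 1,
                                   static_cast<std::size_t> ( xstar[dim] * numBins ) );
        beta += Tlookup[ dim * numBins + j ];
      }
      return beta;
    }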
       
       
      

+ 112 - 46
FeatureMatrixT.h

@@ -42,14 +42,14 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
 {
 
   protected:
-    int n;
-    int d;
+    uint ui_n;
+    uint ui_d;
     std::vector<NICE::SortedVectorSparse<T> > features;
     
     //! verbose flag for output after calling the restore-function
-    bool verbose;
+    bool b_verbose;
     //! debug flag for output during debugging
-    bool debug;
+    bool b_debug;
 
 
   public:
@@ -85,7 +85,9 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy) 
     */
-    FeatureMatrixT(const std::vector<std::vector<T> > & _features, const int & _dim = -1);
+    FeatureMatrixT(const std::vector<std::vector<T> > & _features, 
+                   const uint & _dim = 0
+                  );
     
 #ifdef NICE_USELIB_MATIO
     /** 
@@ -93,11 +95,16 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @author Alexander Freytag
     * @date 10-01-2012 (dd-mm-yyyy)
     */
-    FeatureMatrixT(const sparse_t & _features, const int & _dim = -1);//, const int & nrFeatures);
+    FeatureMatrixT(const sparse_t & _features, 
+                   const uint & _dim = 0
+                  );//, const int & nrFeatures);
 #endif
 
     /** just another constructor for sparse features */
-    FeatureMatrixT(const std::vector< const NICE::SparseVector * > & X, const bool dimensionsOverExamples = false, const int & _dim = -1);
+    FeatureMatrixT(const std::vector< const NICE::SparseVector * > & _X, 
+                   const bool _dimensionsOverExamples = false, 
+                   const uint & _dim = 0
+                  );
     
 #ifdef NICE_USELIB_MATIO
     /**
@@ -107,7 +114,9 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
    * @param _features sparse data matrix (see MatFileIO)
     * @param examples set of example indices
     */
-    FeatureMatrixT(const sparse_t & _features, const std::map<int, int> & examples , const int & _dim = -1);
+    FeatureMatrixT(const sparse_t & _features, 
+                   const std::map<uint, uint> & _examples , 
+                   const uint & _dim = 0);
 #endif
 
     /** 
@@ -126,20 +135,20 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-      int get_n() const;
+      uint get_n() const;
     /** 
     * @brief Get number of dimensions
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-      int get_d() const;
+      uint get_d() const;
       
     /** 
     * @brief Sets the given dimension and re-sizes internal data structure. WARNING: this will completely remove your current data!
     * @author Alexander Freytag
     * @date 06-12-2011 (dd-mm-yyyy)
     */
-      void set_d(const int & _d);
+      void set_d(const uint & _d);
       
     /** set verbose flag used for restore-functionality*/
     void setVerbose( const bool & _verbose);
@@ -158,7 +167,7 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @param F data to compare with
     * @return true if \c F and \c this are equal
     */
-    inline bool operator==(const FeatureMatrixT<T> & F) const;
+    inline bool operator==(const FeatureMatrixT<T> & _F) const;
     
     /**
     * @brief Compare \c F with \c this.
@@ -168,7 +177,7 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @param F data to compare with
     * @return true if \c F and \c this are not equal
     */
-    inline bool operator!= (const FeatureMatrixT<T> & F) const;
+    inline bool operator!= (const FeatureMatrixT<T> & _F) const;
 
     /**
     * @brief Copy data from \c F to \c this.
@@ -177,83 +186,113 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @param v New data
     * @return \c *this
     */
-    inline FeatureMatrixT<T>& operator=(const FeatureMatrixT<T> & F);
+    inline FeatureMatrixT<T>& operator=(const FeatureMatrixT<T> & _F);
       
     /** 
     * @brief Matrix-like operator for element access, performs validity check
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-    inline T operator()(const int row, const int col) const;
+    inline T operator()(const uint _row, 
+                        const uint _col
+                       ) const;
     
     /** 
     * @brief Element access without validity check
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    inline T getUnsafe(const int row, const int col) const;
+    inline T getUnsafe(const uint _row,
+                       const uint _col
+                      ) const;
 
     /** 
     * @brief Element access of original values without validity check
     * @author Erik Rodner
     */
-    inline T getOriginal(const int row, const int col) const;
+    inline T getOriginal(const uint _row,
+                         const uint _col
+                        ) const;
 
     /** 
     * @brief Sets a specified element to the given value, performs validity check
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-    inline void set (const int row, const int col, const T & newElement, bool setTransformedValue = false);
+    inline void set (const uint _row, 
+                     const uint _col, 
+                     const T & _newElement, 
+                     bool _setTransformedValue = false
+                    );
     
     /** 
     * @brief Sets a specified element to the given value, without validity check
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    inline void setUnsafe (const int row, const int col, const T & newElement, bool setTransformedValue = false);
+    inline void setUnsafe (const uint _row, 
+                           const uint _col, 
+                           const T & _newElement, 
+                           bool _setTransformedValue = false
+                          );
     
     /** 
     * @brief Access to all element entries of a specified dimension, including validity check
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void getDimension(const int & dim, NICE::SortedVectorSparse<T> & dimension) const;
+    void getDimension(const uint & _dim, 
+                      NICE::SortedVectorSparse<T> & _dimension
+                     ) const;
     
     /** 
     * @brief Access to all element entries of a specified dimension, without validity check
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void getDimensionUnsafe(const int & dim, NICE::SortedVectorSparse<T> & dimension) const;
+    void getDimensionUnsafe(const uint & _dim, 
+                            NICE::SortedVectorSparse<T> & _dimension
+                           ) const;
     
     /** 
     * @brief Finds the first element in a given dimension, which equals elem (orig feature value, not the transformed one)
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void findFirstInDimension(const int & dim, const T & elem, int & position) const;
+    void findFirstInDimension(const uint & _dim, 
+                              const T & _elem, 
+                              uint & _position
+                             ) const;
     
     /** 
     * @brief Finds the last element in a given dimension, which equals elem (orig feature value, not the transformed one)
     * @author Alexander Freytag
    * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void findLastInDimension(const int & dim, const T & elem, int & position) const;
+    void findLastInDimension(const uint & _dim, 
+                             const T & _elem, 
+                             uint & _position
+                            ) const;
     
     /** 
    * @brief Finds the first element in a given dimension, which is larger than elem (orig feature value, not the transformed one)
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void findFirstLargerInDimension(const int & dim, const T & elem, int & position) const;
+    void findFirstLargerInDimension(const uint & _dim, 
+                                    const T & _elem, 
+                                    uint & _position
+                                   ) const;
     
     /** 
    * @brief Finds the last element in a given dimension, which is smaller than elem (orig feature value, not the transformed one)
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void findLastSmallerInDimension(const int & dim, const T & elem, int & position) const;
+    void findLastSmallerInDimension(const uint & _dim, 
+                                    const T & _elem, 
+                                    uint & _position
+                                   ) const;
     
     //------------------------------------------------------
     // high level methods
@@ -266,7 +305,7 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     *
     * @param pf the parameterized function (optional), if not given, nothing will be done
     */    
-    void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf = NULL );
+    void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *_pf = NULL );
     
     /** 
     * @brief Computes the ratio of sparsity across the matrix
@@ -280,13 +319,17 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-    void add_feature(const std::vector<T> & feature, const NICE::ParameterizedFunction *pf = NULL);
+    void add_feature(const std::vector<T> & _feature, 
+                     const NICE::ParameterizedFunction *_pf = NULL
+                    );
     /** 
     * @brief add a new feature and insert its elements in the already ordered structure, will be casted to type T
     * @author Alexander Freytag
     * @date 25-04-2012 (dd-mm-yyyy)
     */    
-    void add_feature(const NICE::SparseVector & feature, const NICE::ParameterizedFunction *pf = NULL);
+    void add_feature(const NICE::SparseVector & _feature, 
+                     const NICE::ParameterizedFunction *_pf = NULL
+                    );
 
     /** 
     * @brief add several new features and insert their elements in the already ordered structure
@@ -300,53 +343,76 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-    void set_features(const std::vector<std::vector<T> > & _features, std::vector<std::vector<int> > & permutations, const int & _dim = -1);
-    void set_features(const std::vector<std::vector<T> > & _features, std::vector<std::map<int,int> > & permutations, const int & _dim = -1);
-    void set_features(const std::vector<std::vector<T> > & _features, const int & _dim = -1);
-    void set_features(const std::vector< const NICE::SparseVector * > & _features, const bool dimensionsOverExamples = false, const int & _dim = -1);
+    void set_features(const std::vector<std::vector<T> > & _features, 
+                      std::vector<std::vector<uint> > & _permutations, 
+                      const uint & _dim = 0
+                     );
+    void set_features(const std::vector<std::vector<T> > & _features, 
+                      std::vector<std::map<uint,uint> > & _permutations, 
+                      const uint & _dim = 0
+                     );
+    void set_features(const std::vector<std::vector<T> > & _features, 
+                      const uint & _dim = 0
+                     );
+    void set_features(const std::vector< const NICE::SparseVector * > & _features, 
+                      const bool _dimensionsOverExamples = false, 
+                      const uint & _dim = 0
+                     );
     
     /**
     * @brief get a permutation vector for each dimension
     *
     * @param resulting permutation matrix
     */
-    void getPermutations( std::vector<std::vector<int> > & permutations) const;
-    void getPermutations( std::vector<std::map<int,int> > & permutations) const;
+    void getPermutations( std::vector<std::vector<uint> > & _permutations) const;
+    void getPermutations( std::vector<std::map<uint,uint> > & _permutations) const;
       
     /** 
     * @brief Prints the whole Matrix (outer loop over dimension, inner loop over features)
     * @author Alexander Freytag
     * @date 07-12-2011 (dd-mm-yyyy)
     */
-    void print(std::ostream & os) const;
+    void print(std::ostream & _os) const;
     
     /** 
     * @brief Computes the whole non-sparse matrix. WARNING: this may result in a really memory-consuming data-structure!
     * @author Alexander Freytag
     * @date 12-01-2012 (dd-mm-yyyy)
     */
-    void computeNonSparseMatrix(NICE::MatrixT<T> & matrix, bool transpose = false) const;
+    void computeNonSparseMatrix(NICE::MatrixT<T> & _matrix, 
+                                bool _transpose = false
+                               ) const;
     
     /** 
     * @brief Computes the whole non-sparse matrix. WARNING: this may result in a really memory-consuming data-structure!
     * @author Alexander Freytag
     * @date 12-01-2012 (dd-mm-yyyy)
     */
-    void computeNonSparseMatrix(std::vector<std::vector<T> > & matrix, bool transpose = false) const;
+    void computeNonSparseMatrix(std::vector<std::vector<T> > & _matrix, 
+                                bool _transpose = false
+                               ) const;
     
     /** 
    * @brief Swaps two specified elements, performing a validity check
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void swap(const int & row1, const int & col1, const int & row2, const int & col2);
+    void swap(const uint & _row1, 
+              const uint & _col1,
+              const uint & _row2, 
+              const uint & _col2
+             );
     
     /** 
    * @brief Swaps two specified elements, without performing a validity check
     * @author Alexander Freytag
     * @date 08-12-2011 (dd-mm-yyyy)
     */
-    void swapUnsafe(const int & row1, const int & col1, const int & row2, const int & col2);
+    void swapUnsafe(const uint & _row1, 
+                    const uint & _col1, 
+                    const uint & _row2, 
+                    const uint & _col2
+                   );
 
     /**
     * @brief direct access to elements
@@ -355,7 +421,7 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     *
     * @return sorted feature values
     */
-    const SortedVectorSparse<T> & getFeatureValues ( int dim ) const { return features[dim]; };
+    const SortedVectorSparse<T> & getFeatureValues ( uint _dim ) const { return this->features[_dim]; };
  
     /**
     * @brief direct read/write access to elements
@@ -364,7 +430,7 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     *
     * @return sorted feature values
     */
-    SortedVectorSparse<T> & getFeatureValues ( int dim ) { return features[dim]; };
+    SortedVectorSparse<T> & getFeatureValues ( uint _dim ) { return this->features[_dim]; };
    
     
     /**
@@ -372,7 +438,7 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     *
     * @param diagonalElements resulting vector
     */
-    void hikDiagonalElements( Vector & diagonalElements ) const;
+    void hikDiagonalElements( Vector & _diagonalElements ) const;
 
     /**
     * @brief Compute the trace of the HIK kernel matrix induced by the features
@@ -386,18 +452,18 @@ template<class T> class FeatureMatrixT : public NICE::Persistent
     *
     * @return number of nonzero elements on the specified dimension
     */ 
-    int getNumberOfNonZeroElementsPerDimension(const int & dim) const;
+    uint getNumberOfNonZeroElementsPerDimension(const uint & _dim) const;
    
     /**
     * @brief Return the number of zero elements in a specified dimension that are currently stored in the feature matrix
     *
     * @return number of zero elements on the specified dimension
     */ 
-    int getNumberOfZeroElementsPerDimension(const int & dim) const;
+    uint getNumberOfZeroElementsPerDimension(const uint & _dim) const;
     
     /** Persistent interface */
-    virtual void restore ( std::istream & is, int format = 0 );
-    virtual void store ( std::ostream & os, int format = 0 ) const;
+    virtual void restore ( std::istream & _is, int _format = 0 );
+    virtual void store ( std::ostream & _os, int _format = 0 ) const;
     virtual void clear ( );
 
 };
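
The accessors above now consistently use uint for dimension indices and element counts. A minimal usage sketch of the changed FeatureMatrixT<T> interface — only the signatures stem from this header, the surrounding setup is hypothetical:

NICE::FeatureMatrixT<double> fm;              // assume it was filled elsewhere
NICE::MatrixT<double> dense;

// WARNING from the docs above: this allocates the full dense matrix
fm.computeNonSparseMatrix ( dense, false /* _transpose */ );

uint dim ( 0 );                               // dimension index, now uint
uint nnz = fm.getNumberOfNonZeroElementsPerDimension ( dim );
uint nz  = fm.getNumberOfZeroElementsPerDimension ( dim );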

The diff for this file is too large to be shown
+ 385 - 300
FeatureMatrixT.tcc


+ 230 - 157
GPHIKClassifier.cpp

@@ -46,7 +46,9 @@ GPHIKClassifier::GPHIKClassifier( )
   
 }
 
-GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSection )
+GPHIKClassifier::GPHIKClassifier( const Config *_conf, 
+                                  const string & _confSection 
+                                )
 {
   ///////////
   // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
@@ -61,13 +63,13 @@ GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSecti
   // here comes the new code part different from the empty constructor
   ///////////
   
-  this->confSection = s_confSection;  
+  this->confSection = _confSection;  
   
   // if no config file was given, we either restore the classifier from an external file, or run ::init with 
  // an empty config (thereby using default values) when calling the train-method
-  if ( conf != NULL )
+  if ( _conf != NULL )
   {
-    this->initFromConfig( conf, confSection );
+    this->initFromConfig( _conf, _confSection );
   }
   else
   {
@@ -79,23 +81,26 @@ GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSecti
 
 GPHIKClassifier::~GPHIKClassifier()
 {
-  if ( gphyper != NULL )
-    delete gphyper;
+  if ( this->gphyper != NULL )
+    delete this->gphyper;
 }
 
-void GPHIKClassifier::initFromConfig(const Config *conf, const string & s_confSection)
+void GPHIKClassifier::initFromConfig(const Config *_conf, 
+                                     const string & _confSection
+                                    )
 { 
-  this->noise = conf->gD(confSection, "noise", 0.01);
+  this->d_noise     = _conf->gD( _confSection, "noise", 0.01);
 
-  this->confSection = confSection;
-  this->verbose = conf->gB(confSection, "verbose", false);
-  this->debug = conf->gB(confSection, "debug", false);
-  this->uncertaintyPredictionForClassification = conf->gB( confSection, "uncertaintyPredictionForClassification", false );
+  this->confSection = _confSection;
+  this->b_verbose   = _conf->gB( _confSection, "verbose", false);
+  this->b_debug     = _conf->gB( _confSection, "debug", false);
+  this->uncertaintyPredictionForClassification 
+                    = _conf->gB( _confSection, "uncertaintyPredictionForClassification", false );
   
 
    
   //how do we approximate the predictive variance for classification uncertainty?
-  string s_varianceApproximation = conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
+  string s_varianceApproximation = _conf->gS(_confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
   if ( (s_varianceApproximation.compare("approximate_rough") == 0) || ((s_varianceApproximation.compare("1") == 0)) )
   {
     this->varianceApproximation = APPROXIMATE_ROUGH;
@@ -108,7 +113,7 @@ void GPHIKClassifier::initFromConfig(const Config *conf, const string & s_confSe
     this->varianceApproximation = APPROXIMATE_FINE;    
     
     //security check - compute at least one eigenvalue for this approximation strategy
-    this->gphyper->setNrOfEigenvaluesToConsiderForVarApprox ( std::max( conf->gI(confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 ), 1) );
+    this->gphyper->setNrOfEigenvaluesToConsiderForVarApprox ( std::max( _conf->gI(_confSection, "nrOfEigenvaluesToConsiderForVarApprox", 1 ), 1) );
   }
   else if ( (s_varianceApproximation.compare("exact") == 0)  || ((s_varianceApproximation.compare("3") == 0)) )
   {
@@ -125,18 +130,18 @@ void GPHIKClassifier::initFromConfig(const Config *conf, const string & s_confSe
     this->gphyper->setNrOfEigenvaluesToConsiderForVarApprox ( 0 );
   } 
   
-  if ( this->verbose )
+  if ( this->b_verbose )
     std::cerr << "varianceApproximationStrategy: " << s_varianceApproximation  << std::endl;
   
   //NOTE init all member pointer variables here as well
-  this->gphyper->initFromConfig ( conf, confSection /*possibly delete the handing of confSection*/);
+  this->gphyper->initFromConfig ( _conf, _confSection /*possibly delete the handling of confSection*/);
 }
 
 ///////////////////// ///////////////////// /////////////////////
 //                         GET / SET
 ///////////////////// ///////////////////// ///////////////////// 
 
-std::set<int> GPHIKClassifier::getKnownClassNumbers ( ) const
+std::set<uint> GPHIKClassifier::getKnownClassNumbers ( ) const
 {
   if ( ! this->b_isTrained )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
@@ -149,92 +154,146 @@ std::set<int> GPHIKClassifier::getKnownClassNumbers ( ) const
 //                      CLASSIFIER STUFF
 ///////////////////// ///////////////////// /////////////////////
 
-void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores ) const
+void GPHIKClassifier::classify ( const SparseVector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores 
+                               ) const
 {
   double tmpUncertainty;
-  this->classify( example, result, scores, tmpUncertainty );
+  this->classify( _example, _result, _scores, tmpUncertainty );
 }
 
-void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores ) const
+void GPHIKClassifier::classify ( const NICE::Vector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores 
+                               ) const
 {
   double tmpUncertainty;
-  this->classify( example, result, scores, tmpUncertainty );
+  this->classify( _example, _result, _scores, tmpUncertainty );
 }
 
-void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores, double & uncertainty ) const
+void GPHIKClassifier::classify ( const SparseVector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores, 
+                                 double & _uncertainty 
+                               ) const
 {
   if ( ! this->b_isTrained )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );
+    
+  _scores.clear(); 
   
-  scores.clear();
-  
-  result = gphyper->classify ( *example, scores );
+  if ( this->b_debug )
+  {
+    std::cerr << "GPHIKClassifier::classify (sparse)" << std::endl;
+    _example->store( std::cerr );  
+  }
+ 
+  _result = gphyper->classify ( *_example, _scores );
+
+  if ( this->b_debug )
+  {  
+    _scores.store ( std::cerr ); 
+    std::cerr << "_result: " << _result << std::endl;
+  }
 
-  if ( scores.size() == 0 ) {
-    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  if ( _scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << _example->size() );
   }
   
-  if (uncertaintyPredictionForClassification)
+  if ( this->uncertaintyPredictionForClassification )
   {
-    if (varianceApproximation != NONE)
+    if ( this->b_debug )
     {
-      this->predictUncertainty( example, uncertainty );
+      std::cerr << "GPHIKClassifier::classify -- uncertaintyPredictionForClassification is true"  << std::endl;
+    }
+    
+    if ( this->varianceApproximation != NONE)
+    {
+      this->predictUncertainty( _example, _uncertainty );
     }  
     else
     {
-      //do nothing
-      uncertainty = std::numeric_limits<double>::max();
+      //do nothing
+      _uncertainty = std::numeric_limits<double>::max();
     }
   }
   else
   {
+    if ( this->b_debug )
+    {
+      std::cerr << "GPHIKClassifier::classify -- uncertaintyPredictionForClassification is false"  << std::endl;
+    }    
+    
     //do nothing
-    uncertainty = std::numeric_limits<double>::max();
+    _uncertainty = std::numeric_limits<double>::max();
   }    
 }
 
-void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores, double & uncertainty ) const
+void GPHIKClassifier::classify ( const NICE::Vector * _example,  
+                                 uint & _result, 
+                                 SparseVector & _scores, 
+                                 double & _uncertainty 
+                               ) const
 {
+  
   if ( ! this->b_isTrained )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
   
-  scores.clear();
+  _scores.clear();
+  
+  if ( this->b_debug )
+  {  
+    std::cerr << "GPHIKClassifier::classify (non-sparse)" << std::endl;
+    std::cerr << *_example << std::endl;
+  }
+    
+  _result = this->gphyper->classify ( *_example, _scores );
   
-  result = gphyper->classify ( *example, scores );
+  if ( this->b_debug )
+  {  
+    std::cerr << "GPHIKClassifier::classify (non-sparse) -- classification done " << std::endl;
+  }
+ 
 
-  if ( scores.size() == 0 ) {
-    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  if ( _scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << _example->size() );
   }
     
-  if (uncertaintyPredictionForClassification)
+  if ( this->uncertaintyPredictionForClassification )
   {
-    if (varianceApproximation != NONE)
+    if ( this->varianceApproximation != NONE)
     {
-      this->predictUncertainty( example, uncertainty );
+      this->predictUncertainty( _example, _uncertainty );
     }  
     else
     {
       //do nothing
-      uncertainty = std::numeric_limits<double>::max();
+      _uncertainty = std::numeric_limits<double>::max();
     }
   }
   else
   {
     //do nothing
-    uncertainty = std::numeric_limits<double>::max();
+    _uncertainty = std::numeric_limits<double>::max();
   }  
 }
 
 /** training process */
-void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels )
+void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                              const NICE::Vector & _labels 
+                            )
 {
+  
+  //FIXME add a check whether the classifier has been trained already. If so, discard all previous results.
+    
   // security-check: examples and labels have to be of same size
-  if ( examples.size() != labels.size() ) 
+  if ( _examples.size() != _labels.size() ) 
   {
     fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
   }  
   
-  if (verbose)
+  if (b_verbose)
   {
     std::cerr << "GPHIKClassifier::train" << std::endl;
   }
@@ -242,34 +301,35 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
   Timer t;
   t.start();
   
-  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
-  gphyper->setFastMinKernel ( fmk ); 
+  FastMinKernel *fmk = new FastMinKernel ( _examples, d_noise, this->b_debug );
+
+  this->gphyper->setFastMinKernel ( fmk ); 
   
   t.stop();
-  if (verbose)
+  if (b_verbose)
     std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
  
 
-  if (verbose)
-    cerr << "Learning ..." << endl;
+  if (b_verbose)
+    std::cerr << "Learning ..." << endl;
 
   // go go go
-  gphyper->optimize ( labels );
-  if (verbose)
+  this->gphyper->optimize ( _labels );
+  if (b_verbose)
     std::cerr << "optimization done" << std::endl;
   
-  if ( ( varianceApproximation != NONE ) )
+  if ( ( this->varianceApproximation != NONE ) )
   {    
-    switch (varianceApproximation)    
+    switch ( this->varianceApproximation )    
     {
       case APPROXIMATE_ROUGH:
       {
-        gphyper->prepareVarianceApproximationRough();
+        this->gphyper->prepareVarianceApproximationRough();
         break;
       }
       case APPROXIMATE_FINE:
       {
-        gphyper->prepareVarianceApproximationFine();
+        this->gphyper->prepareVarianceApproximationFine();
         break;
       }    
       case EXACT:
@@ -288,50 +348,54 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
   this->b_isTrained = true;
 
   // clean up all examples ??
-  if (verbose)
+  if (b_verbose)
     std::cerr << "Learning finished" << std::endl;
 }
 
 /** training process */
-void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
+void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                              std::map<uint, NICE::Vector> & _binLabels 
+                            )
 { 
   // security-check: examples and labels have to be of same size
-  for ( std::map< int, NICE::Vector >::const_iterator binLabIt = binLabels.begin();
-        binLabIt != binLabels.end();
+  for ( std::map< uint, NICE::Vector >::const_iterator binLabIt = _binLabels.begin();
+        binLabIt != _binLabels.end();
         binLabIt++ 
       )
   {
-    if ( examples.size() != binLabIt->second.size() ) 
+    if ( _examples.size() != binLabIt->second.size() ) 
     {
       fthrow(Exception, "Given examples do not match label vector in size -- aborting!" );  
     }
   }
   
-  if (verbose)
+  if ( this->b_verbose )
     std::cerr << "GPHIKClassifier::train" << std::endl;
  
   Timer t;
   t.start();
   
-  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
-  gphyper->setFastMinKernel ( fmk );  
+  FastMinKernel *fmk = new FastMinKernel ( _examples, d_noise, this->b_debug );
+  this->gphyper->setFastMinKernel ( fmk );  
   
   t.stop();
-  if (verbose)
+  if ( this->b_verbose )
     std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
 
 
 
-  if (verbose)
-    cerr << "Learning ..." << endl;
+  if ( this->b_verbose )
+    std::cerr << "Learning ..." << std::endl;
+  
   // go go go
-  gphyper->optimize ( binLabels );
-  if (verbose)
+  this->gphyper->optimize ( _binLabels );
+  
+  if ( this->b_verbose )
     std::cerr << "optimization done, now prepare for the uncertainty prediction" << std::endl;
   
-  if ( ( varianceApproximation != NONE ) )
+  if ( ( this->varianceApproximation != NONE ) )
   {    
-    switch (varianceApproximation)    
+    switch ( this->varianceApproximation )    
     {
       case APPROXIMATE_ROUGH:
       {
@@ -359,7 +423,7 @@ void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & e
   this->b_isTrained = true;
 
   // clean up all examples ??
-  if (verbose)
+  if ( this->b_verbose )
     std::cerr << "Learning finished" << std::endl;
 }
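
The binary-label variant of train expects a uint-keyed map since this commit. A sketch of how callers build it — the example container and the +1/-1 label vectors are hypothetical:

std::map<uint, NICE::Vector> binLabels;       // key type changed from int to uint
binLabels.insert ( std::pair<uint, NICE::Vector> ( 1, yBinaryClass1 ) );
binLabels.insert ( std::pair<uint, NICE::Vector> ( 2, yBinaryClass2 ) );

classifier.train ( examples, binLabels );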
 
@@ -370,27 +434,30 @@ GPHIKClassifier *GPHIKClassifier::clone () const
   return NULL;
 }
   
-void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const
+void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * _example, 
+                                          double & _uncertainty 
+                                        ) const
 {  
-  if (gphyper == NULL)
+  if ( this->gphyper == NULL )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
   
  //we directly store the predictive variances in the vector that will contain the classification uncertainties later on, to save storage
-  switch (varianceApproximation)    
+  switch ( this->varianceApproximation )    
   {
     case APPROXIMATE_ROUGH:
     {
-      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceApproximateRough( *_example, _uncertainty );
       break;
     }
     case APPROXIMATE_FINE:
     {
-      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      std::cerr << "gphyper->computePredictiveVarianceApproximateFine" << std::endl;
+      this->gphyper->computePredictiveVarianceApproximateFine( *_example, _uncertainty );
       break;
     }    
     case EXACT:
     {
-      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceExact( *_example, _uncertainty );
       break;
     }
     default:
@@ -400,27 +467,29 @@ void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, do
   }
 }
 
-void GPHIKClassifier::predictUncertainty( const NICE::Vector * example, double & uncertainty ) const
+void GPHIKClassifier::predictUncertainty( const NICE::Vector * _example, 
+                                          double & _uncertainty 
+                                        ) const
 {  
-  if (gphyper == NULL)
+  if ( this->gphyper == NULL )
      fthrow(Exception, "Classifier not trained yet -- aborting!" );  
   
  //we directly store the predictive variances in the vector that will contain the classification uncertainties later on, to save storage
-  switch (varianceApproximation)    
+  switch ( this->varianceApproximation )    
   {
     case APPROXIMATE_ROUGH:
     {
-      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceApproximateRough( *_example, _uncertainty );
       break;
     }
     case APPROXIMATE_FINE:
     {
-      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceApproximateFine( *_example, _uncertainty );
       break;
     }    
     case EXACT:
     {
-      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      this->gphyper->computePredictiveVarianceExact( *_example, _uncertainty );
       break;
     }
     default:
@@ -434,7 +503,9 @@ void GPHIKClassifier::predictUncertainty( const NICE::Vector * example, double &
 // interface specific methods for store and restore
 ///////////////////// INTERFACE PERSISTENT ///////////////////// 
 
-void GPHIKClassifier::restore ( std::istream & is, int format )
+void GPHIKClassifier::restore ( std::istream & _is, 
+                                int _format 
+                              )
 {
   //delete everything we knew so far...
   this->clear();
@@ -444,13 +515,13 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
   b_restoreVerbose = true;
 #endif  
   
-  if ( is.good() )
+  if ( _is.good() )
   {
     if ( b_restoreVerbose ) 
       std::cerr << " restore GPHIKClassifier" << std::endl;
     
     std::string tmp;
-    is >> tmp; //class name 
+    _is >> tmp; //class name 
     
     if ( ! this->isStartTag( tmp, "GPHIKClassifier" ) )
     {
@@ -464,13 +535,13 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
       gphyper = NULL;
     }    
     
-    is.precision (numeric_limits<double>::digits10 + 1);
+    _is.precision (numeric_limits<double>::digits10 + 1);
     
     bool b_endOfBlock ( false ) ;
     
     while ( !b_endOfBlock )
     {
-      is >> tmp; // start of block 
+      _is >> tmp; // start of block 
       
       if ( this->isEndTag( tmp, "GPHIKClassifier" ) )
       {
@@ -485,58 +556,58 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
       
       if ( tmp.compare("confSection") == 0 )
       {
-        is >> confSection;        
-        is >> tmp; // end of block 
+        _is >> confSection;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("gphyper") == 0 )
       {
-        if ( gphyper == NULL )
-          gphyper = new NICE::FMKGPHyperparameterOptimization();
+        if ( this->gphyper == NULL )
+          this->gphyper = new NICE::FMKGPHyperparameterOptimization();
         
        //then, load everything that we stored explicitly,
         // including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
-        gphyper->restore(is, format);  
+        this->gphyper->restore( _is, _format );  
           
-        is >> tmp; // end of block 
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }   
       else if ( tmp.compare("b_isTrained") == 0 )
       {
-        is >> b_isTrained;        
-        is >> tmp; // end of block 
+        _is >> b_isTrained;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
-      else if ( tmp.compare("noise") == 0 )
+      else if ( tmp.compare("d_noise") == 0 )
       {
-        is >> noise;        
-        is >> tmp; // end of block 
+        _is >> d_noise;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }      
-      else if ( tmp.compare("verbose") == 0 )
+      else if ( tmp.compare("b_verbose") == 0 )
       {
-        is >> verbose;        
-        is >> tmp; // end of block 
+        _is >> b_verbose;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }      
-      else if ( tmp.compare("debug") == 0 )
+      else if ( tmp.compare("b_debug") == 0 )
       {
-        is >> debug;        
-        is >> tmp; // end of block 
+        _is >> b_debug;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }      
       else if ( tmp.compare("uncertaintyPredictionForClassification") == 0 )
       {
-        is >> uncertaintyPredictionForClassification;        
-        is >> tmp; // end of block 
+        _is >> uncertaintyPredictionForClassification;        
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else if ( tmp.compare("varianceApproximation") == 0 )
       {
         unsigned int ui_varianceApproximation;
-        is >> ui_varianceApproximation;        
+        _is >> ui_varianceApproximation;        
         varianceApproximation = static_cast<VarianceApproximation> ( ui_varianceApproximation );
-        is >> tmp; // end of block 
+        _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
       }
       else
@@ -553,59 +624,61 @@ void GPHIKClassifier::restore ( std::istream & is, int format )
   }
 }
 
-void GPHIKClassifier::store ( std::ostream & os, int format ) const
+void GPHIKClassifier::store ( std::ostream & _os, 
+                              int _format 
+                            ) const
 { 
-  if (os.good())
+  if ( _os.good() )
   {
     // show starting point
-    os << this->createStartTag( "GPHIKClassifier" ) << std::endl;    
+    _os << this->createStartTag( "GPHIKClassifier" ) << std::endl;    
     
-    os.precision (numeric_limits<double>::digits10 + 1);
+    _os.precision (numeric_limits<double>::digits10 + 1);
     
-    os << this->createStartTag( "confSection" ) << std::endl;
-    os << confSection << std::endl;
-    os << this->createEndTag( "confSection" ) << std::endl; 
+    _os << this->createStartTag( "confSection" ) << std::endl;
+    _os << confSection << std::endl;
+    _os << this->createEndTag( "confSection" ) << std::endl; 
    
-    os << this->createStartTag( "gphyper" ) << std::endl;
+    _os << this->createStartTag( "gphyper" ) << std::endl;
     //store the underlying data
     //will be done in gphyper->store(of,format)
     //store the optimized parameter values and all that stuff
-    gphyper->store(os, format);
-    os << this->createEndTag( "gphyper" ) << std::endl; 
+    this->gphyper->store( _os, _format );
+    _os << this->createEndTag( "gphyper" ) << std::endl; 
     
     
     /////////////////////////////////////////////////////////
     // store variables which we previously set via config    
     /////////////////////////////////////////////////////////
-    os << this->createStartTag( "b_isTrained" ) << std::endl;
-    os << b_isTrained << std::endl;
-    os << this->createEndTag( "b_isTrained" ) << std::endl; 
+    _os << this->createStartTag( "b_isTrained" ) << std::endl;
+    _os << b_isTrained << std::endl;
+    _os << this->createEndTag( "b_isTrained" ) << std::endl; 
     
-    os << this->createStartTag( "noise" ) << std::endl;
-    os << noise << std::endl;
-    os << this->createEndTag( "noise" ) << std::endl;
+    _os << this->createStartTag( "d_noise" ) << std::endl;
+    _os << d_noise << std::endl;
+    _os << this->createEndTag( "d_noise" ) << std::endl;
     
     
-    os << this->createStartTag( "verbose" ) << std::endl;
-    os << verbose << std::endl;
-    os << this->createEndTag( "verbose" ) << std::endl; 
+    _os << this->createStartTag( "b_verbose" ) << std::endl;
+    _os << b_verbose << std::endl;
+    _os << this->createEndTag( "b_verbose" ) << std::endl; 
     
-    os << this->createStartTag( "debug" ) << std::endl;
-    os << debug << std::endl;
-    os << this->createEndTag( "debug" ) << std::endl; 
+    _os << this->createStartTag( "b_debug" ) << std::endl;
+    _os << b_debug << std::endl;
+    _os << this->createEndTag( "b_debug" ) << std::endl; 
     
-    os << this->createStartTag( "uncertaintyPredictionForClassification" ) << std::endl;
-    os << uncertaintyPredictionForClassification << std::endl;
-    os << this->createEndTag( "uncertaintyPredictionForClassification" ) << std::endl;
+    _os << this->createStartTag( "uncertaintyPredictionForClassification" ) << std::endl;
+    _os << uncertaintyPredictionForClassification << std::endl;
+    _os << this->createEndTag( "uncertaintyPredictionForClassification" ) << std::endl;
     
-    os << this->createStartTag( "varianceApproximation" ) << std::endl;
-    os << varianceApproximation << std::endl;
-    os << this->createEndTag( "varianceApproximation" ) << std::endl;     
+    _os << this->createStartTag( "varianceApproximation" ) << std::endl;
+    _os << varianceApproximation << std::endl;
+    _os << this->createEndTag( "varianceApproximation" ) << std::endl;     
   
     
     
     // done
-    os << this->createEndTag( "GPHIKClassifier" ) << std::endl;    
+    _os << this->createEndTag( "GPHIKClassifier" ) << std::endl;    
   }
   else
   {
@@ -615,10 +688,10 @@ void GPHIKClassifier::store ( std::ostream & os, int format ) const
 
 void GPHIKClassifier::clear ()
 {
-  if ( gphyper != NULL )
+  if ( this->gphyper != NULL )
   {
-    delete gphyper;
-    gphyper = NULL;
+    delete this->gphyper;
+    this->gphyper = NULL;
   }
 }
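
Note that the persistence tags changed together with the member names (noise -> d_noise, verbose -> b_verbose, debug -> b_debug), so model files written before this commit will no longer restore. A round-trip sketch with a hypothetical file name:

std::ofstream ofs ( "gphik.model" );
classifier.store ( ofs );
ofs.close();

NICE::GPHIKClassifier restored;
std::ifstream ifs ( "gphik.model" );
restored.restore ( ifs );
ifs.close();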
 
@@ -626,10 +699,10 @@ void GPHIKClassifier::clear ()
 // interface specific methods for incremental extensions
 ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
 
-void GPHIKClassifier::addExample( const NICE::SparseVector * example, 
-			     const double & label, 
-			     const bool & performOptimizationAfterIncrement
-			   )
+void GPHIKClassifier::addExample( const NICE::SparseVector * _example, 
+                                  const double & _label, 
+                                  const bool & _performOptimizationAfterIncrement
+                                )
 {
   
   if ( ! this->b_isTrained )
@@ -638,25 +711,25 @@ void GPHIKClassifier::addExample( const NICE::SparseVector * example,
     std::cerr << "Classifier not initially trained yet -- run initial training instead of incremental extension!"  << std::endl;
      
     std::vector< const NICE::SparseVector *> examplesVec;
-    examplesVec.push_back ( example );
+    examplesVec.push_back ( _example );
     
-    NICE::Vector labelsVec ( 1 , label );
+    NICE::Vector labelsVec ( 1 , _label );
     
     this->train ( examplesVec, labelsVec );
   }
   else
   {
-    this->gphyper->addExample( example, label, performOptimizationAfterIncrement );  
+    this->gphyper->addExample( _example, _label, _performOptimizationAfterIncrement );  
   }
 }
 
-void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-				      const NICE::Vector & newLabels,
-				      const bool & performOptimizationAfterIncrement
-				    )
+void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                           const NICE::Vector & _newLabels,
+                                           const bool & _performOptimizationAfterIncrement
+                                         )
 {
   //are new examples available? If not, nothing has to be done
-  if ( newExamples.size() < 1)
+  if ( _newExamples.size() < 1)
     return;
 
   if ( ! this->b_isTrained )
@@ -664,10 +737,10 @@ void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::Sparse
     //call train method instead
     std::cerr << "Classifier not initially trained yet -- run initial training instead of incremental extension!"  << std::endl;
     
-    this->train ( newExamples, newLabels );    
+    this->train ( _newExamples, _newLabels );    
   }
   else
   {
-    this->gphyper->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );     
+    this->gphyper->addMultipleExamples( _newExamples, _newLabels, _performOptimizationAfterIncrement );     
   }
 }
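
Both incremental methods fall back to an initial training run when the classifier has not been trained yet, so callers can use a single code path. A sketch with hypothetical example data:

// single example, defer the costly hyperparameter re-optimization
classifier.addExample ( &newExample, 1.0 /* label */, false );

// batch of examples, re-optimize afterwards
std::vector< const NICE::SparseVector * > newExamples;
NICE::Vector newLabels;
// ... fill both containers ...
classifier.addMultipleExamples ( newExamples, newLabels, true );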

+ 54 - 27
GPHIKClassifier.h

@@ -46,9 +46,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     ///////////////////////////////////
     
     /** verbose flag for useful output*/
-    bool verbose;
+    bool b_verbose;
     /** debug flag for several outputs useful for debugging*/
-    bool debug;
+    bool b_debug;
     
     //////////////////////////////////////
     //      general specifications      //
@@ -69,7 +69,7 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     
     
     /** Gaussian label noise for model regularization */
-    double noise;
+    double d_noise;
 
     enum VarianceApproximation{
       APPROXIMATE_ROUGH,
@@ -105,7 +105,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief standard constructor
      * @author Alexander Freytag
      */
-    GPHIKClassifier( const NICE::Config *conf , const std::string & s_confSection = "GPHIKClassifier" );
+    GPHIKClassifier( const NICE::Config *_conf , 
+                     const std::string & s_confSection = "GPHIKClassifier" 
+                   );
       
     /**
      * @brief simple destructor
@@ -119,7 +121,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
    * @param _conf Config file to specify variable settings
    * @param s_confSection name of the config section to be read
     */    
-    void initFromConfig(const NICE::Config *conf, const std::string & s_confSection);    
+    void initFromConfig(const NICE::Config *_conf, 
+                        const std::string & s_confSection
+                       );    
     
     ///////////////////// ///////////////////// /////////////////////
     //                         GET / SET
@@ -129,7 +133,7 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief Return currently known class numbers
      * @author Alexander Freytag
      */    
-    std::set<int> getKnownClassNumbers ( ) const;    
+    std::set<uint> getKnownClassNumbers ( ) const;    
    
     ///////////////////// ///////////////////// /////////////////////
     //                      CLASSIFIER STUFF
@@ -143,18 +147,25 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      */        
-    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores ) const;
+    void classify ( const NICE::SparseVector * _example, 
+                    uint & _result, 
+                    NICE::SparseVector & _scores 
+                  ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
      * @date 19-06-2012 (dd-mm-yyyy)
      * @author Alexander Freytag
      * @param example (SparseVector) to be classified given in a sparse representation
-     * @param result (int) class number of most likely class
+     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
     * @param uncertainty (double&) predictive variance of the classification result, if computed
      */    
-    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores, double & uncertainty ) const;
+    void classify ( const NICE::SparseVector * _example,  
+                    uint & _result, 
+                    NICE::SparseVector & _scores, 
+                    double & _uncertainty 
+                  ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -165,7 +176,10 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      */        
-    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores ) const;
+    void classify ( const NICE::Vector * _example,  
+                    uint & _result, 
+                    NICE::SparseVector & _scores 
+                  ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -173,11 +187,15 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @date 18-06-2013 (dd-mm-yyyy)
      * @author Alexander Freytag
      * @param example (non-sparse Vector) to be classified given in a non-sparse representation
-     * @param result (int) class number of most likely class
+     * @param result (uint) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      * @param uncertainty (double) predictive variance of the classification result, if computed
      */    
-    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores, double & uncertainty ) const;    
+    void classify ( const NICE::Vector * _example,  
+                    uint & _result, 
+                    NICE::SparseVector & _scores, 
+                    double & _uncertainty 
+                  ) const;    
 
     /**
      * @brief train this classifier using a given set of examples and a given set of binary label vectors 
@@ -186,7 +204,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @param examples (std::vector< NICE::SparseVector *>) training data given in a sparse representation
      * @param labels (Vector) class labels (multi-class)
      */
-    void train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels );
+    void train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                 const NICE::Vector & _labels 
+               );
     
     /** 
      * @brief train this classifier using a given set of examples and a given set of binary label vectors 
@@ -195,7 +215,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @param examples examples to use given in a sparse data structure
     * @param binLabels corresponding binary labels with class numbers. There is no need here that every example has only one positive entry in this set (1,-1)
      */
-    void train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
+    void train ( const std::vector< const NICE::SparseVector *> & _examples, 
+                 std::map<uint, NICE::Vector> & _binLabels 
+               );
     
     /**
      * @brief Clone classifier object
@@ -210,7 +232,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param example data point for which the classification uncertainty shall be predicted, given in a sparse representation
      * @param uncertainty contains the resulting classification uncertainty
      */       
-    void predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const;
+    void predictUncertainty( const NICE::SparseVector * _example, 
+                             double & _uncertainty 
+                           ) const;
     
     /** 
      * @brief prediction of classification uncertainty
@@ -219,7 +243,9 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
     * @param example data point for which the classification uncertainty shall be predicted, given in a non-sparse representation
      * @param uncertainty contains the resulting classification uncertainty
      */       
-    void predictUncertainty( const NICE::Vector * example, double & uncertainty ) const;    
+    void predictUncertainty( const NICE::Vector * _example, 
+                             double & _uncertainty 
+                           ) const;    
     
 
 
@@ -231,13 +257,17 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief Load classifier from external file (stream)
      * @author Alexander Freytag
      */     
-    void restore ( std::istream & is, int format = 0 );
+    void restore ( std::istream & _is, 
+                   int _format = 0 
+                 );
     
     /** 
      * @brief Save classifier to external file (stream)
      * @author Alexander Freytag
      */     
-    void store ( std::ostream & os, int format = 0 ) const;
+    void store ( std::ostream & _os, 
+                 int _format = 0 
+               ) const;
     
     /** 
      * @brief Clear classifier object
@@ -254,22 +284,19 @@ class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
      * @brief add a new example
      * @author Alexander Freytag
      */    
-    virtual void addExample( const NICE::SparseVector * example, 
-                              const double & label, 
-                              const bool & performOptimizationAfterIncrement = true
+    virtual void addExample( const NICE::SparseVector * _example, 
+                             const double & _label, 
+                             const bool & _performOptimizationAfterIncrement = true
                             );
                           
     /** 
      * @brief add several new examples
      * @author Alexander Freytag
      */    
-    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
-                                      const NICE::Vector & newLabels,
-                                      const bool & performOptimizationAfterIncrement = true
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & _newExamples,
+                                      const NICE::Vector & _newLabels,
+                                      const bool & _performOptimizationAfterIncrement = true
                                     );       
-
-
-
 };
 
 }
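
The uncertainty prediction is also callable on its own; which estimate is computed depends on the varianceApproximation strategy parsed in initFromConfig. A short sketch, with setup as in the classify example above:

double uncertainty ( 0.0 );
classifier.predictUncertainty ( &example, uncertainty );  // rough, fine, or exact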

+ 50 - 43
GPLikelihoodApprox.cpp

@@ -34,28 +34,28 @@ using namespace NICE;
 using namespace OPTIMIZATION;
 
 
-GPLikelihoodApprox::GPLikelihoodApprox( const std::map<int, NICE::Vector> & binaryLabels,
-                                        ImplicitKernelMatrix *ikm,
-                                        IterativeLinearSolver *linsolver, 
-                                        EigValues *eig,
-                                        bool verifyApproximation,
+GPLikelihoodApprox::GPLikelihoodApprox( const std::map<uint, NICE::Vector> & _binaryLabels,
+                                        ImplicitKernelMatrix *_ikm,
+                                        IterativeLinearSolver *_linsolver, 
+                                        EigValues *_eig,
+                                        bool _verifyApproximation,
                                         int _nrOfEigenvaluesToConsider
                                       ) 
 
-      : CostFunction( ikm->getNumParameters() )
+      : CostFunction( _ikm->getNumParameters() )
 {
-  this->binaryLabels = binaryLabels;
-  this->ikm = ikm;
-  this->linsolver = linsolver;
-  this->eig = eig;
+  this->binaryLabels = _binaryLabels;
+  this->ikm = _ikm;
+  this->linsolver = _linsolver;
+  this->eig = _eig;
 
-  if ( binaryLabels.size() == 1 )
+  if ( _binaryLabels.size() == 1 )
     this->nrOfClasses = 2;
   else
-    this->nrOfClasses = binaryLabels.size();
+    this->nrOfClasses = _binaryLabels.size();
 
   this->min_nlikelihood = std::numeric_limits<double>::max();
-  this->verifyApproximation = verifyApproximation;
+  this->verifyApproximation = _verifyApproximation;
   
   this->nrOfEigenvaluesToConsider = _nrOfEigenvaluesToConsider;
     
@@ -73,7 +73,7 @@ GPLikelihoodApprox::~GPLikelihoodApprox()
     this->initialAlphaGuess = NULL;
 }
 
-const std::map<int, Vector> & GPLikelihoodApprox::getBestAlphas () const
+const std::map<uint, Vector> & GPLikelihoodApprox::getBestAlphas () const
 {
   if ( this->min_alphas.size() > 0 )
   {
@@ -91,7 +91,12 @@ const std::map<int, Vector> & GPLikelihoodApprox::getBestAlphas () const
   return this->min_alphas;
 }
 
-void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax )
+void GPLikelihoodApprox::calculateLikelihood ( double _mypara, 
+                                               const FeatureMatrix & _f, 
+                                               const std::map< uint, NICE::Vector > & _yset, 
+                                               double _noise, 
+                                               double lambdaMax 
+                                             )
 {
   // robust cholesky routine without noise !!
   CholeskyRobust cr ( true /*verbose*/, 0.0, false /*useCuda*/ );
@@ -102,8 +107,8 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   Matrix K;
   IntersectionKernelFunction<double> hik;
  //old version, not needed anymore - we exploit sparsity
-//   K = hik.computeKernelMatrix(data_matrix, noise); // = K + sigma^2 I
-  K = hik.computeKernelMatrix(f, noise);
+//   K = hik.computeKernelMatrix(data_matrix, _noise); // = K + sigma^2 I
+  K = hik.computeKernelMatrix(_f, _noise);
   t.stop();
   cerr << "VERIFY: Time used for calculating kernel matrix is: " << t.getLast() << endl;
 
@@ -116,11 +121,11 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   t.start();
   Matrix choleskyMatrix; 
   cr.robustChol ( K, choleskyMatrix ); // K = choleskyMatrix^T * choleskyMatrix
-  double gt_logdet = (yset.size()) * cr.getLastLogDet();
+  double gt_logdet = (_yset.size()) * cr.getLastLogDet();
   cerr << "chol * chol^T: " << ( choleskyMatrix * choleskyMatrix.transpose() )(0,0,4,4) << endl;
 
   double gt_dataterm = 0.0;
-  for ( std::map< int, NICE::Vector >::const_iterator i = yset.begin(); i != yset.end(); i++ )
+  for ( std::map< uint, NICE::Vector >::const_iterator i = _yset.begin(); i != _yset.end(); i++ )
   {
     const NICE::Vector & y = i->second;
     Vector gt_alpha;
@@ -137,10 +142,12 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   
   
   double gt_nlikelihood = gt_logdet + gt_dataterm;
-  cerr << "OPTGT: " << mypara << " " << gt_nlikelihood << " " << gt_logdet << " " << gt_dataterm << endl;
+  cerr << "OPTGT: " << _mypara << " " << gt_nlikelihood << " " << gt_logdet << " " << gt_dataterm << endl;
 }
 
-void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x, const NICE::Vector & eigenValues )
+void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & _x, 
+                                            const NICE::Vector & _eigenValues 
+                                           )
 {
   Timer t;
   
@@ -155,15 +162,15 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
   
 
   // all alpha vectors will be stored!
-  std::map<int, NICE::Vector> alphas;
+  std::map<uint, NICE::Vector> alphas;
 
   // This has to be done m times for the multi-class case
   if ( this->verbose )
     std::cerr << "run ILS for every bin label. binaryLabels.size(): " << binaryLabels.size() << std::endl;
-  for ( std::map<int, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
+  for ( std::map<uint, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
   {
     // (b) y^T (K+sI)^{-1} y
-    int classCnt = j->first;
+    uint classCnt = j->first;
     if ( this->verbose )
     {
       std::cerr << "Solving linear equation system for class " << classCnt << " ..." << std::endl;
@@ -185,7 +192,7 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
      */
     NICE::Vector alpha;
     
-    alpha = (binaryLabels[classCnt] * (1.0 / eigenValues[0]) );
+    alpha = (binaryLabels[classCnt] * (1.0 / _eigenValues[0]) );
     
     if ( verbose )
       std::cerr << "Using the standard solver ..." << std::endl;
@@ -194,7 +201,7 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
     linsolver->solveLin ( *ikm, binaryLabels[classCnt], alpha );
     t.stop();
    
-    alphas.insert( std::pair<int, NICE::Vector> ( classCnt, alpha) );
+    alphas.insert( std::pair<uint, NICE::Vector> ( classCnt, alpha) );
   }  
   
   // save the parameter value and alpha vectors
@@ -202,13 +209,13 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x,
   this->min_alphas = alphas;
 }
 
-double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
+double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & _x)
 {
   NICE::Vector xv;
    
-  xv.resize ( x.rows() );
-  for ( uint i = 0 ; i < x.rows(); i++ )
-    xv[i] = x(i,0);
+  xv.resize ( _x.rows() );
+  for ( uint i = 0 ; i < _x.rows(); i++ )
+    xv[i] = _x(i,0);
 
   // check whether we have been here before
   unsigned long hashValue = xv.getHashValue();
@@ -275,16 +282,16 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   
 
   // all alpha vectors will be stored!
-  std::map<int, NICE::Vector> alphas;
+  std::map<uint, NICE::Vector> alphas;
 
   // This has to be done m times for the multi-class case
   if ( this->verbose )
     std::cerr << "run ILS for every bin label. binaryLabels.size(): " << binaryLabels.size() << std::endl;
   
-  for ( std::map<int, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
+  for ( std::map<uint, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
   {
     // (b) y^T (K+sI)^{-1} y
-    int classCnt = j->first;
+    uint classCnt = j->first;
     if ( this->verbose )
     {
       std::cerr << "Solving linear equation system for class " << classCnt << " ..." << std::endl;
@@ -308,7 +315,7 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
     NICE::Vector alpha;
     if ( this->initialAlphaGuess != NULL )
     {
-      std::map<int, NICE::Vector>::iterator myIt = this->initialAlphaGuess->find(classCnt);
+      std::map<uint, NICE::Vector>::iterator myIt = this->initialAlphaGuess->find(classCnt);
       if ( myIt != this->initialAlphaGuess->end() )
         alpha = myIt->second;
       else
@@ -375,8 +382,8 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
     cerr << "Time used for approximating logdet(K): " << t.getLast() << endl;
 
   // (c) adding the two terms
-  double nlikelihood = nrOfClasses*logdet;
-  double dataterm = binaryDataterms.sum();
+  double nlikelihood = this->nrOfClasses*logdet;
+  double dataterm    = binaryDataterms.sum();
   nlikelihood += dataterm;
 
   if ( this->verbose )
@@ -386,32 +393,32 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   {
     min_nlikelihood = nlikelihood;
     ikm->getParameters ( min_parameter );
-    min_alphas = alphas;
+    this->min_alphas = alphas;
   }
 
-  alreadyVisited.insert ( pair<int, double> ( hashValue, nlikelihood ) );
+  this->alreadyVisited.insert ( std::pair<unsigned long, double> ( hashValue, nlikelihood ) );
   return nlikelihood;
 }
 
 void GPLikelihoodApprox::setParameterLowerBound(const double & _parameterLowerBound)
 {
-  parameterLowerBound = _parameterLowerBound;
+  this->parameterLowerBound = _parameterLowerBound;
 }
   
 void GPLikelihoodApprox::setParameterUpperBound(const double & _parameterUpperBound)
 {
-  parameterUpperBound = _parameterUpperBound;
+  this->parameterUpperBound = _parameterUpperBound;
 }
 
-void GPLikelihoodApprox::setInitialAlphaGuess(std::map< int, NICE::Vector >* _initialAlphaGuess)
+void GPLikelihoodApprox::setInitialAlphaGuess(std::map< uint, NICE::Vector >* _initialAlphaGuess)
 {
   this->initialAlphaGuess = _initialAlphaGuess;
 }
 
 
-void GPLikelihoodApprox::setBinaryLabels(const std::map<int, Vector> & _binaryLabels)
+void GPLikelihoodApprox::setBinaryLabels(const std::map<uint, Vector> & _binaryLabels)
 {
-  binaryLabels = _binaryLabels;
+  this->binaryLabels = _binaryLabels;
 }
 
 void GPLikelihoodApprox::setVerbose( const bool & _verbose )

+ 20 - 14
GPLikelihoodApprox.h

@@ -48,22 +48,26 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     ImplicitKernelMatrix *ikm;
 
     /** set of binary label vectors */
-    std::map<int, Vector> binaryLabels;
+    std::map<uint, Vector> binaryLabels;
    
     /** number of classes */
-    int nrOfClasses;
+    uint nrOfClasses;
     
    /** To define how fine the approximation of the squared Frobenius norm will be */
     int nrOfEigenvaluesToConsider;
     
     //! only for debugging purposes, printing some statistics
-    void calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax );
+    void calculateLikelihood ( double _mypara, 
+                               const FeatureMatrix & _f, 
+                               const std::map< uint, NICE::Vector > & _yset, 
+                               double _noise, 
+                               double _lambdaMax );
 
     //! last alpha vectors computed (from previous IL-step)
-    std::map<int, NICE::Vector> * initialAlphaGuess;
+    std::map<uint, NICE::Vector> * initialAlphaGuess;
     
     //! alpha vectors of the best solution
-    std::map<int, Vector> min_alphas;
+    std::map<uint, Vector> min_alphas;
 
     //! minimal value of the likelihood
     double min_nlikelihood;
@@ -92,11 +96,11 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
 
     // ------ constructors and destructors ------
     /** simple constructor */
-    GPLikelihoodApprox( const std::map<int, Vector> & binaryLabels, 
-                        ImplicitKernelMatrix *ikm,
-                        IterativeLinearSolver *linsolver,
-                        EigValues *eig,
-                        bool verifyApproximation = false,
+    GPLikelihoodApprox( const std::map<uint, Vector> & _binaryLabels, 
+                        ImplicitKernelMatrix *_ikm,
+                        IterativeLinearSolver *_linsolver,
+                        EigValues *_eig,
+                        bool _verifyApproximation = false,
                         int _nrOfEigenvaluesToConsider = 1
                       );
       
@@ -112,7 +116,9 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     *
     * @return void
     */    
-    void computeAlphaDirect(const OPTIMIZATION::matrix_type & x, const NICE::Vector & eigenValues);
+    void computeAlphaDirect(const OPTIMIZATION::matrix_type & _x, 
+                            const NICE::Vector & _eigenValues
+                           );
     
     /**
     * @brief Evaluate the likelihood for given hyperparameters
@@ -126,13 +132,13 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     
     // ------ get and set methods ------
     const NICE::Vector & getBestParameters () const { return min_parameter; };
-    const std::map<int, Vector> & getBestAlphas () const;
+    const std::map<uint, Vector> & getBestAlphas () const;
     
     void setParameterLowerBound(const double & _parameterLowerBound);
     void setParameterUpperBound(const double & _parameterUpperBound);
     
-    void setInitialAlphaGuess(std::map<int, NICE::Vector> * _initialAlphaGuess);
-    void setBinaryLabels(const std::map<int, Vector> & _binaryLabels);
+    void setInitialAlphaGuess(std::map<uint, NICE::Vector> * _initialAlphaGuess);
+    void setBinaryLabels(const std::map<uint, Vector> & _binaryLabels);
     
     void setVerbose( const bool & _verbose );
     void setDebug( const bool & _debug );
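
Putting the renamed pieces together, constructing the cost function now looks roughly as follows — ikm, linsolver, and eig are hypothetical caller-provided objects of the types named in the constructor:

std::map<uint, NICE::Vector> binaryLabels;    // uint-keyed since this commit
// ... fill binaryLabels ...

NICE::GPLikelihoodApprox likelihood ( binaryLabels, ikm, linsolver, eig,
                                      false /* _verifyApproximation */,
                                      1     /* _nrOfEigenvaluesToConsider */ );
likelihood.setParameterLowerBound ( -5.0 );
likelihood.setParameterUpperBound (  5.0 );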

+ 29 - 22
Quantization.cpp

@@ -13,12 +13,12 @@ using namespace NICE;
 
 Quantization::Quantization( )
 {
-  this->numBins = 1;
+  this->ui_numBins = 1;
 }
 
-Quantization::Quantization( uint numBins )
+Quantization::Quantization( uint _numBins )
 {
-  this->numBins = numBins;
+  this->ui_numBins = _numBins;
 }
 
 Quantization::~Quantization()
@@ -27,26 +27,31 @@ Quantization::~Quantization()
 
 uint Quantization::size() const
 {
-  return numBins;
+  return this->ui_numBins;
 }
   
-double Quantization::getPrototype (uint bin) const
+double Quantization::getPrototype (uint _bin) const
 {
-  return bin / (double)(numBins-1);
+  return _bin / (double)(this->ui_numBins-1);
 }
   
-uint Quantization::quantize (double value) const
+uint Quantization::quantize (double _value) const
 {
-  if ( value <= 0.0 ) return 0;
-  else if ( value >= 1.0 ) return numBins-1;
-  else return (uint)( value * (numBins-1) + 0.5 );
+  if ( _value <= 0.0 ) 
+    return 0;
+  else if ( _value >= 1.0 ) 
+    return this->ui_numBins-1;
+  else 
+    return (uint)( _value * (this->ui_numBins-1) + 0.5 );
 }
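
quantize() and getPrototype() remain (approximately) mutually inverse on [0,1]; only the member was renamed. A worked example, assuming ui_numBins >= 2 since getPrototype divides by ui_numBins - 1:

NICE::Quantization q ( 100 );                 // ui_numBins = 100
uint bin     = q.quantize ( 0.4237 );         // (uint)( 0.4237 * 99 + 0.5 ) = 42
double proto = q.getPrototype ( bin );        // 42 / 99.0 ≈ 0.4242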
 
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
-void Quantization::restore ( std::istream & is, int format )
+void Quantization::restore ( std::istream & _is, 
+                             int _format 
+                           )
 {
-  if (is.good())
+  if ( _is.good() )
   {    
     std::string tmp;    
 
@@ -54,7 +59,7 @@ void Quantization::restore ( std::istream & is, int format )
     
     while ( !b_endOfBlock )
     {
-      is >> tmp; // start of block 
+      _is >> tmp; // start of block 
       
       if ( this->isEndTag( tmp, "Quantization" ) )
       {
@@ -64,9 +69,9 @@ void Quantization::restore ( std::istream & is, int format )
       
       tmp = this->removeStartTag ( tmp );
       
-      if ( tmp.compare("numBins") == 0 )
+      if ( tmp.compare("ui_numBins") == 0 )
       {
-          is >> numBins;
+          _is >> this->ui_numBins;
       }
       else
       {
@@ -74,7 +79,7 @@ void Quantization::restore ( std::istream & is, int format )
         throw;  
       }
       
-      is >> tmp; // end of block 
+      _is >> tmp; // end of block 
       tmp = this->removeEndTag ( tmp );      
     }
    }
@@ -84,15 +89,17 @@ void Quantization::restore ( std::istream & is, int format )
   }
 }
 
-void Quantization::store ( std::ostream & os, int format ) const
+void Quantization::store ( std::ostream & _os, 
+                           int _format 
+                         ) const
 {
   // show starting point
-  os << this->createStartTag( "Quantization" ) << std::endl;
+  _os << this->createStartTag( "Quantization" ) << std::endl;
   
-  os << this->createStartTag( "numBins" ) << std::endl;
-  os << numBins << std::endl;
-  os << this->createEndTag( "numBins" ) << std::endl;
+  _os << this->createStartTag( "ui_numBins" ) << std::endl;
+  _os << this->ui_numBins << std::endl;
+  _os << this->createEndTag( "ui_numBins" ) << std::endl;
     
   // done
-  os << this->createEndTag( "Quantization" ) << std::endl;
+  _os << this->createEndTag( "Quantization" ) << std::endl;
 }

+ 10 - 6
Quantization.h

@@ -30,7 +30,7 @@ class Quantization  : public NICE::Persistent
 
   protected:
 
-    uint numBins;
+    uint ui_numBins;
 
   public:
 
@@ -47,7 +47,7 @@ class Quantization  : public NICE::Persistent
    * @author Erik Rodner
    * @date 
    */
-  Quantization( uint numBins );
+  Quantization( uint _numBins );
     
   /** simple destructor */
   virtual ~Quantization();
@@ -64,7 +64,7 @@ class Quantization  : public NICE::Persistent
   *
   * @return value of the prototype
   */
-  virtual double getPrototype (uint bin) const;
+  virtual double getPrototype (uint _bin) const;
 
   /**
   * @brief Determine for a given signal value the bin in the vocabulary. This is not the corresponding prototype, which 
@@ -74,13 +74,17 @@ class Quantization  : public NICE::Persistent
   *
   * @return index of the bin entry corresponding to the given signal value
   */
-  virtual uint quantize (double value) const;
+  virtual uint quantize (double _value) const;
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore
   ///////////////////// INTERFACE PERSISTENT /////////////////////
-  virtual void restore ( std::istream & is, int format = 0 );
-  virtual void store ( std::ostream & os, int format = 0 ) const; 
+  virtual void restore ( std::istream & _is, 
+                         int _format = 0 
+                       );
+  virtual void store ( std::ostream & _os, 
+                       int _format = 0 
+                     ) const; 
   virtual void clear () {};  
      
 };

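The reworked store()/restore() pair writes and parses the tagged ui_numBins block shown above, so a quantizer survives a serialization round trip. A sketch (the include path is an assumption):

#include <sstream>
#include "gp-hik-core/Quantization.h" // include path assumed

int main()
{
  NICE::Quantization q1 ( 100 );

  std::stringstream ss;
  q1.store ( ss );        // writes the <Quantization> ... </Quantization> block

  NICE::Quantization q2;  // default-constructed with a single bin
  q2.restore ( ss );      // afterwards q2.size() == 100
  return 0;
}
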
+ 303 - 283
SortedVectorSparse.h

@@ -35,23 +35,23 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
 
   public:
     //! original index, transformed feature value
-    typedef typename std::pair< int, T > dataelement;
+    typedef typename std::pair< uint, T > dataelement;
     typedef typename std::multimap< T, dataelement >::iterator elementpointer;
     typedef typename std::multimap< T, dataelement >::const_iterator const_elementpointer;
     typedef typename std::multimap< T, dataelement >::const_reverse_iterator const_reverse_elementpointer;
 
   protected:
     T tolerance;
-    int n;
+    uint ui_n;
     
-    //! verbose flag for output after calling the restore-function
-    bool verbose;
+    //! b_verbose flag for output after calling the restore-function
+    bool b_verbose;
 
     //! mapping of the original feature value to the index and the transformed feature value
     std::multimap< T, dataelement > nzData;
 
     //! non zero index mapping, original index -> pointer to the element
-    std::map<int, elementpointer > nonzero_indices;
+    std::map<uint, elementpointer > nonzero_indices;
 
   public:
     /**
@@ -60,9 +60,9 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @date 10-01-2012 (dd-mm-yyyy)
     */
     SortedVectorSparse() {
-      n = 0;
-      tolerance = ( T ) 10e-10;
-      verbose = false;
+      this->ui_n = 0;
+      this->tolerance = ( T ) 10e-10;
+      this->b_verbose = false;
     }
 
     /**
@@ -70,20 +70,20 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @author Alexander Freytag
     * @date 10-01-2012 (dd-mm-yyyy)
     */
-    SortedVectorSparse ( const SortedVectorSparse<T> &v ) : nzData ( v.nzData )
+    SortedVectorSparse ( const SortedVectorSparse<T> &_v ) : nzData ( _v.nzData )
     {
-      this->tolerance = v.getTolerance();
-      this->n = v.getN();
-      this->nonzero_indices = v.nonzero_indices;
-      this->verbose = v.getVerbose();      
+      this->tolerance = _v.getTolerance();
+      this->ui_n = _v.getN();
+      this->nonzero_indices = _v.nonzero_indices;
+      this->b_verbose = _v.getVerbose();      
     }
 
-    SortedVectorSparse ( const std::vector<T> &v, const T & _tolerance )
+    SortedVectorSparse ( const std::vector<T> &_v, const T & _tolerance )
     {
-      tolerance = _tolerance;
-      n = 0;
-      insert ( v );
-      verbose = false;
+      this->tolerance = _tolerance;
+      this->ui_n = 0;
+      this->insert ( _v );
+      this->b_verbose = false;
     }
 
     /**
@@ -94,10 +94,10 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     ~SortedVectorSparse() {}
 
     T getTolerance() const {
-      return tolerance;
+      return this->tolerance;
     };
-    int getN() const {
-      return n;
+    uint getN() const {
+      return this->ui_n;
     };
     void setTolerance ( const T & _tolerance ) {
       if ( _tolerance < 0 )
@@ -107,37 +107,42 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     };
 
 
-    void setN ( const int & _n ) {
-      n = _n;
+    void setN ( const uint & _n ) {
+      this->ui_n = _n;
     };
-    int getZeros() const {
+    uint getZeros() const {
       //std::cerr << "n in getZeros: " << n << std::endl;
-      return n - nzData.size();
+      return this->ui_n - this->nzData.size();
     };
-    int getNonZeros() const {
-      return nzData.size();
+    uint getNonZeros() const {
+      return this->nzData.size();
     };
 
     /**
     * @brief add an element to the vector. If feature number is set, we do not check whether this feature was already available or not!
     *
-    * @param newElement element which will be added
-    * @param featureNumber the index of the new element (optional)
+    * @param _newElement element which will be added
+    * @param _specifyFeatureNumber specify whether to use the optionally given index
+    * @param _featureNumber the index of the new element (optional)
     */
-    void insert ( const T & newElement, const int & featureNumber = -1 )
+    void insert ( const T & _newElement, 
+                  const bool _specifyFeatureNumber = false,
+                  const uint & _featureNumber = 0
+                )
     {
-      int newIndex ( featureNumber);
-      if ( featureNumber < 0)
-        newIndex = n;      
       
-      if ( !checkSparsity ( newElement ) )
+      uint newIndex ( this->ui_n );
+      if ( _specifyFeatureNumber )
+        newIndex = _featureNumber;
+      
+      if ( !checkSparsity ( _newElement ) )
       {
         // element is not sparse
-        std::pair<T, dataelement > p ( newElement, dataelement ( newIndex, newElement ) );
-        elementpointer it = nzData.insert ( p );
-        nonzero_indices.insert ( std::pair<int, elementpointer> ( newIndex, it ) );
+        std::pair<T, dataelement > p ( _newElement, dataelement ( newIndex, _newElement ) );
+        elementpointer it = this->nzData.insert ( p );
+        this->nonzero_indices.insert ( std::pair<uint, elementpointer> ( newIndex, it ) );
       }
-      n++;
+      this->ui_n++;
     }
   
     /**
@@ -145,23 +150,28 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     *
     * @param newElement element which will be added
     * @param newElementTransformed transformed feature value
+    * @param _specifyFeatureNumber specify whether to use the optionally given index
     * @param featureNumber the index of the new element (optional)
     */
-    void insert ( const T & newElement, const T & newElementTransformed, const int & featureNumber = -1 )
+    void insert ( const T & _newElement, 
+                  const T & _newElementTransformed, 
+                  const bool _specifyFeatureNumber = false,                  
+                  const uint & _featureNumber = 0 
+                )
     {
-      int newIndex ( featureNumber);
-      if ( featureNumber < 0)
-        newIndex = n;
+      uint newIndex ( this->ui_n );
+      if ( _specifyFeatureNumber )
+        newIndex = _featureNumber;
       
-      if ( !checkSparsity ( newElement ) )
+      if ( !checkSparsity ( _newElement ) )
       {
         // element is not sparse
         
-        std::pair<T, dataelement > p ( newElement, dataelement ( newIndex, newElementTransformed ) );
-        elementpointer it = nzData.insert ( p );
-        nonzero_indices.insert ( std::pair<int, elementpointer> ( newIndex, it ) );
+        std::pair<T, dataelement > p ( _newElement, dataelement ( newIndex,_newElementTransformed ) );
+        elementpointer it = this->nzData.insert ( p );
+        this->nonzero_indices.insert ( std::pair<uint, elementpointer> ( newIndex, it ) );
       }
-      n++;
+      this->ui_n++;
     }
 
     /**
@@ -169,21 +179,21 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     *
     * @param v new element which will be added
     */
-    void insert ( const std::vector<T> &v )
+    void insert ( const std::vector<T> &_v )
     {
-      for ( uint i = 0; i < v.size(); i++ )
-        insert ( v[i] );
+      for ( uint i = 0; i < _v.size(); i++ )
+        this->insert ( _v[i] );
     }
     /**
     * @brief add a vector of new elements to the vector. It doesn't make much sense to have such a function, but who knows...
     *
     * @param v Vector of new Elements
     */
-    void insert ( const NICE::SparseVector* v )
+    void insert ( const NICE::SparseVector* _v )
     {
-      for (NICE::SparseVector::const_iterator vIt = v->begin(); vIt != v->end(); vIt++)
+      for (NICE::SparseVector::const_iterator vIt = _v->begin(); vIt != _v->end(); vIt++)
       {
-        insert((T)vIt->second);
+        this->insert((T)vIt->second);
       }
     }
     
@@ -194,10 +204,10 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     *
     * @return value of the element (not the original value)
     */
-    T accessNonZero ( int a ) const
+    T accessNonZero ( uint _a ) const
     {
-      const_elementpointer it = nzData.begin();
-      advance ( it, a );
+      const_elementpointer it = this->nzData.begin();
+      advance ( it, _a );
       dataelement de = it->second;
 
       return de.second;
@@ -210,10 +220,10 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     *
     * @return value of the element
     */
-    inline T access ( int a ) const
+    inline T access ( uint _a ) const
     {
-      typename std::map<int, elementpointer>::const_iterator i = nonzero_indices.find ( a );
-      if ( i != nonzero_indices.end() ) {
+      typename std::map<uint, elementpointer>::const_iterator i = this->nonzero_indices.find ( _a );
+      if ( i != this->nonzero_indices.end() ) {
         // accessing a nonzero element
         const elementpointer & it = i->second;
         const dataelement & de = it->second;
@@ -233,10 +243,10 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     *
     * @return value of the element
     */
-    inline T accessOriginal ( int a ) const
+    inline T accessOriginal ( uint _a ) const
     {
-      typename std::map<int, elementpointer>::const_iterator i = nonzero_indices.find ( a );
-      if ( i != nonzero_indices.end() ) {
+      typename std::map<uint, elementpointer>::const_iterator i = this->nonzero_indices.find ( _a );
+      if ( i != this->nonzero_indices.end() ) {
         // accessing a nonzero element
         elementpointer it = i->second;
         return it->first;
@@ -248,17 +258,17 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
 
     std::multimap< T, dataelement > & nonzeroElements()
     {
-      return nzData;
+      return this->nzData;
     }
 
     const std::multimap< T, dataelement > & nonzeroElements() const
     {
-      return nzData;
+      return this->nzData;
     }
 
-    const std::map< int, elementpointer> & nonzeroIndices() const
+    const std::map< uint, elementpointer> & nonzeroIndices() const
     {
-      return nonzero_indices;
+      return this->nonzero_indices;
     }
 
     /**
@@ -268,11 +278,11 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     *
     * @return
     */
-    bool checkSparsity ( T element )
+    bool checkSparsity ( T _element )
     {
-      if ( element > tolerance )
+      if ( _element > this->tolerance )
         return false;
-      if ( element < -tolerance )
+      if ( _element < -this->tolerance )
         return false;
 
       return true;
@@ -287,58 +297,60 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @param a proper index
     * @param newElement element value
     */
-    void set ( int a, T newElement, bool setTransformedValue = false )
+    void set ( uint _a, 
+               T _newElement, 
+               bool _setTransformedValue = false )
     {
-      if ( a >= n || a < 0 )
+      if ( _a >= this->ui_n || _a < 0 )
         fthrow ( Exception, "SortedVectorSparse::set(): out of bounds" );
 
-      typename std::map<int, elementpointer>::iterator i = nonzero_indices.find ( a );
+      typename std::map<uint, elementpointer>::iterator i = this->nonzero_indices.find ( _a );
 
       // check whether the element was previously non-sparse
-      if ( i != nonzero_indices.end() ) {
+      if ( i != this->nonzero_indices.end() ) {
         elementpointer it = i->second;
 
-        if ( checkSparsity ( newElement ) ) {
+        if ( checkSparsity ( _newElement ) ) {
           // old: non-sparse, new:sparse
           // delete the element
-          nzData.erase ( it );
-          nonzero_indices.erase ( i );
+          this->nzData.erase ( it );
+          this->nonzero_indices.erase ( i );
         } else {
           // old: non-sparse, new: non-sparse
           // The following statement would be nice, but it is not allowed.
           // This is also the reason why we implemented the transformed feature value ability.
           // it->first = newElement;
-          if ( setTransformedValue ) {
+          if ( _setTransformedValue ) {
             // set the transformed value
-            it->second.second = newElement;
+            it->second.second = _newElement;
           } else {
             // the following is weird, tricky, and expensive
-            set ( a, 0.0 );
+            this->set ( _a, 0.0 );
             //std::cerr << "Element after step 1: " << access(a) << std::endl;
-            set ( a, newElement );
+            this->set ( _a, _newElement );
           }
           //std::cerr << "Element after step 2: " << access(a) << std::endl;
         }
       } else {
         // the element was previously sparse
-        if ( !checkSparsity ( newElement ) )
+        if ( !checkSparsity ( _newElement ) )
         {
           //std::cerr << "changing a zero value to a non-zero value " << newElement << std::endl;
           // old element is not sparse
-          dataelement de ( a, newElement );
-          std::pair<T, dataelement> p ( newElement, de );
-          elementpointer it = nzData.insert ( p );
-          nonzero_indices.insert ( std::pair<int, elementpointer> ( a, it ) );
+          dataelement de ( _a, _newElement );
+          std::pair<T, dataelement> p ( _newElement, de );
+          elementpointer it = this->nzData.insert ( p );
+          this->nonzero_indices.insert ( std::pair<uint, elementpointer> ( _a, it ) );
         }
       }
     }
 
-    SortedVectorSparse<T> operator= ( const SortedVectorSparse<T> & F )
+    SortedVectorSparse<T> operator= ( const SortedVectorSparse<T> & _F )
     {
-      this->tolerance = F.getTolerance();
-      this->n = F.getN();
-      this->nonzero_indices = F.nonzero_indices;
-      this->nzData = F.nzData;
+      this->tolerance = _F.getTolerance();
+      this->ui_n = _F.getN();
+      this->nonzero_indices = _F.nonzero_indices;
+      this->nzData = _F.nzData;
 
       return *this;
     }
@@ -348,11 +360,11 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @author Alexander Freytag
     * @date 10-01-2012 (dd-mm-yyyy)
     */
-    std::vector<int> getPermutationNonZero() const
+    std::vector<uint> getPermutationNonZero() const
     {
-      std::vector<int> rv ( nzData.size() );
-      int idx = 0;
-      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      std::vector<uint> rv ( this->nzData.size() );
+      uint idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = this->nzData.begin(); it != this->nzData.end(); it++, idx++ )
       {
         rv[idx] = it->second.first;
       }
@@ -363,11 +375,11 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @brief Computes the permutation of the non-zero elements for a proper (ascending) ordering
     * @author Alexander Freytag
     * @date 23-01-2012 (dd-mm-yyyy)
-    * @return  std::map<int, int>, with the absolute feature numbers as key element and their permutation as second
+    * @return  std::map<uint, uint>, with the absolute feature numbers as key element and their permutation as second
     */
-    std::map<int, int> getPermutationNonZeroReal() const
+    std::map<uint, uint> getPermutationNonZeroReal() const
     {
-      std::map<int, int> rv;
+      std::map<uint, uint> rv;
 //         int idx = 0;
 //         for (typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++)
 //         {
@@ -379,13 +391,13 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
 //           //rv.insert(std::pair<int,int>(idx,it->second.first));
 //         }
 
-      int nrZeros ( this->getZeros() );
+      uint nrZeros ( this->getZeros() );
 
-      int idx = 0;
-      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      uint idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = this->nzData.begin(); it != this->nzData.end(); it++, idx++ )
       {
         //inserts the real feature number as key
-        rv.insert ( std::pair<int, int> ( nrZeros + idx, it->second.first ) );
+        rv.insert ( std::pair<uint, uint> ( nrZeros + idx, it->second.first ) );
       }
       return rv;
     };
@@ -394,18 +406,18 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @brief Computes the permutation of the non-zero elements for a proper (ascending) ordering
     * @author Alexander Freytag
     * @date 23-01-2012 (dd-mm-yyyy)
-    * @return  std::map<int, int>, with the relative feature numbers as key element  (realtive to non-zero elements) and their permutation as second
+    * @return  std::map<uint, uint>, with the relative feature numbers as key element (relative to non-zero elements) and their permutation as second
     */
-    std::map<int, int> getPermutationNonZeroRelative() const
+    std::map<uint, uint> getPermutationNonZeroRelative() const
     {
-      std::map<int, int> rv;
-      int idx = 0;
-      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      std::map<uint, uint> rv;
+      uint idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = this->nzData.begin(); it != this->nzData.end(); it++, idx++ )
       {
         //inserts the real feature number as key
         //rv.insert(std::pair<int,int>(it->second.first,it->second.first));
         //if we want to use the relative feature number (relative to non-zero elements), use the following
-        rv.insert ( std::pair<int, int> ( idx, it->second.first ) );
+        rv.insert ( std::pair<uint, uint> ( idx, it->second.first ) );
       }
       return rv;
     };
@@ -415,25 +427,26 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     /**
     * @brief Computes the permutation of the elements for a proper (ascending) ordering
     */
-    std::vector<int> getPermutation() const
+    std::vector<uint> getPermutation() const
     {
-      std::vector<int> rv ( n );
+      std::vector<uint> rv ( this->ui_n );
 
-      int idx = n - 1;
+      uint idx = ( this->ui_n > 0 ) ? ( this->ui_n - 1 ) : 0; // guard against unsigned underflow for empty vectors
       typename std::multimap<T, dataelement>::const_reverse_iterator it ;
-      for ( it = nzData.rbegin(); it != nzData.rend() && ( it->first > tolerance ); it++, idx-- )
+      for ( it = this->nzData.rbegin(); it != this->nzData.rend() && ( it->first > tolerance ); it++, idx-- )
       {
         rv[ idx ] = it->second.first;
       }
 
-      for ( int i = n - 1 ; i >= 0 ; i-- )
+      uint i = ( this->ui_n > 0 ) ? ( this->ui_n - 1 ) : 0; // same underflow guard as above
+      for ( int iCnt = (int)this->ui_n - 1 ; iCnt >= 0 ; i--, iCnt-- )
         if ( nonzero_indices.find ( i ) == nonzero_indices.end() )
         {
           rv[ idx ] = i;
           idx--;
         }
 
-      for ( ; it != nzData.rend(); it++, idx-- )
+      for ( ; it != this->nzData.rend(); it++, idx-- )
       {
         rv[ idx ] = it->second.first;
       }
@@ -446,12 +459,12 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @author Alexander Freytag
     * @date 10-01-2012 (dd-mm-yyyy)
     */
-    std::vector<std::pair<int, T> > getOrderInSeparateVector() const
+    std::vector<std::pair<uint, T> > getOrderInSeparateVector() const
     {
-      std::vector<std::pair<int, T> > rv;
-      rv.resize ( nzData.size() );
+      std::vector<std::pair<uint, T> > rv;
+      rv.resize ( this->nzData.size() );
       uint idx = 0;
-      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      for ( typename std::multimap<T, dataelement>::const_iterator it = this->nzData.begin(); it != this->nzData.end(); it++, idx++ )
       {
         rv[idx].first = it->second.first;
         rv[idx].second = it->second.second;
@@ -466,24 +479,24 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     */
     T getMedian() const
     {
-      if ( n % 2 == 1 ) 
+      if ( this->ui_n % 2 == 1 ) 
       {
         // odd number of training examples
-        int medianPosition = nzData.size() - (int)(n/2);
-        if ( medianPosition < 0 ) 
+        uint medianPosition = this->nzData.size() - this->ui_n/2;
+        if ( medianPosition < 0 ) //FIXME not possible with uint anymore
           return 0.0;
         else
-          return accessNonZero(medianPosition); 
+          return this->accessNonZero(medianPosition); 
       } else {
         // even number of training examples
-        int medianA = nzData.size() - (int)(n/2);
-        int medianB = nzData.size() - (int)((n+1)/2);
+        uint medianA = this->nzData.size() - this->ui_n/2;
+        uint medianB = this->nzData.size() - (this->ui_n+1)/2;
         T a = 0.0;
         T b = 0.0;
         if ( medianA >= 0)
-          a = accessNonZero( medianA );
+          a = this->accessNonZero( medianA );
         if ( medianB >= 0)
-          b = accessNonZero( medianB );
+          b = this->accessNonZero( medianB );
         return (a + b)/2.0;
       }
     }
@@ -495,8 +508,8 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     */
     T getMax() const
     {
-      if (nzData.size() > 0)
-        return accessNonZero(nzData.size()-1);
+      if ( this->nzData.size() > 0 )
+        return this->accessNonZero( this->nzData.size()-1 );
       return (T) 0.0;
     }
     
@@ -507,9 +520,9 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     */
     T getMin() const
     {
-      if (nzData.size() < (uint) n)
+      if ( this->nzData.size() < this->ui_n )
         return (T) 0.0;
-      return accessNonZero(0);
+      return this->accessNonZero(0);
     }
     
     
@@ -524,31 +537,34 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @param elementCounts this vector contains the number of examples for each class, compute this using the labels
     * for efficiency reasons
     */
-    void getClassMedians ( SparseVector & classMedians, const Vector & labels, const Vector & elementCounts ) const
+    void getClassMedians ( SparseVector & _classMedians, 
+                           const Vector & _labels, 
+                           const Vector & _elementCounts 
+                         ) const
     {
-      if ( labels.size() != n )
+      if ( _labels.size() != this->ui_n )
         fthrow(Exception, "Label vector has to have the same size as the SortedVectorSparse structure");
-      Vector c ( elementCounts );
+      Vector c ( _elementCounts );
       for ( uint i = 0 ; i < c.size(); i++ )
         c[i] /= 2;
       // now we have in c the position of the current median
       typename std::multimap<T, dataelement>::const_reverse_iterator it;
 
-      for ( it = nzData.rbegin(); it != nzData.rend(); it++ )
+      for ( it = this->nzData.rbegin(); it != this->nzData.rend(); it++ )
       {
         const dataelement & de = it->second;
-        int origIndex = de.first;
+        uint origIndex = de.first;
         double value = de.second;
-        int classno = labels[origIndex];
+        int classno = _labels[origIndex];
         c[ classno ]--;
         if ( c[classno] == 0 )
-          classMedians[classno] = value;
+          _classMedians[classno] = value;
       }
 
       // remaining medians are zero!
       for ( uint classno = 0 ; classno < c.size(); classno++ )
         if ( c[classno] > 0 )
-          classMedians[classno] = 0.0;
+          _classMedians[classno] = 0.0;
     }
 
     /**
@@ -556,186 +572,190 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     * @author Alexander Freytag
     * @date 12-01-2012 (dd-mm-yyyy)
     */
-    void print(std::ostream & os) const
+    void print(std::ostream & _os) const
     {
       typename std::multimap<T, dataelement>::const_iterator it = nzData.begin();
 
-      if (os.good())
+      if (_os.good())
       {
         for ( ; it != nzData.end() ; it++ )
         {
           if ( it->first < ( T ) 0.0 )
-            os << it->first << " ";
+            _os << it->first << " ";
           else
             break;
         }
 
-        for ( int i = 0; i < getZeros(); i++ )
+        for ( uint i = 0; i < this->getZeros(); i++ )
         {
-          os << ( T ) 0.0 << " " ;
+          _os << ( T ) 0.0 << " " ;
         }
 
-        for ( ; ( it != nzData.end() ); it++ )
+        for ( ; ( it != this->nzData.end() ); it++ )
         {
-          os << it->second.second << " ";
+          _os << it->second.second << " ";
         }
-        os << std::endl;
+        _os << std::endl;
       }
     }
     
-    /** set verbose flag used for restore-functionality*/
-    void setVerbose( const bool & _verbose) { verbose = _verbose;};
-    bool getVerbose( ) const { return verbose;};
+    /** set b_verbose flag used for restore-functionality*/
+    void setVerbose( const bool & _verbose) { this->b_verbose = _verbose;};
+    bool getVerbose( ) const { return this->b_verbose;};
     
     
     /** Persistent interface */
-    virtual void restore ( std::istream & is, int format = 0 )
+    virtual void restore ( std::istream & _is, 
+                           int _format = 0 
+                         )
     {
       bool b_restoreVerbose ( false );
-      if ( is.good() )
+      if ( _is.good() )
       {
-	if ( b_restoreVerbose ) 
-	  std::cerr << " restore SortedVectorSparse" << std::endl;
-	
-	std::string tmp;
-	is >> tmp; //class name 
-	
-	if ( ! this->isStartTag( tmp, "SortedVectorSparse" ) )
-	{
-	    std::cerr << " WARNING - attempt to restore SortedVectorSparse, but start flag " << tmp << " does not match! Aborting... " << std::endl;
-	    throw;
-	}   
-	    
-	is.precision ( std::numeric_limits<double>::digits10 + 1);
-	
-	bool b_endOfBlock ( false ) ;
-	
-	while ( !b_endOfBlock )
-	{
-	  is >> tmp; // start of block 
-	  
-	  if ( this->isEndTag( tmp, "SortedVectorSparse" ) )
-	  {
-	    b_endOfBlock = true;
-	    continue;
-	  }      
-	  
-	  tmp = this->removeStartTag ( tmp );
-	  
-	  if ( b_restoreVerbose )
-	    std::cerr << " currently restore section " << tmp << " in SortedVectorSparse" << std::endl;
-	  
-	  if ( tmp.compare("tolerance") == 0 )
-	  {
-	    is >> tolerance;        
-	    is >> tmp; // end of block 
-	    tmp = this->removeEndTag ( tmp );
-	  }
-	  else if ( tmp.compare("n") == 0 )
-	  {
-	    is >> n;        
-	    is >> tmp; // end of block 
-	    tmp = this->removeEndTag ( tmp );
-	  }
-	  else if ( tmp.compare("underlying_data_(sorted)") == 0 )
-	  {
-	    is >> tmp; // start of block 
-	    
-	    int nonZeros;
-	    if ( ! this->isStartTag( tmp, "nonZeros" ) )
-	    {
-	      std::cerr << "Attempt to restore SortedVectorSparse, but found no information about nonZeros elements. Aborting..." << std::endl;
-	      throw;
-	    }
-	    else
-	    {
-	      is >> nonZeros;
-	      is >> tmp; // end of block 
-	      tmp = this->removeEndTag ( tmp );     
-	    }
-	    
-	    is >> tmp; // start of block 
-	    
-	    if ( ! this->isStartTag( tmp, "data" ) )
-	    {
-	      std::cerr << "Attempt to restore SortedVectorSparse, but found no data. Aborting..." << std::endl;
-	      throw;
-	    }
-	    else
-	    {	    
-	      T origValue;
-	      int origIndex;
-	      T transformedValue;
-	      
-	      nzData.clear();
-	      for (int i = 0; i < nonZeros; i++)
-	      {
-	      
-		is >> origValue;
-		is >> origIndex;
-		is >> transformedValue;
-	      
-		std::pair<T, dataelement > p ( origValue, dataelement ( origIndex, transformedValue ) );
-		elementpointer it = nzData.insert ( p);
-		nonzero_indices.insert ( std::pair<int, elementpointer> ( origIndex, it ) );
-	      }
-	      
-	      is >> tmp; // end of block 
-	      tmp = this->removeEndTag ( tmp );  
-	    }
-	    
-	    
-	    is >> tmp; // end of block 
-	    tmp = this->removeEndTag ( tmp );	    
-	  }
-	  else
-	  {
-	    std::cerr << "WARNING -- unexpected SortedVectorSparse object -- " << tmp << " -- for restoration... aborting" << std::endl;
-	    throw;	
-	  }
-	}        
+        if ( b_restoreVerbose ) 
+          std::cerr << " restore SortedVectorSparse" << std::endl;
+        
+        std::string tmp;
+        _is >> tmp; //class name 
+        
+        if ( ! this->isStartTag( tmp, "SortedVectorSparse" ) )
+        {
+            std::cerr << " WARNING - attempt to restore SortedVectorSparse, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+            throw;
+        }   
+            
+        _is.precision ( std::numeric_limits<double>::digits10 + 1);
+        
+        bool b_endOfBlock ( false ) ;
+        
+        while ( !b_endOfBlock )
+        {
+          _is >> tmp; // start of block 
+          
+          if ( this->isEndTag( tmp, "SortedVectorSparse" ) )
+          {
+            b_endOfBlock = true;
+            continue;
+          }      
+          
+          tmp = this->removeStartTag ( tmp );
+          
+          if ( b_restoreVerbose )
+            std::cerr << " currently restore section " << tmp << " in SortedVectorSparse" << std::endl;
+          
+          if ( tmp.compare("tolerance") == 0 )
+          {
+            _is >> this->tolerance;        
+            _is >> tmp; // end of block 
+            tmp = this->removeEndTag ( tmp );
+          }
+          else if ( tmp.compare("ui_n") == 0 )
+          {
+            _is >> this->ui_n;        
+            _is >> tmp; // end of block 
+            tmp = this->removeEndTag ( tmp );
+          }
+          else if ( tmp.compare("underlying_data_(sorted)") == 0 )
+          {
+            _is >> tmp; // start of block 
+            
+            uint nonZeros;
+            if ( ! this->isStartTag( tmp, "nonZeros" ) )
+            {
+              std::cerr << "Attempt to restore SortedVectorSparse, but found no information about nonZeros elements. Aborting..." << std::endl;
+              throw;
+            }
+            else
+            {
+              _is >> nonZeros;
+              _is >> tmp; // end of block 
+              tmp = this->removeEndTag ( tmp );     
+            }
+            
+            _is >> tmp; // start of block 
+            
+            if ( ! this->isStartTag( tmp, "data" ) )
+            {
+              std::cerr << "Attempt to restore SortedVectorSparse, but found no data. Aborting..." << std::endl;
+              throw;
+            }
+            else
+            {
+              T origValue;
+              uint origIndex;
+              T transformedValue;
+              
+              this->nzData.clear();
+              for ( uint i = 0; i < nonZeros; i++)
+              {              
+                _is >> origValue;
+                _is >> origIndex;
+                _is >> transformedValue;
+                    
+                std::pair<T, dataelement > p ( origValue, dataelement ( origIndex, transformedValue ) );
+                elementpointer it = this->nzData.insert ( p);
+                this->nonzero_indices.insert ( std::pair<uint, elementpointer> ( origIndex, it ) );
+              }
+              
+              _is >> tmp; // end of block 
+              tmp = this->removeEndTag ( tmp );  
+            }
+            
+            
+            _is >> tmp; // end of block 
+            tmp = this->removeEndTag ( tmp );
+          }
+          else
+          {
+            std::cerr << "WARNING -- unexpected SortedVectorSparse object -- " << tmp << " -- for restoration... aborting" << std::endl;
+            throw;
+          }
+        }        
 
       }
       else
       {
         std::cerr << "SortedVectorSparse::restore -- InStream not initialized - restoring not possible!" << std::endl;
-	throw;
+        throw;
       }      
     };
-    virtual void store ( std::ostream & os, int format = 0 ) const
+    
+    virtual void store ( std::ostream & _os, 
+                         int _format = 0 
+                       ) const
     {
-      if (os.good())
+      if ( _os.good() )
       {
-	// show starting point
-	os << this->createStartTag( "SortedVectorSparse" ) << std::endl;
-	
-        os.precision (std::numeric_limits<double>::digits10 + 1);
-	
-	os << this->createStartTag( "tolerance" ) << std::endl;
-	os << tolerance << std::endl;
-	os << this->createEndTag( "tolerance" ) << std::endl;
-	
-	os << this->createStartTag( "n" ) << std::endl;
-	os << n << std::endl;
-	os << this->createEndTag( "n" ) << std::endl;
-		
-
-        os << this->createStartTag( "underlying_data_(sorted)" ) << std::endl;
-	  os << this->createStartTag( "nonZeros" ) << std::endl;
-	  os << this->getNonZeros() << std::endl;
-	  os << this->createEndTag( "nonZeros" ) << std::endl;
-	  
-	  os << this->createStartTag( "data" ) << std::endl;  
-	  for (const_elementpointer elP = nzData.begin();  elP != nzData.end(); elP++)
-	  {
-	    os << elP->first << " " << elP->second.first << " " << elP->second.second << " ";
-	  }
-	  os << std::endl;
-	  os << this->createEndTag( "data" ) << std::endl;
-	os << this->createEndTag( "underlying_data_(sorted)" ) << std::endl;
-	
-	// done
-	os << this->createEndTag( "SortedVectorSparse" ) << std::endl;	
+        // show starting point
+        _os << this->createStartTag( "SortedVectorSparse" ) << std::endl;
+        
+        _os.precision (std::numeric_limits<double>::digits10 + 1);
+        
+        _os << this->createStartTag( "tolerance" ) << std::endl;
+        _os << tolerance << std::endl;
+        _os << this->createEndTag( "tolerance" ) << std::endl;
+        
+        _os << this->createStartTag( "ui_n" ) << std::endl;
+        _os << this->ui_n << std::endl;
+        _os << this->createEndTag( "ui_n" ) << std::endl;
+
+
+        _os << this->createStartTag( "underlying_data_(sorted)" ) << std::endl;
+        _os << this->createStartTag( "nonZeros" ) << std::endl;
+        _os << this->getNonZeros() << std::endl;
+        _os << this->createEndTag( "nonZeros" ) << std::endl;
+        
+        _os << this->createStartTag( "data" ) << std::endl;  
+        for (const_elementpointer elP = this->nzData.begin();  elP != this->nzData.end(); elP++)
+        {
+          _os << elP->first << " " << elP->second.first << " " << elP->second.second << " ";
+        }
+        _os << std::endl;
+        _os << this->createEndTag( "data" ) << std::endl;
+        _os << this->createEndTag( "underlying_data_(sorted)" ) << std::endl;
+      
+        // done
+        _os << this->createEndTag( "SortedVectorSparse" ) << std::endl;
       }
       else
       {

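The insert() interface above replaces the old featureNumber = -1 convention with an explicit _specifyFeatureNumber flag, since uint indices can no longer encode "not given". A usage sketch (include path and the NICE namespace are assumed here):

#include <iostream>
#include "gp-hik-core/SortedVectorSparse.h" // include path assumed

int main()
{
  NICE::SortedVectorSparse<double> v;

  v.insert ( 0.7 ); // appended, receives index 0
  v.insert ( 0.0 ); // within tolerance of zero -> counted, but not stored
  v.insert ( 0.2, true /*_specifyFeatureNumber*/, 5 /*_featureNumber*/ );

  std::cerr << "n: "         << v.getN()          // 3
            << " nonZeros: " << v.getNonZeros()   // 2
            << " zeros: "    << v.getZeros()      // 1
            << std::endl;
  std::cerr << "min: " << v.getMin()              // 0.0 (a zero element exists)
            << " max: " << v.getMax()             // 0.7
            << std::endl;
  return 0;
}
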
+ 6 - 6
matlab/ConverterMatlabToNICE.cpp

@@ -57,7 +57,7 @@ std::vector< const NICE::SparseVector * > MatlabConversion::convertSparseMatrixT
           )
       {
          //note: no complex data supported here
-          sparseMatrix[ ir[current_row_index] ]->insert( std::pair<int, double>( col, pr[total++] ) );
+          sparseMatrix[ ir[current_row_index] ]->insert( std::pair<uint, double>( col, pr[total++] ) );
       } // for-loop
       
     }
@@ -100,7 +100,7 @@ NICE::SparseVector MatlabConversion::convertSparseVectorToNice(
   
 
   NICE::SparseVector svec( std::max(dimx, dimy) );
-   
+  
   
   if ( dimx > 1)
   {
@@ -116,9 +116,9 @@ NICE::SparseVector MatlabConversion::convertSparseVectorToNice(
          //note: no complex data supported here
             double value ( pr[total++] );
             if ( b_adaptIndexMtoC ) 
-                svec.insert( std::pair<int, double>( row+1,  value ) );
+                svec.insert( std::pair<uint, double>( row+1,  value ) );
             else
-                svec.insert( std::pair<int, double>( row,  value ) );
+                svec.insert( std::pair<uint, double>( row,  value ) );
         }
     } // for loop over cols      
   }
@@ -131,9 +131,9 @@ NICE::SparseVector MatlabConversion::convertSparseVectorToNice(
        //note: no complex data supported here
         double value ( pr[total++] );
         if ( b_adaptIndexMtoC ) 
-            svec.insert( std::pair<int, double>( ir[colNonZero]+1, value  ) );
+            svec.insert( std::pair<uint, double>( ir[colNonZero]+1, value  ) );
         else
-            svec.insert( std::pair<int, double>( ir[colNonZero], value  ) );
+            svec.insert( std::pair<uint, double>( ir[colNonZero], value  ) );
     }          
   }
 

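The only semantic subtlety in these converters is the optional one-position index shift (b_adaptIndexMtoC) that compensates for Matlab's 1-based indexing; everything else is a plain copy of the non-zero entries. In isolation (header path assumed, values hypothetical):

#include <core/vector/SparseVectorT.h> // header path assumed

typedef unsigned int uint;

int main()
{
  NICE::SparseVector svec ( 4 );
  uint   row   ( 2 );   // 0-based index as delivered by mxGetIr
  double value ( 0.5 );
  bool b_adaptIndexMtoC ( true );

  if ( b_adaptIndexMtoC )
    svec.insert ( std::pair<uint, double> ( row+1, value ) ); // shifted by one position
  else
    svec.insert ( std::pair<uint, double> ( row,   value ) ); // kept as-is
  return 0;
}
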
+ 1 - 1
matlab/ConverterNICEToMatlab.cpp

@@ -14,7 +14,7 @@ mxArray* MatlabConversion::convertSparseVectorFromNice( const NICE::SparseVector
        matlabSparseVec = mxCreateSparse( niceSvec.getDim() -1 /*m*/, 1/*n*/, niceSvec.size() -1 /*nzmax*/, mxREAL);
     else
       matlabSparseVec = mxCreateSparse( niceSvec.getDim() /*m*/, 1/*n*/, niceSvec.size() /*nzmax*/, mxREAL);
-
+    
     
     // To make the returned sparse mxArray useful, you must initialize the pr, ir, jc, and (if it exists) pi arrays.    
     // mxCreateSparse allocates space for:

+ 15 - 15
matlab/GPHIKClassifierMex.cpp

@@ -56,6 +56,7 @@ NICE::Config parseParametersGPHIKClassifier(const mxArray *prhs[], int nrhs)
     /////////////////////////////////////////
     if( (variable == "verboseTime") || 
         (variable == "verbose") ||
+        (variable == "debug") ||            
         (variable == "optimize_noise") || 
         (variable == "uncertaintyPredictionForClassification") ||
         (variable == "use_quantization") || 
@@ -349,7 +350,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
         
         //------------- read the data --------------
 
-        int result;
+        uint result;
         NICE::SparseVector scores;
         double uncertainty;        
 
@@ -376,8 +377,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
 
           // output
           plhs[0] = mxCreateDoubleScalar( result ); 
-          
-          
+                    
           if(nlhs >= 2)
           {
             plhs[1] = MatlabConversion::convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
@@ -462,17 +462,17 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
         
         // determine classes known during training and corresponding mapping
        // thereby allow for non-continuous class labels
-        std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+        std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
         
-        int noClassesKnownTraining ( classesKnownTraining.size() );
-        std::map<int,int> mapClNoToIdxTrain;
-        std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-        for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-            mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+        uint noClassesKnownTraining ( classesKnownTraining.size() );
+        std::map< uint, uint > mapClNoToIdxTrain;
+        std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+        for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+            mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
         
         // determine classes known during testing and corresponding mapping
        // thereby allow for non-continuous class labels
-        std::set<int> classesKnownTest;
+        std::set< uint > classesKnownTest;
         classesKnownTest.clear();
         
 
@@ -487,10 +487,10 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
         }          
         
-        int noClassesKnownTest ( classesKnownTest.size() );  
+        uint noClassesKnownTest ( classesKnownTest.size() );  
-        std::map<int,int> mapClNoToIdxTest;
-        std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-        for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-            mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );          
+        std::map< uint, uint> mapClNoToIdxTest;
+        std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+        for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+            mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  );          
         
 
 
@@ -520,7 +520,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
             //----------------- convert data to sparse data structures ---------
           
 
-            int result;
+            uint result;
             NICE::SparseVector exampleScoresSparse;
 
             if ( dataIsSparse )

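The train/test blocks above compress possibly non-continuous class numbers (e.g. {1,5,7}) into consecutive confusion-matrix indices ({0,1,2}); the same pattern recurs in progs/toyExample.cpp and progs/toyExampleStoreRestore.cpp below. A self-contained sketch:

#include <iostream>
#include <map>
#include <set>

typedef unsigned int uint;

int main()
{
  std::set< uint > classesKnown;
  classesKnown.insert ( 1 );
  classesKnown.insert ( 5 );
  classesKnown.insert ( 7 );

  // class number -> consecutive index, in ascending class order
  std::map< uint, uint > mapClNoToIdx;
  uint idx ( 0 );
  for ( std::set< uint >::const_iterator it = classesKnown.begin();
        it != classesKnown.end();
        it++, idx++ )
    mapClNoToIdx.insert ( std::pair< uint, uint > ( *it, idx ) );

  std::cerr << "class 5 -> index " << mapClNoToIdx[5] << std::endl; // 1
  return 0;
}
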
+ 12 - 5
matlab/plot1dExampleClassification.m

@@ -15,6 +15,8 @@ b_verboseTime                       = false;
 
 %interested in outputs?
 b_verbose                           = false;  
+b_debug                             = false;  
+
 
 % important for plotting!
 b_uncertaintyPredictionForClassification ...
@@ -26,7 +28,7 @@ b_ils_verbose                       = false;
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%
 %% integer
 i_nrOfEigenvaluesToConsiderForVarApprox ...
-                                    = 2;
+                                    = 1;
 i_num_bins                          = 100; % default
 i_ils_max_iterations                = 1000; % default
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -47,9 +49,9 @@ s_ils_method                        = 'CG'; % default
 % options: 'none', 'greedy', 'downhillsimplex'
 s_optimization_method               = 'downhillsimplex';
 
-% options:  'identity', 'abs', 'absexp'
+% options:  'identity', 'exp', 'absexp'
 % with settings above, this equals 'identity'
-s_transform                         = 'absexp'; 
+s_transform                         = 'identity'; 
 
 % options: 'exact', 'approximate_fine', 'approximate_rough', and 'none'
 s_varianceApproximation             = 'approximate_fine'; 
@@ -59,6 +61,7 @@ myGPHIKClassifier = ...
         GPHIKClassifier ( ...
                           'verboseTime',                               b_verboseTime, ...
                           'verbose',                                   b_verbose, ...
+                          'debug',                                     b_debug, ...                          
                           'uncertaintyPredictionForClassification',    b_uncertaintyPredictionForClassification, ...
                           'optimize_noise',                            b_optimize_noise, ...
                           'use_quantization',                          b_use_quantization, ...
@@ -95,7 +98,11 @@ scores = zeros(size(myDataTest,1),1);
 uncertainties = zeros(size(myDataTest,1),1);
 for i=1:size(myDataTest,1)
     example = myDataTest(i,:);
-    [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( example );
+
+%     [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( sparse(example) );
+    [ classNoEst, score, uncertainties(i)] = myGPHIKClassifier.classify( example );    
+%      [ classNoEst, score] = myGPHIKClassifier.classify( example );
+%     [ classNoEst, score] = myGPHIKClassifier.classify( sparse(  example ) );    
     scores(i) = score(1);
 end
 
@@ -108,7 +115,7 @@ set ( classificationFig, 'name', 'Classification with GPHIK');
 hold on;
 
 %#initialize x array
-x=0:0.01:1;
+x=myDataTest(:,1)';
 
 %#create first curve
 uncLower=scores-uncertainties;

+ 3 - 3
matlab/plot1dExampleRegression.m

@@ -45,7 +45,7 @@ d_parameter_upper_bound             = 1.0;
 s_ils_method                        = 'CG'; % default
 
 % options: 'none', 'greedy', 'downhillsimplex'
-s_optimization_method               = 'downhillsimplex';
+s_optimization_method               = 'none';
 
 % options:  'identity', 'abs', 'absexp'
 % with settings above, this equals 'identity'
@@ -83,7 +83,7 @@ myGPHIKRegression = ...
     
 
 %% run train method
-myGPHIKRegression.train( myData, myValues );
+myGPHIKRegression.train( sparse(myData), myValues );
 
 
 %% evaluate model on test data
@@ -96,7 +96,7 @@ myDataTest = cat(1, myDataTest, 1-myDataTest)';
 scores = zeros(size(myDataTest,1),1);
 uncertainties = zeros(size(myDataTest,1),1);
 for i=1:size(myDataTest,1)
-    example = myDataTest(i,:);
+    example = sparse(myDataTest(i,:));
     [ scores(i), uncertainties(i)] = myGPHIKRegression.estimate( example );
 end
 

+ 2 - 2
matlab/testMatlabConversionFunctionsMex.cpp

@@ -117,8 +117,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
 
         NICE::SparseVector t_vecTest = MatlabConversion::convertSparseVectorToNice( t_pData );
 	
-	NICE::Vector t_fullVector;
-	t_vecTest.convertToVectorT( t_fullVector );
+        NICE::Vector t_fullVector;
+        t_vecTest.convertToVectorT( t_fullVector );
         std::cerr << "convertDoubleSparseVector: full version:" << t_fullVector << std::endl;
 
         // output

+ 24 - 19
progs/classifyDatasetGPHIK.cpp

@@ -19,20 +19,23 @@
 #include "gp-hik-core/GPHIKClassifier.h"
 
 
-void readSparseExamples ( const std::string & fn,  std::vector< const NICE::SparseVector * > & examples, NICE::Vector & labels )
+void readSparseExamples ( const std::string & _fn,  
+                          std::vector< const NICE::SparseVector * > & _examples, 
+                          NICE::Vector & _labels 
+                        )
 {
   // initially cleaning of variables
-  examples.clear();
-  labels.clear();
+  _examples.clear();
+  _labels.clear();
   
   std::vector<double> labels_std;
   labels_std.clear();
   
-  std::cerr << "Reading " << fn << std::endl;
-  std::ifstream ifs ( fn.c_str(), std::ios::in );
+  std::cerr << "Reading " << _fn << std::endl;
+  std::ifstream ifs ( _fn.c_str(), std::ios::in );
   if ( ! ifs.good() )
   {
-      std::cerr <<  "Unable to read " << fn << std::endl;
+      std::cerr <<  "Unable to read " << _fn << std::endl;
       return;
   }
   
@@ -67,23 +70,25 @@ void readSparseExamples ( const std::string & fn,  std::vector< const NICE::Spar
     }
           
     
-    examples.push_back ( v );
+    _examples.push_back ( v );
   }
   ifs.close();
   
-  labels = NICE::Vector( labels_std );
+  _labels = NICE::Vector( labels_std );
 }
 
-void mapClassNumbersToIndices( const NICE::Vector & labels, std::map<int,int> & mapClassNoToIdx )
+void mapClassNumbersToIndices( const NICE::Vector & _labels, 
+                               std::map< uint, uint > & _mapClassNoToIdx 
+                             )
 {
-  mapClassNoToIdx.clear();
+  _mapClassNoToIdx.clear();
   int classCnt ( 0 );
   
-  for ( NICE::Vector::const_iterator it_labels = labels.begin(); it_labels != labels.end(); it_labels++ )
+  for ( NICE::Vector::const_iterator it_labels = _labels.begin(); it_labels != _labels.end(); it_labels++ )
   {
-    if ( mapClassNoToIdx.find( *it_labels ) == mapClassNoToIdx.end() )
+    if ( _mapClassNoToIdx.find( *it_labels ) == _mapClassNoToIdx.end() )
     {
-      mapClassNoToIdx.insert( std::pair<int,int>( (int) round(*it_labels), classCnt ) );
+      _mapClassNoToIdx.insert( std::pair< uint, uint >( (uint) round(*it_labels), classCnt ) );
       classCnt++;
     }
   }
@@ -114,7 +119,7 @@ int main (int argc, char* argv[])
   readSparseExamples ( s_fn_trainingSet, examplesTrain, labelsTrain );
 
  //map the occurring classes to a minimal set of indices
-  std::map<int,int> map_classNoToClassIdx_train; // < classNo, Idx>
+  std::map< uint, uint > map_classNoToClassIdx_train; // < classNo, Idx>
   
   mapClassNumbersToIndices( labelsTrain, map_classNoToClassIdx_train );
   
@@ -136,7 +141,7 @@ int main (int argc, char* argv[])
   readSparseExamples ( s_fn_testSet, examplesTest, labelsTest );
   
  //map the occurring classes to a minimal set of indices
-  std::map<int,int> map_classNoToClassIdx_test; // < classNo, Idx>
+  std::map< uint, uint > map_classNoToClassIdx_test; // < classNo, Idx>
   
   mapClassNumbersToIndices( labelsTest, map_classNoToClassIdx_test );
 
@@ -151,14 +156,14 @@ int main (int argc, char* argv[])
   
   for (std::vector< const NICE::SparseVector *>::const_iterator itTestExamples = examplesTest.begin(); itTestExamples != examplesTest.end(); itTestExamples++, idx++)
   {
-    int classno_groundtruth = labelsTest( idx );
-    int classno_predicted;
+    uint classno_groundtruth = labelsTest( idx );
+    uint classno_predicted;
 
     classifier.classify ( *itTestExamples, classno_predicted, scores /* not needed anyway in that evaluation*/ );
     
     
-    int idx_classno_groundtruth ( map_classNoToClassIdx_test[ classno_groundtruth ] );
-    int idx_classno_predicted ( map_classNoToClassIdx_train[ classno_predicted ] );
+    uint idx_classno_groundtruth ( map_classNoToClassIdx_test[ classno_groundtruth ] );
+    uint idx_classno_predicted ( map_classNoToClassIdx_train[ classno_predicted ] );
         
     confusion( idx_classno_groundtruth, idx_classno_predicted ) += 1;
   }

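A hypothetical follow-up for the evaluation loop above: collapsing the confusion counts into an average recognition rate. This sketch assumes, as in this program, that rows index the ground-truth classes (header path assumed):

#include <core/vector/MatrixT.h> // header path assumed

double avgRecognitionRate ( const NICE::Matrix & confusion )
{
  double sumOfClassWiseRates ( 0.0 );
  for ( unsigned int i = 0; i < confusion.rows(); i++ )
  {
    double rowSum ( 0.0 );
    for ( unsigned int j = 0; j < confusion.cols(); j++ )
      rowSum += confusion ( i, j );

    // fraction of examples of class i that were predicted correctly
    if ( rowSum > 0.0 )
      sumOfClassWiseRates += confusion ( i, i ) / rowSum;
  }
  return sumOfClassWiseRates / confusion.rows();
}
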
+ 12 - 12
progs/toyExample.cpp

@@ -132,17 +132,17 @@ int main (int argc, char* argv[])
   
   // determine classes known during training and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
   
-  int noClassesKnownTraining ( classesKnownTraining.size() );
-  std::map<int,int> mapClNoToIdxTrain;
-  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+  uint noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
-  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+  for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+      mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
   
   // determine classes known during testing and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTest;
+  std::set< uint > classesKnownTest;
   classesKnownTest.clear();
   
 
@@ -156,11 +156,11 @@ int main (int argc, char* argv[])
     }
   }          
   
-  int noClassesKnownTest ( classesKnownTest.size() );  
-  std::map<int,int> mapClNoToIdxTest;
-  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+  uint noClassesKnownTest ( classesKnownTest.size() );  
+  std::map< uint, uint > mapClNoToIdxTest;
+  std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+  for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  ); 
           
   
   NICE::Matrix confusionMatrix( noClassesKnownTraining, noClassesKnownTest, 0.0);
@@ -177,7 +177,7 @@ int main (int argc, char* argv[])
   {
     NICE::Vector example ( dataTest.getRow(i) );
     NICE::SparseVector scores;
-    int result;
+    uint result;
     
     // and classify
     t.start();

+ 13 - 13
progs/toyExampleStoreRestore.cpp

@@ -121,17 +121,17 @@ int main (int argc, char* argv[])
   
   // determine classes known during training and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
   
-  int noClassesKnownTraining ( classesKnownTraining.size() );
-  std::map<int,int> mapClNoToIdxTrain;
-  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  uint noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
   
   // determine classes known during testing and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTest;
+  std::set< uint > classesKnownTest;
   classesKnownTest.clear();
   
 
@@ -145,11 +145,11 @@ int main (int argc, char* argv[])
     }
   }          
   
-  int noClassesKnownTest ( classesKnownTest.size() );  
-  std::map<int,int> mapClNoToIdxTest;
-  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+  uint noClassesKnownTest ( classesKnownTest.size() );  
+  std::map< uint, uint > mapClNoToIdxTest;
+  std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+  for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  ); 
           
   
   NICE::Matrix confusionMatrix         ( noClassesKnownTraining, noClassesKnownTest, 0.0);
@@ -165,7 +165,7 @@ int main (int argc, char* argv[])
   {
     NICE::Vector example ( dataTest.getRow(i) );
     NICE::SparseVector scores;
-    int result;
+    uint result;
     
     // classify with trained classifier 
     t.start();

+ 44 - 20
tests/TestFastHIK.cpp

@@ -15,6 +15,17 @@
 
 #include "TestFastHIK.h"
 
+const bool b_debug = false;
+const bool verbose = false;
+const bool verboseStartEnd = true;
+const bool solveLinWithoutRand = false;
+const uint n = 30;//1500;//1500;//10;
+const uint d = 5;//200;//2;
+const uint numBins = 11;//1001;//1001;
+const uint solveLinMaxIterations = 1000;
+const double sparse_prob = 0.6;
+const bool smallTest = false;
+
 bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
 {
   bool result(true);
@@ -67,15 +78,7 @@ bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const
   return result;
 }
 
-const bool verbose = false;
-const bool verboseStartEnd = true;
-const bool solveLinWithoutRand = false;
-const uint n = 30;//1500;//1500;//10;
-const uint d = 5;//200;//2;
-const uint numBins = 11;//1001;//1001;
-const uint solveLinMaxIterations = 1000;
-const double sparse_prob = 0.6;
-const bool smallTest = false;
+
 
 using namespace NICE;
 using namespace std;
@@ -146,7 +149,8 @@ void TestFastHIK::testKernelMultiplication()
   NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
   //toc
   float time_slowComputation = (float) (clock() - slow_start);
-  std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;  
+  if (verbose)
+    std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;  
 
   // tic
   time_t  slow_sparse_start = clock();
@@ -154,7 +158,8 @@ void TestFastHIK::testKernelMultiplication()
   NICE::Matrix KSparseCalculated (hikSlow.computeKernelMatrix(fmk.featureMatrix(), noise));
   //toc
   float time_slowComputation_usingSparsity = (float) (clock() - slow_sparse_start);
-  std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;    
+  if (verbose)
+    std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;    
 
   if ( verbose ) 
     cerr << "K = " << K << endl;
@@ -674,7 +679,8 @@ void TestFastHIK::testLinSolve()
   NICE::Vector alpha;
   NICE::Vector alphaRandomized;
 
-  std::cerr << "solveLin with randomization" << std::endl;
+  if ( verbose )
+    std::cerr << "solveLin with randomization" << std::endl;
   // tic
   NICE::Timer t;
   t.start();
@@ -682,8 +688,9 @@ void TestFastHIK::testLinSolve()
   fmk.solveLin(y,alphaRandomized,q,pf,true,solveLinMaxIterations,30);
   //toc
   t.stop();
-  float time_randomizedSolving = t.getLast();
-  std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;  
+  float time_randomizedSolving = t.getLast();  
+  if ( verbose )
+    std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;  
   
   // test the case, where we first transform and then use the multiply stuff
   std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
@@ -697,12 +704,17 @@ void TestFastHIK::testLinSolve()
   
   if (solveLinWithoutRand)
   {
-    std::cerr << "solveLin without randomization" << std::endl;
+    if ( verbose )
+      std::cerr << "solveLin without randomization" << std::endl;
     fmk.solveLin(y,alpha,q,pf,false,1000);
     Vector K_alpha;
     K_alpha.multiply(gK, alpha);
-    std::cerr << "now assert that K_alpha == y" << std::endl;
-    std::cerr << "(K_alpha-y).normL1(): " << (K_alpha-y).normL1() << std::endl;
+    
+    if ( verbose )
+    {
+      std::cerr << "now assert that K_alpha == y" << std::endl;
+      std::cerr << "(K_alpha-y).normL1(): " << (K_alpha-y).normL1() << std::endl;
+    }
   }
    
 //   std::cerr << "alpha: " << alpha << std::endl;
@@ -718,8 +730,11 @@ void TestFastHIK::testLinSolve()
 //   std::cerr << "test_alpha (CGM): " << test_alpha << std::endl;
 //   std::cerr << "K_times_alpha (CGM): " << K_alpha << std::endl;
   
-  std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
-  std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl;
+  if ( verbose )
+  {
+    std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
+    std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl; 
+  }
   
 
 //   CPPUNIT_ASSERT_DOUBLES_EQUAL((K_alphaRandomized-y).normL1(), 0.0, 1e-6);
@@ -746,7 +761,8 @@ void TestFastHIK::testKernelVector()
   }
 
   double noise = 1.0;
-  FastMinKernel fmk ( dataMatrix, noise );
+  FastMinKernel fmk ( dataMatrix, noise, b_debug );
+  
 
   std::vector<double> xStar; xStar.push_back(0.2);xStar.push_back(0.7);xStar.push_back(0.1);
   NICE::Vector xStarVec (xStar);
@@ -755,9 +771,17 @@ void TestFastHIK::testKernelVector()
   
   NICE::SparseVector xStarsparse( xStarVec );
   NICE::SparseVector x2sparse( x2Vec );
+
+
+  if ( b_debug )
+  {
+    fmk.store ( std::cerr );
+    xStarsparse.store ( std::cerr );
+  }
   
   NICE::Vector k1;
   fmk.hikComputeKernelVector( xStarsparse, k1 );
+
   
   NICE::Vector k2;
   fmk.hikComputeKernelVector( x2sparse, k2 );

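Note: most of the TestFastHIK changes gate timing output behind the verbose flag instead of printing unconditionally. A minimal sketch of the pattern as used above, assuming only the C standard library (the placeholder computation is illustrative):

    #include <ctime>
    #include <iostream>

    const bool verbose = false; // file-scope flag, as in the tests

    void timedStep ()
    {
      clock_t t_start = clock();                      // tic
      // ... e.g. a kernel-matrix computation would run here ...
      float elapsed = (float) ( clock() - t_start );  // toc
      if ( verbose )
        std::cerr << "Time: " << elapsed / CLOCKS_PER_SEC << " s" << std::endl;
    }

This keeps the measurement itself unconditional, so enabling verbose later does not change what is measured.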
+ 2 - 2
tests/TestFeatureMatrixT.cpp

@@ -36,7 +36,7 @@ void TestFeatureMatrixT::testSetup()
 
   generateRandomFeatures ( d, n, dataMatrix );
 
-  int nrZeros(0);
+  uint nrZeros(0);
   for ( uint i = 0 ; i < d; i++ )
   {
     for ( uint k = 0; k < n; k++ )
@@ -64,7 +64,7 @@ void TestFeatureMatrixT::testSetup()
   }
   
   transposeVectorOfVectors(dataMatrix);
-  std::vector<std::vector<int> > permutations;
+  std::vector<std::vector<uint> > permutations;
   if (verbose)
     std::cerr << "now try to set_features" << std::endl;
   fm.set_features(dataMatrix, permutations);

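Note: the counter above changes from int to uint to match the uint loop bounds. For reference, a trivial sketch of the counting idiom the test exercises (countZeros is an illustrative name; d, n, and dataMatrix stand in for the test fixtures):

    #include <vector>

    typedef unsigned int uint; // as used in the tests

    // Count exact zeros in a d x n feature matrix stored as
    // vector-of-vectors; uint throughout, matching the loop indices.
    uint countZeros ( const std::vector<std::vector<double> > & dataMatrix,
                      const uint d, const uint n )
    {
      uint nrZeros = 0;
      for ( uint i = 0; i < d; i++ )
        for ( uint k = 0; k < n; k++ )
          if ( dataMatrix[i][k] == 0.0 )
            nrZeros++;
      return nrZeros;
    }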
+ 28 - 24
tests/TestGPHIKOnlineLearnable.cpp

@@ -56,20 +56,24 @@ void readData ( const std::string filename, NICE::Matrix & data, NICE::Vector &
   }    
 }
 
-void prepareLabelMappings (std::map<int,int> & mapClNoToIdxTrain, const GPHIKClassifier * classifier, std::map<int,int> & mapClNoToIdxTest, const NICE::Vector & yMultiTest)
+void prepareLabelMappings (std::map< uint, uint > & mapClNoToIdxTrain, 
+                           const GPHIKClassifier * classifier, 
+                           std::map< uint, uint > & mapClNoToIdxTest, 
+                           const NICE::Vector & yMultiTest
+                          )
 {
   // determine classes known during training and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
   
-  int noClassesKnownTraining ( classesKnownTraining.size() );
-  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  uint noClassesKnownTraining ( classesKnownTraining.size() );
+  std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
   
   // determine classes known during testing and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTest;
+  std::set< uint > classesKnownTest;
   classesKnownTest.clear();
   
 
@@ -83,18 +87,18 @@ void prepareLabelMappings (std::map<int,int> & mapClNoToIdxTrain, const GPHIKCla
     }
   }          
   
-  int noClassesKnownTest ( classesKnownTest.size() );  
-  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );   
+  uint noClassesKnownTest ( classesKnownTest.size() );  
+  std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+  for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  );   
 }
 
 void evaluateClassifier ( NICE::Matrix & confusionMatrix, 
                           const NICE::GPHIKClassifier * classifier, 
                           const NICE::Matrix & data,
                           const NICE::Vector & yMulti,
-                          const std::map<int,int> & mapClNoToIdxTrain,
-                          const std::map<int,int> & mapClNoToIdxTest
+                          const std::map< uint, uint > & mapClNoToIdxTrain,
+                          const std::map< uint, uint > & mapClNoToIdxTest
                         ) 
 {
   int i_loopEnd  ( (int)data.rows() );  
@@ -103,7 +107,7 @@ void evaluateClassifier ( NICE::Matrix & confusionMatrix,
   {
     NICE::Vector example ( data.getRow(i) );
     NICE::SparseVector scores;
-    int result;    
+    uint result;    
     
     // classify with incrementally trained classifier 
     classifier->classify( &example, result, scores );
@@ -124,14 +128,14 @@ void compareClassifierOutputs ( const NICE::GPHIKClassifier * classifier,
     NICE::Vector example ( data.getRow(i) );
     
     NICE::SparseVector scores;
-    int result;    
+    uint result;    
     
     // classify with incrementally trained classifier 
     classifier->classify( &example, result, scores );
 
     
     NICE::SparseVector scoresScratch;
-    int resultScratch;
+    uint resultScratch;
     classifierScratch->classify( &example, resultScratch, scoresScratch );
     
     
@@ -215,8 +219,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allow for non-continuous class labels  
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::map< uint, uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   
@@ -352,8 +356,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allow for non-continuous class labels  
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::map< uint, uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   
@@ -492,8 +496,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningBinarytoMultiClass()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allow for non-continuous class labels  
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::map< uint, uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   
@@ -651,8 +655,8 @@ void TestGPHIKOnlineLearnable::testOnlineLearningMultiClass()
   
   // determine classes known during training/testing and corresponding mapping
  // thereby allow for non-continuous class labels  
-  std::map<int,int> mapClNoToIdxTrain;
-  std::map<int,int> mapClNoToIdxTest;
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::map< uint, uint > mapClNoToIdxTest;
   prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
   
   

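Note: with prepareLabelMappings and evaluateClassifier agreeing on uint keys, the two maps are consumed when filling the confusion matrix. A hedged sketch of that consumer (classify() and the variable names follow the test code above; the row/column convention is an assumption derived from the confusionMatrix dimensions noClassesKnownTraining x noClassesKnownTest shown earlier):

    // Rows: predicted class via the training mapping.
    // Columns: ground-truth class via the test mapping.
    for ( int i = 0; i < (int) data.rows(); i++ )
    {
      NICE::Vector example ( data.getRow(i) );
      NICE::SparseVector scores;
      uint result;
      classifier->classify ( &example, result, scores );

      confusionMatrix ( mapClNoToIdxTrain.find ( result )->second,
                        mapClNoToIdxTest.find ( (uint) yMulti[i] )->second
                      ) += 1.0;
    }

find() is safe here only because both maps were built from exactly the classes that can occur; an unknown label would dereference end().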
+ 23 - 18
tests/TestGPHIKPersistent.cpp

@@ -24,7 +24,7 @@ using namespace std; //C basics
 using namespace NICE;  // nice-core
 
 const bool verboseStartEnd = true;
-const bool verbose = true;
+const bool verbose = false;
 
 
 CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKPersistent );
@@ -83,9 +83,14 @@ void TestGPHIKPersistent::testPersistentMethods()
   yBinTrain -= 1;
   yBinTrain *= -1;
   
-  std::cerr << yBinTrain << std::endl;
+  if ( verbose )
+  {
+    std::cerr << yBinTrain << std::endl;
+  
+    std::cerr << "train classifier with artifially disturbed labels" << std::endl;
+    
+  }
   
-  std::cerr << "train classifier with artifially disturbed labels" << std::endl;
   classifier->train ( examplesTrain , yBinTrain);//yMultiTrain );
   
   
@@ -147,17 +152,17 @@ void TestGPHIKPersistent::testPersistentMethods()
   
   // determine classes known during training and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  std::set< uint > classesKnownTraining = classifier->getKnownClassNumbers();
   
-  int noClassesKnownTraining ( classesKnownTraining.size() );
-  std::map<int,int> mapClNoToIdxTrain;
-  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
-  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
-      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  uint noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map< uint, uint > mapClNoToIdxTrain;
+  std::set< uint >::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( uint i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair< uint, uint > ( *clTrIt, i )  );
   
   // determine classes known during testing and corresponding mapping
  // thereby allow for non-continuous class labels
-  std::set<int> classesKnownTest;
+  std::set< uint > classesKnownTest;
   classesKnownTest.clear();
   
 
@@ -171,23 +176,23 @@ void TestGPHIKPersistent::testPersistentMethods()
     }
   }          
   
-  int noClassesKnownTest ( classesKnownTest.size() );  
-  std::map<int,int> mapClNoToIdxTest;
-  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
-  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
-      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+  uint noClassesKnownTest ( classesKnownTest.size() );  
+  std::map< uint, uint > mapClNoToIdxTest;
+  std::set< uint >::const_iterator clTestIt = classesKnownTest.begin();
+  for ( uint i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair< uint, uint > ( *clTestIt, i )  ); 
           
   
   if ( verbose )
   {
     std::cout << "Train data mapping: " << std::endl;
-    for ( std::map<int,int>::const_iterator clTrainIt = mapClNoToIdxTrain.begin(); clTrainIt != mapClNoToIdxTrain.end(); clTrainIt++ )
+    for ( std::map< uint, uint >::const_iterator clTrainIt = mapClNoToIdxTrain.begin(); clTrainIt != mapClNoToIdxTrain.end(); clTrainIt++ )
     {
       std::cout << " " << clTrainIt->first << " " << clTrainIt->second << std::endl;
     }
 
     std::cout << "Test data mapping: " << std::endl;
-    for ( std::map<int,int>::const_iterator clTestIt = mapClNoToIdxTest.begin(); clTestIt != mapClNoToIdxTest.end(); clTestIt++ )
+    for ( std::map< uint, uint >::const_iterator clTestIt = mapClNoToIdxTest.begin(); clTestIt != mapClNoToIdxTest.end(); clTestIt++ )
     {
       std::cout << " " << clTestIt->first << " " << clTestIt->second << std::endl;
     }    
@@ -203,7 +208,7 @@ void TestGPHIKPersistent::testPersistentMethods()
   {
     NICE::Vector example ( dataTest.getRow(i) );
     NICE::SparseVector scores;
-    int result;
+    uint result;
     
     // classify with trained classifier 
     classifier->classify( &example, result, scores );

+ 5 - 2
tests/TestGPHIKRegression.cpp

@@ -117,8 +117,11 @@ void TestGPHIKRegression::testRegressionHoldInData()
   NICE::GPHIKRegression * regressionMethod;
   regressionMethod = new NICE::GPHIKRegression ( &conf );
   regressionMethod->train ( examplesTrain , yValues );
-  std::cerr << " yValues used for training regression object" << std::endl;
-  std::cerr << yValues << std::endl;
+  if (verbose)
+  {
+    std::cerr << " yValues used for training regression object" << std::endl;
+    std::cerr << yValues << std::endl;
+  }
   
   double holdInLoss ( 0.0 );
   

+ 26 - 26
tests/TestVectorSorter.cpp

@@ -27,16 +27,16 @@ void TestVectorSorter::checkData ( const vector<double> & all_elements, const NI
   if (verboseStartEnd)
     std::cerr << "================== TestVectorSorter::checkData ===================== " << std::endl;
   
-  vector< pair<double, int> > all_elements_sorted;
+  std::vector< pair<double, uint> > all_elements_sorted;
 
-  vector< pair<double, int> > nonzero_elements;
+  std::vector< pair<double, uint> > nonzero_elements;
   for (uint i = 0 ; i < all_elements.size(); i++ )
   {
     if ( fabs(all_elements[i]) > sparse_tolerance ) {
-      nonzero_elements.push_back( pair<double, int> ( all_elements[i], i ) );
-      all_elements_sorted.push_back( pair<double, int> ( all_elements[i], i ) );
+      nonzero_elements.push_back( pair<double, uint> ( all_elements[i], i ) );
+      all_elements_sorted.push_back( pair<double, uint> ( all_elements[i], i ) );
     } else {
-      all_elements_sorted.push_back( pair<double, int> ( 0.0, i ) );
+      all_elements_sorted.push_back( pair<double, uint> ( 0.0, i ) );
     }
   }
 
@@ -46,34 +46,32 @@ void TestVectorSorter::checkData ( const vector<double> & all_elements, const NI
   // looping through all non-zero values
   uint k = 0;
   for (NICE::SortedVectorSparse<double>::const_elementpointer it = vSS.nonzeroElements().begin(); it != vSS.nonzeroElements().end(); it++,k++)
-	{
+  {
     CPPUNIT_ASSERT_DOUBLES_EQUAL( nonzero_elements[k].first, it->first, 0.0 );
     CPPUNIT_ASSERT_EQUAL( nonzero_elements[k].second, it->second.first );
-	}
+  }
 
   // 2 3 0 1 5 4
-	std::vector<int> vSSPerm(vSS.getPermutation());
-	for (int k = 0;k < vSSPerm.size(); k++)
+	std::vector<uint> vSSPerm(vSS.getPermutation());
+	for ( uint k = 0;k < vSSPerm.size(); k++ )
 	{
     CPPUNIT_ASSERT_EQUAL( all_elements_sorted[k].second, vSSPerm[k] );
 	}
 	
-	std::vector<int> vSSPermNNZ (vSS.getPermutationNonZero());
-  vector<pair<int,double> > sv ( vSS.getOrderInSeparateVector() );
-	for (int k = 0;k < vSSPermNNZ.size(); k++)
-	{
+  std::vector<uint> vSSPermNNZ (vSS.getPermutationNonZero());
+  vector<pair<uint,double> > sv ( vSS.getOrderInSeparateVector() );
+  for ( uint k = 0; k < vSSPermNNZ.size(); k++ )
+  {
     CPPUNIT_ASSERT_EQUAL( nonzero_elements[k].second, vSSPermNNZ[k] );
     CPPUNIT_ASSERT_EQUAL( sv[k].first, vSSPermNNZ[k] );
     CPPUNIT_ASSERT_EQUAL( sv[k].second, vSS.access( sv[k].first ) );
-	}
+  }
 
-//   cerr << endl;
-  for (int k = 0;k < vSS.getN();k++)
+
+  for ( uint k = 0; k < vSS.getN(); k++ )
   {
     CPPUNIT_ASSERT_DOUBLES_EQUAL( all_elements[k], vSS.access(k), sparse_tolerance ); 
-//     cerr << "Element " << k << " = " << vSS.access(k) << endl;
   }
-//     vSS.print();
 
   if (verboseStartEnd)
     std::cerr << "================== TestVectorSorter::checkData done ===================== " << std::endl;  
@@ -86,7 +84,7 @@ void TestVectorSorter::testVectorSorter()
   if (verboseStartEnd)
     std::cerr << "================== TestVectorSorter::testVectorSorter ===================== " << std::endl;
   
-  vector<double> all_elements;
+  std::vector<double> all_elements;
   all_elements.push_back(2);
   all_elements.push_back(4);
   all_elements.push_back(0);
@@ -98,28 +96,30 @@ void TestVectorSorter::testVectorSorter()
 
   // Now we put everything in a vectorsortersparse object
   NICE::SortedVectorSparse<double> vSS;
-	vSS.setTolerance(sparse_tolerance);
+  vSS.setTolerance(sparse_tolerance);
   for (uint i = 0 ; i < all_elements.size(); i++ )
+  {
     vSS.insert( all_elements[i] );
+  }
 
   checkData( all_elements, vSS );
   
 
 //   cerr << endl;
 //   cerr << "v[1] = 3.0 ";
-  vSS.set(1, 3.0);
+  vSS.set( 1, 3.0);
   all_elements[1] = 3.0;
   checkData( all_elements, vSS );
   
 //   cerr << endl;
 //   cerr << "v[1] = 0.0 ";
-  vSS.set(1, 0.0);
+  vSS.set( 1, 0.0);
   all_elements[1] = 0.0;
   checkData( all_elements, vSS );
 
 //   cerr << endl;
 //   cerr << "v[5] = -3.0 ";
-  vSS.set(5, -3.0);
+  vSS.set( 5, -3.0);
   all_elements[5] = -3.0;
   checkData( all_elements, vSS );
 
@@ -137,19 +137,19 @@ void TestVectorSorter::testVectorSorter()
 
 //   cerr << endl;
 //   cerr << "v[0] = -10.0 ";
-  vSS.set(0, -10.0);
+  vSS.set( 0, -10.0);
   all_elements[0] = -10.0;
   checkData( all_elements, vSS );
 
 //   cerr << endl;
 //   cerr << "v[5] = 2.0 ";
-  vSS.set(5, 2.0);
+  vSS.set( 5, 2.0);
   all_elements[5] = 2.0;
   checkData( all_elements, vSS );
 
 //   cerr << endl;
 //   cerr << "v[5] = 0.0 ";
-  vSS.set(5, 0.0);
+  vSS.set( 5, 0.0);
   all_elements[5] = 0.0;
   checkData( all_elements, vSS ); 
 

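Note: the TestVectorSorter hunks mainly switch permutation indices to uint and normalize indentation. For orientation, a minimal usage sketch of the container under test, restricted to methods exercised above (the tolerance value is illustrative; assumes the NICE header providing SortedVectorSparse):

    NICE::SortedVectorSparse<double> vSS;
    vSS.setTolerance ( 10e-10 );   // magnitudes below this are treated as zero

    vSS.insert ( 2.0 );            // element 0
    vSS.insert ( 0.0 );            // element 1, stored as a structural zero

    vSS.set ( 1, 3.0 );            // overwrite element 1; order is updated
    double v1 = vSS.access ( 1 );  // reads back 3.0

    // Permutations expose the sorted order, now with uint indices:
    std::vector<uint> perm    = vSS.getPermutation();        // all elements
    std::vector<uint> permNNZ = vSS.getPermutationNonZero(); // non-zeros only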
Some files were not shown because too many files have changed in this diff