merge commit

Johannes Ruehle, 11 years ago
commit 0339a699b6
43 changed files with 4689 additions and 1820 deletions
  1. FMKGPHyperparameterOptimization.cpp (+382 -357)
  2. FMKGPHyperparameterOptimization.h (+126 -49)
  3. FastMinKernel.cpp (+286 -241)
  4. FastMinKernel.h (+78 -58)
  5. FeatureMatrixT.h (+8 -6)
  6. FeatureMatrixT.tcc (+93 -40)
  7. GMHIKernel.cpp (+19 -8)
  8. GMHIKernel.h (+19 -3)
  9. GPHIKClassifier.cpp (+429 -119)
  10. GPHIKClassifier.h (+65 -21)
  11. GPLikelihoodApprox.cpp (+65 -92)
  12. GPLikelihoodApprox.h (+17 -14)
  13. IKMLinearCombination.cpp (+83 -16)
  14. IKMLinearCombination.h (+23 -5)
  15. IKMNoise.cpp (+86 -110)
  16. IKMNoise.h (+24 -13)
  17. ImplicitKernelMatrix.h (+25 -5)
  18. OnlineLearnable.h (+46 -0)
  19. SortedVectorSparse.h (+138 -53)
  20. corefiles.cmake (+1 -0)
  21. matlab/GPHIK.cpp (+854 -0)
  22. matlab/Makefile (+5 -0)
  23. matlab/classHandleMtoC.h (+142 -0)
  24. parameterizedFunctions/PFAbsExp.h (+75 -18)
  25. parameterizedFunctions/PFExp.h (+72 -14)
  26. parameterizedFunctions/PFMKL.h (+131 -12)
  27. parameterizedFunctions/PFWeightedDim.h (+72 -19)
  28. parameterizedFunctions/ParameterizedFunction.cpp (+51 -8)
  29. parameterizedFunctions/ParameterizedFunction.h (+8 -5)
  30. progfiles.cmake (+2 -0)
  31. progs/classifyDatasetGPHIK.cpp (+6 -6)
  32. progs/toyExample.cpp (+125 -37)
  33. progs/toyExampleStoreRestore.cpp (+204 -0)
  34. testfiles.cmake (+3 -0)
  35. tests/TestFastHIK.cpp (+32 -486)
  36. tests/TestFastHIK.h (+0 -4)
  37. tests/TestFeatureMatrixT.cpp (+1 -1)
  38. tests/TestGPHIKOnlineLearnable.cpp (+382 -0)
  39. tests/TestGPHIKOnlineLearnable.h (+38 -0)
  40. tests/TestGPHIKPersistent.cpp (+225 -0)
  41. tests/TestGPHIKPersistent.h (+30 -0)
  42. tests/toyExampleSmallScaleTrain.data (+64 -0)
  43. tests/toyExampleTest.data (+154 -0)

File diff suppressed because it is too large
+ 382 - 357
FMKGPHyperparameterOptimization.cpp


+ 126 - 49
FMKGPHyperparameterOptimization.h

@@ -1,6 +1,6 @@
 /** 
 * @file FMKGPHyperparameterOptimization.h
-* @brief Heart of the framework to set up everything, perform optimization, incremental updates, classification, variance prediction (Interface)
+* @brief Heart of the framework to set up everything, perform optimization, classification, and variance prediction (Interface)
 * @author Erik Rodner, Alexander Freytag
 * @date 01/02/2012
 
@@ -8,10 +8,12 @@
 #ifndef _NICE_FMKGPHYPERPARAMETEROPTIMIZATIONINCLUDE
 #define _NICE_FMKGPHYPERPARAMETEROPTIMIZATIONINCLUDE
 
+// STL includes
 #include <vector>
 #include <set>
 #include <map>
 
+// NICE-core includes
 #include <core/algebra/EigValues.h>
 #include <core/algebra/IterativeLinearSolver.h>
 #include <core/basics/Config.h>
@@ -22,10 +24,13 @@
 #include <core/matlabAccess/MatFileIO.h>
 #endif
 
-#include "FastMinKernel.h"
-#include "GPLikelihoodApprox.h"
-#include "IKMLinearCombination.h"
-#include "Quantization.h"
+// gp-hik-core includes
+#include "gp-hik-core/FastMinKernel.h"
+#include "gp-hik-core/GPLikelihoodApprox.h"
+#include "gp-hik-core/IKMLinearCombination.h"
+#include "gp-hik-core/OnlineLearnable.h"
+#include "gp-hik-core/Quantization.h"
+
 
 #include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
 
@@ -33,11 +38,11 @@ namespace NICE {
   
   /** 
  * @class FMKGPHyperparameterOptimization
- * @brief Heart of the framework to set up everything, perform optimization, incremental updates, classification, variance prediction
+ * @brief Heart of the framework to set up everything, perform optimization, classification, and variance prediction
  * @author Erik Rodner, Alexander Freytag
  */
   
-class FMKGPHyperparameterOptimization : NICE::Persistent
+class FMKGPHyperparameterOptimization : public NICE::Persistent, public NICE::OnlineLearnable
 {
   protected:
     enum {
@@ -102,52 +107,40 @@ class FMKGPHyperparameterOptimization : NICE::Persistent
     double * precomputedTForVarEst;
 
     //! optimize noise with the GP likelihood
-    bool optimizeNoise;
-
-    //! learn in a balanced manner
-    bool learnBalanced;       
+    bool optimizeNoise;     
        
-    //! k largest eigenvalues for every kernel matrix (k == nrOfEigenvaluesToConsider, if we do not use balanced learning, we have only 1 entry)
-    std::vector< NICE::Vector> eigenMax;
+    //! k largest eigenvalues of the kernel matrix (k == nrOfEigenvaluesToConsider)
+    NICE::Vector eigenMax;
 
-    //! eigenvectors corresponding to k largest eigenvalues for every matrix (k == nrOfEigenvaluesToConsider) -- format: nxk
-    std::vector< NICE::Matrix> eigenMaxVectors;
+    //! eigenvectors corresponding to k largest eigenvalues (k == nrOfEigenvaluesToConsider) -- format: nxk
+    NICE::Matrix eigenMaxVectors;
     
     //! needed for optimization and variance approximation
-    std::map<int, IKMLinearCombination * > ikmsums;
+    IKMLinearCombination * ikmsum;
     
     //! storing the labels is needed for Incremental Learning (re-optimization)
     NICE::Vector labels;
     
-    //! we store the alpha vectors for good initializations in the IL setting
-    std::map<int, NICE::Vector> lastAlphas;
 
     //! calculate binary label vectors using a multi-class label vector
     int prepareBinaryLabels ( std::map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses);     
     
-    //! prepare the GPLike objects for given binary labels and already given ikmsum-objects
-    inline void setupGPLikelihoodApprox(std::map<int,GPLikelihoodApprox * > & gplikes, const std::map<int, NICE::Vector> & binaryLabels, std::map<int,uint> & parameterVectorSizes);    
+    //! prepare the GPLike object for given binary labels and already given ikmsum-object
+    inline void setupGPLikelihoodApprox( GPLikelihoodApprox * & gplike, const std::map<int, NICE::Vector> & binaryLabels, uint & parameterVectorSize);    
     
     //! update eigenvectors and eigenvalues for given ikmsum-objects and a method to compute eigenvalues
-    inline void updateEigenVectors();
+    inline void updateEigenDecomposition( const int & i_noEigenValues );
     
     //! core of the optimize-functions
-    inline void performOptimization(std::map<int,GPLikelihoodApprox * > & gplikes, const std::map<int,uint> & parameterVectorSizes, const bool & roughOptimization = true);
+    inline void performOptimization( GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
     
     //! apply the optimized transformation values to the underlying features
-    inline void transformFeaturesWithOptimalParameters(const std::map<int,GPLikelihoodApprox * > & gplikes, const std::map<int,uint> & parameterVectorSizes);
+    inline void transformFeaturesWithOptimalParameters(const GPLikelihoodApprox & gplike, const uint & parameterVectorSize);
     
     //! build the resulting matrices A and B as well as lookup tables T for fast evaluations using the optimized parameter settings
-    inline void computeMatricesAndLUTs(const std::map<int,GPLikelihoodApprox * > & gplikes);
-    
-    //! Update matrices (A, B, LUTs) and optionally find optimal parameters after adding a new example.  
-    void updateAfterSingleIncrement (const NICE::SparseVector & x, const bool & performOptimizationAfterIncrement = false);    
-    //! Update matrices (A, B, LUTs) and optionally find optimal parameters after adding multiple examples.  
-    void updateAfterMultipleIncrements (const std::vector<const NICE::SparseVector*> & x, const bool & performOptimizationAfterIncrement = false);   
-    
-    //! use the alphas from the last iteration as initial guess for the ILS?
-    bool usePreviousAlphas;
+    inline void computeMatricesAndLUTs( const GPLikelihoodApprox & gplike);
     
+     
     //! store the class number of the positive class (i.e., larger class no), only used in binary settings
     int binaryLabelPositive;
     //! store the class number of the negative class (i.e., smaller class no), only used in binary settings
@@ -155,8 +148,19 @@ class FMKGPHyperparameterOptimization : NICE::Persistent
     
     //! contains all class numbers of the currently known classes
     std::set<int> knownClasses;
-    //! contains the class numbers of new classes - only needed within the increment step
-    std::set<int> newClasses;
+    
+    bool b_usePreviousAlphas;
+    
+    //! we store the alpha vectors for good initializations in the IL setting
+    std::map<int, NICE::Vector> lastAlphas;  
+
+    //! Update matrices (A, B, LUTs) and optionally find optimal parameters after adding (a) new example(s).  
+    void updateAfterIncrement (
+      const std::set<int> newClasses,
+      const bool & performOptimizationAfterIncrement = false
+    );    
+  
+
     
   public:  
     
@@ -175,11 +179,17 @@ class FMKGPHyperparameterOptimization : NICE::Persistent
     /** simple destructor */
     virtual ~FMKGPHyperparameterOptimization();
     
-    // get and set methods
+    ///////////////////// ///////////////////// /////////////////////
+    //                         GET / SET
+    ///////////////////// ///////////////////// ///////////////////// 
     void setParameterUpperBound(const double & _parameterUpperBound);
     void setParameterLowerBound(const double & _parameterLowerBound);  
     
-    //high level methods
+    std::set<int> getKnownClassNumbers ( ) const;
+    
+    ///////////////////// ///////////////////// /////////////////////
+    //                      CLASSIFIER STUFF
+    ///////////////////// ///////////////////// /////////////////////  
     
     void initialize( const Config *conf, ParameterizedFunction *pf, FastMinKernel *fmk = NULL, const std::string & confSection = "GPHIKClassifier" );
        
@@ -219,11 +229,18 @@ class FMKGPHyperparameterOptimization : NICE::Persistent
     void optimize ( std::map<int, NICE::Vector> & binaryLabels );    
     
     /**
-    * @brief Compute the necessary variables for appxorimations of predictive variance, assuming an already initialized fmk object
+    * @brief Compute the necessary variables for approximations of predictive variance (LUTs), assuming an already initialized fmk object
+    * @author Alexander Freytag
+    * @date 11-04-2012 (dd-mm-yyyy)
+    */       
+    void prepareVarianceApproximationRough();
+    
+    /**
+    * @brief Compute the necessary variables for fine approximations of predictive variance (EVs), assuming an already initialized fmk object
     * @author Alexander Freytag
     * @date 11-04-2012 (dd-mm-yyyy)
     */       
-    void prepareVarianceApproximation();
+    void prepareVarianceApproximationFine();    
     
     /**
     * @brief classify an example 
@@ -249,44 +266,104 @@ class FMKGPHyperparameterOptimization : NICE::Persistent
     */
     int classify ( const NICE::Vector & x, SparseVector & scores ) const;    
 
+    //////////////////////////////////////////
+    // variance computation: sparse inputs
+    //////////////////////////////////////////
+    
     /**
     * @brief compute predictive variance for a given test example using a rough approximation: k_{**} -  k_*^T (K+\sigma I)^{-1} k_* <= k_{**} - |k_*|^2 * 1 / \lambda_max(K + \sigma I), where we approximate |k_*|^2 by neglecting the mixed terms
     * @author Alexander Freytag
     * @date 10-04-2012 (dd-mm-yyyy)
     * @param x input example
-    * @param predVariances contains the approximations of the predictive variances
+    * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & x, NICE::Vector & predVariances) const;
-
+    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & x, double & predVariance ) const;
+    
     /**
     * @brief compute predictive variance for a given test example using a fine approximation  (k eigenvalues and eigenvectors to approximate the quadratic term)
     * @author Alexander Freytag
     * @date 18-04-2012 (dd-mm-yyyy)
     * @param x input example
-    * @param predVariances contains the approximations of the predictive variances
+    * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & x, NICE::Vector & predVariances) const;    
+    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & x, double & predVariance ) const; 
     
     /**
     * @brief compute exact predictive variance for a given test example using ILS methods (exact, but more time consuming than approx versions)
     * @author Alexander Freytag
     * @date 10-04-2012 (dd-mm-yyyy)
     * @param x input example
-    * @param predVariances contains the approximations of the predictive variances
+    * @param predVariance contains the approximation of the predictive variance
     *
     */    
-    void computePredictiveVarianceExact(const NICE::SparseVector & x, NICE::Vector & predVariances) const;
+    void computePredictiveVarianceExact(const NICE::SparseVector & x, double & predVariance ) const; 
+    
+    
+    //////////////////////////////////////////
+    // variance computation: non-sparse inputs
+    //////////////////////////////////////////
+    
+    /**
+    * @brief compute predictive variance for a given test example using a rough approximation: k_{**} -  k_*^T (K+\sigma I)^{-1} k_* <= k_{**} - |k_*|^2 * 1 / \lambda_max(K + \sigma I), where we approximate |k_*|^2 by neglecting the mixed terms
+    * @author Alexander Freytag
+    * @date 19-12-2013 (dd-mm-yyyy)
+    * @param x input example
+    * @param predVariance contains the approximation of the predictive variance
+    *
+    */    
+    void computePredictiveVarianceApproximateRough(const NICE::Vector & x, double & predVariance ) const;    
+
+   
+    
+    /**
+    * @brief compute predictive variance for a given test example using a fine approximation  (k eigenvalues and eigenvectors to approximate the quadratic term)
+    * @author Alexander Freytag
+    * @date 19-12-2013 (dd-mm-yyyy)
+    * @param x input example
+    * @param predVariance contains the approximation of the predictive variance
+    *
+    */    
+    void computePredictiveVarianceApproximateFine(const NICE::Vector & x, double & predVariance ) const;      
+    
+
+    
+   /**
+    * @brief compute exact predictive variance for a given test example using ILS methods (exact, but more time consuming than approx versions)
+    * @author Alexander Freytag
+    * @date 19-12-2013 (dd-mm-yyyy)
+    * @param x input example
+    * @param predVariance contains the approximation of the predictive variance
+    *
+    */    
+    void computePredictiveVarianceExact(const NICE::Vector & x, double & predVariance ) const;  
+    
+    
+    
+    
+    
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT ///////////////////// 
     
-    /** Persistent interface */
     void restore ( std::istream & is, int format = 0 );
     void store ( std::ostream & os, int format = 0 ) const;
     void clear ( ) ;
     
-    void addExample( const NICE::SparseVector & x, const double & label, const bool & performOptimizationAfterIncrement = true);
-    void addMultipleExamples( const std::vector<const NICE::SparseVector*> & newExamples, const NICE::Vector & labels, const bool & performOptimizationAfterIncrement = false);
-        
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////    
+    
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   );
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    );         
 };
 
 }

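Note: the two add* methods above replace the old addExample/addMultipleExamples pair and implement the new OnlineLearnable interface. A minimal usage sketch of the changed signatures; the training/initialization of the gphyper object is assumed, and the function and variable names are illustrative only:

    // Sketch only: exercises the new OnlineLearnable signatures shown above.
    #include <vector>
    #include <core/vector/SparseVectorT.h>
    #include <core/vector/VectorT.h>
    #include "gp-hik-core/FMKGPHyperparameterOptimization.h"

    void incrementSketch ( NICE::FMKGPHyperparameterOptimization & gphyper,
                           const NICE::SparseVector * newExample,
                           const double newLabel )
    {
      // single example: now passed as a const pointer; by default the
      // hyperparameters are re-optimized after the increment
      gphyper.addExample ( newExample, newLabel, true );

      // batch variant: examples as const pointers, labels in one NICE::Vector
      std::vector< const NICE::SparseVector * > newExamples; // fill with data...
      NICE::Vector newLabels ( newExamples.size() );         // ...and labels
      gphyper.addMultipleExamples ( newExamples, newLabels, true );
    }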
+ 286 - 241
FastMinKernel.cpp

@@ -4,11 +4,15 @@
  * @author Alexander Freytag
  * @date 06-12-2011 (dd-mm-yyyy)
 */
+
+// STL includes
 #include <iostream>
-//#include "tools.h"
 
-#include "core/basics/vectorio.h"
-#include "core/basics/Timer.h"
+// NICE-core includes
+#include <core/basics/vectorio.h>
+#include <core/basics/Timer.h>
+
+// gp-hik-core includes
 #include "FastMinKernel.h"
 
 using namespace std;
@@ -53,7 +57,7 @@ FastMinKernel::FastMinKernel ( const sparse_t & X, const double noise, const std
 }
 #endif
 
-FastMinKernel::FastMinKernel ( const vector< SparseVector * > & X, const double noise, const bool _debug, const bool & dimensionsOverExamples, const int & _dim)
+FastMinKernel::FastMinKernel ( const std::vector< const NICE::SparseVector * > & X, const double noise, const bool _debug, const bool & dimensionsOverExamples, const int & _dim)
 {
   this->setDebug(_debug);
   this->hik_prepare_kernel_multiplications ( X, this->X_sorted, dimensionsOverExamples, _dim);
@@ -68,6 +72,36 @@ FastMinKernel::~FastMinKernel()
 {
 }
 
+
+///////////////////// ///////////////////// /////////////////////
+//                         GET / SET
+///////////////////// ///////////////////// ///////////////////// 
+
+void FastMinKernel::setVerbose( const bool & _verbose)
+{
+  verbose = _verbose;
+}
+
+bool FastMinKernel::getVerbose( )   const
+{
+  return verbose;
+}
+
+void FastMinKernel::setDebug( const bool & _debug)
+{
+  debug = _debug;
+  X_sorted.setDebug( _debug );
+}
+
+bool FastMinKernel::getDebug( )   const
+{
+  return debug;
+}
+
+///////////////////// ///////////////////// /////////////////////
+//                      CLASSIFIER STUFF
+///////////////////// ///////////////////// /////////////////////
+
 void FastMinKernel::applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf)
 {
   this->X_sorted.applyFunctionToFeatureMatrix(pf);
@@ -78,7 +112,7 @@ void FastMinKernel::hik_prepare_kernel_multiplications(const std::vector<std::ve
   X_sorted.set_features(X, _dim);
 }
 
-void FastMinKernel::hik_prepare_kernel_multiplications(const std::vector< NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim)
+void FastMinKernel::hik_prepare_kernel_multiplications(const std::vector< const NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim)
 {
   X_sorted.set_features(X, dimensionsOverExamples, _dim);
 }
@@ -1142,6 +1176,10 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
   return Tlookup;  
 }
 
+    //////////////////////////////////////////
+    // variance computation: sparse inputs
+    //////////////////////////////////////////    
+
 void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & A, const NICE::SparseVector & xstar, double & norm, const ParameterizedFunction *pf ) 
 {
   norm = 0.0;
@@ -1252,289 +1290,296 @@ void FastMinKernel::hikComputeKernelVector ( const NICE::SparseVector& xstar, NI
   }  
 }
 
-// ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
+    //////////////////////////////////////////
+    // variance computation: non-sparse inputs
+    //////////////////////////////////////////  
 
-void FastMinKernel::restore ( std::istream & is, int format )
+void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & A, const NICE::Vector & xstar, double & norm, const ParameterizedFunction *pf ) 
 {
-  if (is.good())
+  norm = 0.0;
+  int dim ( 0 );
+  for (Vector::const_iterator i = xstar.begin(); i != xstar.end(); i++, dim++)
   {
-    is.precision (numeric_limits<double>::digits10 + 1);  
+  
+    double fval = *i;
     
-    string tmp;
-    is >> tmp; //class name
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n ) {
+      // all features are zero so let us ignore them completely
+      continue;
+    }
+
+    int position;
+
+    //where is the example x^z_i located in
+    //the sorted array? -> perform binary search, runtime O(log(n))
+    // search using the original value
+    X_sorted.findFirstLargerInDimension(dim, fval, position);
+    position--;
+  
+    //NOTE again - pay attention! This is only valid if all entries are NOT negative! Otherwise, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, if not, do not.
+    double firstPart(0.0);
+    //TODO the access in the line after next can trigger the following error:
+    // Invalid read of size 8
+    if (position >= 0) 
+      firstPart = (A[dim][position-nrZeroIndices]);
+    else
+      firstPart = 0.0;
     
-    is >> tmp;
-    is >> n;
+    double secondPart( 0.0);
+      
+    if ( pf != NULL )
+      fval = pf->f ( dim, fval );
     
-    is >> tmp;
-    is >> d;
+    fval = fval * fval;
     
-    is >> tmp;
-    is >> noise;
+    if (position >= 0) 
+      secondPart = fval * (n-nrZeroIndices-(position+1));
+    else //if x_d^* is smaller than every non-zero training example
+      secondPart = fval * (n-nrZeroIndices);
     
-    is >> tmp;
-    int approxSchemeInt;
-    is >> approxSchemeInt;
-    setApproximationScheme(approxSchemeInt);
-   
-    X_sorted.restore(is,format);
-   }
-  else
-  {
-    std::cerr << "FastMinKernel::restore -- InStream not initialized - restoring not possible!" << std::endl;
+    // but apply using the transformed one
+    norm += firstPart + secondPart;
   }  
 }
-void FastMinKernel::store ( std::ostream & os, int format ) const
+
+void FastMinKernel::hikComputeKVNApproximationFast(const double *Tlookup, const Quantization & q, const NICE::Vector & xstar, double & norm) const
 {
-  if (os.good())
-  {
-    os.precision (numeric_limits<double>::digits10 + 1);
-    os << "FastMinKernel" << std::endl;
-    os << "n: " << n << std::endl;
-    os << "d: " << d << std::endl;
-    os << "noise: " << noise << std::endl;
-    os << "approxScheme: " << approxScheme << std::endl;    
-    X_sorted.store(os,format);  
-  }
-  else
+  norm = 0.0;
+  // runtime is O(d) if the quantizer is O(1)
+  int dim ( 0 );
+  for (Vector::const_iterator i = xstar.begin(); i != xstar.end(); i++, dim++ )
   {
-    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
-  }    
+    double v = *i;
+    // we do not need a parameterized function here, since the quantizer works on the original feature values. 
+    // nonetheless, the lookup table was created using the parameterized function    
+    uint qBin = q.quantize(v);
+    
+    norm += Tlookup[dim*q.size() + qBin];
+  }  
 }
 
-void FastMinKernel::clear ()
-{
-  std::cerr << "FastMinKernel clear-function called" << std::endl;
-}
 
-void FastMinKernel::setVerbose( const bool & _verbose)
+void FastMinKernel::hikComputeKernelVector( const NICE::Vector & xstar, NICE::Vector & kstar) const
 {
-  verbose = _verbose;
-}
+  //init
+  kstar.resize(this->n);
+  kstar.set(0.0);
+  
+  //let's start :)
+  int dim ( 0 );
+  for (NICE::Vector::const_iterator i = xstar.begin(); i != xstar.end(); i++, dim++)
+  {
+  
+    double fval = *i;
+    
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n ) {
+      // all features are zero so let us ignore them completely
+      continue;
+    }
+    
 
-bool FastMinKernel::getVerbose( )   const
-{
-  return verbose;
-}
+    int position;
 
-void FastMinKernel::setDebug( const bool & _debug)
-{
-  debug = _debug;
-  X_sorted.setDebug( _debug );
-}
+    //where is the example x^z_i located in
+    //the sorted array? -> perform binary search, runtime O(log(n))
+    // search using the original value
+    X_sorted.findFirstLargerInDimension(dim, fval, position);
+    position--;
+    
+    //get the non-zero elements for this dimension  
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+    
+    //run over the non-zero elements and add the corresponding entries to our kernel vector
 
-bool FastMinKernel::getDebug( )   const
-{
-  return debug;
+    int count(nrZeroIndices);
+    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, count++ )
+    {
+      int origIndex(i->second.first); //orig index (i->second.second would be the transformed feature value)
+      if (count <= position)
+        kstar[origIndex] += i->first; //orig feature value
+      else
+        kstar[origIndex] += fval;
+    }
+  }  
 }
 
-// ----------------- INCREMENTAL LEARNING METHODS -----------------------
-void FastMinKernel::addExample(const NICE::SparseVector & _v, const ParameterizedFunction *pf )
-{
-  X_sorted.add_feature(_v, pf );
-  n++;
-}
-void FastMinKernel::addExample(const std::vector<double> & _v, const ParameterizedFunction *pf )
-{
-  X_sorted.add_feature(_v, pf );
-  n++;
-}
+///////////////////// INTERFACE PERSISTENT /////////////////////
+// interface specific methods for store and restore
+///////////////////// INTERFACE PERSISTENT ///////////////////// 
 
-void FastMinKernel::updatePreparationForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, NICE::VVector & A, NICE::VVector & B, const ParameterizedFunction *pf) const
-{ 
-  NICE::SparseVector::const_iterator it = _v.begin();
-  for (int dim = 0; dim < this->d; dim++)
+void FastMinKernel::restore ( std::istream & is, int format )
+{
+  bool b_restoreVerbose ( false );
+  if ( is.good() )
   {
-    if (it->first == dim)
+    if ( b_restoreVerbose ) 
+      std::cerr << " restore FastMinKernel" << std::endl;
+    
+    std::string tmp;
+    is >> tmp; //class name 
+    
+    if ( ! this->isStartTag( tmp, "FastMinKernel" ) )
     {
-      //increase both datastructures by one
-      A[dim].append(0.0);
-      B[dim].append(0.0);
+        std::cerr << " WARNING - attempt to restore FastMinKernel, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+	throw;
+    }   
+        
+    is.precision (numeric_limits<double>::digits10 + 1);
+    
+    bool b_endOfBlock ( false ) ;
+    
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "FastMinKernel" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }      
       
-      //this is the index of the new example in this dimension, which was already added
-      int idx;
-      X_sorted.findLastInDimension(dim, it->second, idx);
-      //actually we do not want to have the next position, but the current one
-      idx--;
+      tmp = this->removeStartTag ( tmp );
       
-      // and we do not care about zero elements since we store matrices A and B only for non-zero elements in the training data
-      idx -= X_sorted.getNumberOfZeroElementsPerDimension(dim);
+      if ( b_restoreVerbose )
+	std::cerr << " currently restore section " << tmp << " in FastMinKernel" << std::endl;
       
-      // we start using the last old element, which is located at size-2
-      for( int i = A[dim].size()-2; i >= (idx-1); i--)
+      if ( tmp.compare("n") == 0 )
       {
-        if (pf != NULL)
-          A[dim][i+1] = A[dim][i] + alpha * pf->f ( 1, it->second );
-        else
-        {
-          A[dim][i+1] = A[dim][i] + alpha * it->second;
-        }
-      }    
-          
-      // remember: in contrast to the explanations in our ECCV-paper, we store the alpha-values of the INCREASINGLY ordered features
-      // in the matrix B, not in decreasing order
-      for (int i = B[dim].size()-1; i >= std::max(1,idx); i--)
+        is >> n;        
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("d") == 0 )
+      {
+        is >> d;        
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );
+      } 
+      else if ( tmp.compare("noise") == 0 )
       {
-        B[dim][i] = B[dim][i-1] + alpha;
+        is >> noise;
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );
       }
-      
-      //special case
-      if (idx == 0)
+      else if ( tmp.compare("approxScheme") == 0 )
       {
-        if (pf != NULL)
-          A[dim][0] = alpha * pf->f ( 1, it->second );
-        else
-          A[dim][0] = alpha * it->second;
-        
-        B[dim][0] = alpha;
-      }      
-      
-      it++;
-    }
-    else //_v is zero for that dimension
-    {
-      //nothing to do, since we do not store any information about zero elements
-    }
-  }
-}
-
-void FastMinKernel::updateLookupTableForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, double * T, const Quantization & q, const ParameterizedFunction *pf) const
-{
-  //be aware, index n-1 is only valid, if we do not explicitely changed the indices while inserting elements
-  //actually, the code written below works equally to the following line, but is more efficient since we do not have to call the feature matrix several times
-//   this->hikUpdateLookupTable(T, alpha, 0.0, n-1, q, pf );
-  if (T == NULL)
-  {
-    fthrow(Exception, "FastMinKernel::updateLookupTableForAlphaMultiplications LUT not initialized, run FastMinKernel::hikPrepareLookupTable first!");
-    return;
-  }
-  
-  // number of quantization bins
-  uint hmax = q.size();
-
-  // store (transformed) prototypes
-  double *prototypes = new double [ hmax ];
-  for ( uint i = 0 ; i < hmax ; i++ )
-    if ( pf != NULL ) {
-      // FIXME: the transformed prototypes could change from dimension to another dimension
-      // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
-    } else {
-      prototypes[i] = q.getPrototype(i);
-    }
-  
-  // loop through all dimensions
-  for (NICE::SparseVector::const_iterator it = _v.begin(); it != _v.end(); it++)
-  {
-
-    int dim(it->first);
-
-    double x_i = it->second;
-    //as usually, we quantize the original features, but use the quantized transformed features lateron
-    int q_bin = q.quantize(x_i);      
-
-    //TODO we could speed up this with first do a binary search for the position where the min changes, and then do two separate for-loops
-    for (uint j = 0; j < hmax; j++)
-    {
-      double fval;
-      
-      if (q_bin > j)
-        fval = prototypes[j]; //the prototypes are already transformed
+	int approxSchemeInt;
+	is >> approxSchemeInt;
+	setApproximationScheme(approxSchemeInt);
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );	
+      }
+      else if ( tmp.compare("X_sorted") == 0 )
+      {
+	X_sorted.restore(is,format);
+	
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );
+      }       
       else
       {
-        if (pf != NULL)
-          fval = pf->f( 1, x_i );
-        else
-          fval = x_i;
+	std::cerr << "WARNING -- unexpected FastMinKernel object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	throw;	
       }
-      
-      // pay attention: we use either the quantized prototypes or the REAL feature values, not the quantized ones!
-      T[ dim*hmax + j ] += alpha*fval;
     }
+   }
+  else
+  {
+    std::cerr << "FastMinKernel::restore -- InStream not initialized - restoring not possible!" << std::endl;
   }
-
-  delete [] prototypes;
 }
 
-void FastMinKernel::updatePreparationForKVNApproximation(const NICE::SparseVector & _v, NICE::VVector & A, const ParameterizedFunction *pf) const
+void FastMinKernel::store ( std::ostream & os, int format ) const
 {
-  for (NICE::SparseVector::const_iterator it = _v.begin(); it != _v.end(); it++)
-  {
-    int dim(it->first);  
-    int idx;
+  if (os.good())
+  {    
+    // show starting point
+    os << this->createStartTag( "FastMinKernel" ) << std::endl;    
+    
+    os.precision (numeric_limits<double>::digits10 + 1);
+
+    os << this->createStartTag( "n" ) << std::endl;
+    os << n << std::endl;
+    os << this->createEndTag( "n" ) << std::endl;
     
-    // we use the original feature value for this search, not the transformed one (see FeatureMatrixT)
-    // we assume that the nex example was already inserted to the FeatureMatrix
-    X_sorted.findLastInDimension(dim, it->second, idx);  
-    //we do not want to considere zero elements, since we store it in a sparse way
-    idx -= X_sorted.getNumberOfZeroElementsPerDimension(dim);
-    // not the next one, but the current (position vs index)
-    idx--;
     
-    // perform a resize operations, since we have a new element
-    A[dim].resize(A[dim].size()+1);
+    os << this->createStartTag( "d" ) << std::endl;
+    os << d << std::endl;
+    os << this->createEndTag( "d" ) << std::endl;
+
     
-    // update :)
-    for( int i = A[dim].size()-1; i >= idx; i--)
-    {
-      if (pf != NULL)
-        A[dim][i] = A[dim][i-1] + pow(pf->f ( 1, it->second ), 2);
-      else
-        A[dim][i] = A[dim][i-1] + pow(it->second, 2);
-    }   
+    os << this->createStartTag( "noise" ) << std::endl;
+    os << noise << std::endl;
+    os << this->createEndTag( "noise" ) << std::endl;
+
+    
+    os << this->createStartTag( "approxScheme" ) << std::endl;
+    os << approxScheme << std::endl;
+    os << this->createEndTag( "approxScheme" ) << std::endl;
+    
+    os << this->createStartTag( "X_sorted" ) << std::endl;
+    //store the underlying data
+    X_sorted.store(os, format);
+    os << this->createEndTag( "X_sorted" ) << std::endl;   
+    
+    
+    // done
+    os << this->createEndTag( "FastMinKernel" ) << std::endl;        
   }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }    
 }
 
-void FastMinKernel::updateLookupTableForKVNApproximation(const NICE::SparseVector & _v, double * T, const Quantization & q, const ParameterizedFunction *pf) const
+void FastMinKernel::clear ()
 {
-  if (T == NULL)
-  {
-    fthrow(Exception, "FastMinKernel::updateLookupTableForKernelVectorNorm LUT not initialized, run FastMinKernel::hikPrepareLookupTableForKernelVectorNorm first!");
-    return;
-  }
-  
-  // number of quantization bins
-  uint hmax = q.size();
+  std::cerr << "FastMinKernel clear-function called" << std::endl;
+}
 
-  // store (transformed) prototypes
-  double *prototypes = new double [ hmax ];
-  for ( uint i = 0 ; i < hmax ; i++ )
-    if ( pf != NULL ) {
-      // FIXME: the transformed prototypes could change from dimension to another dimension
-      // We skip this flexibility ...but it should be changed in the future
-      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
-    } else {
-      prototypes[i] = q.getPrototype(i);
-    }
-   
-  // loop through all dimensions
-  for (NICE::SparseVector::const_iterator it = _v.begin(); it != _v.end(); it++)
-  {
-    int dim(it->first);
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+// interface specific methods for incremental extensions
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
 
-    double x_i = it->second;
-    //as usually, we quantize the original features, but use the quantized transformed features lateron
-    int q_bin = q.quantize(x_i);      
+void FastMinKernel::addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement
+			   )
+{
+  // no parameterized function was given - use default 
+  this->addExample ( example );
+}
 
-    //TODO we could speed up this with first do a binary search for the position where the min changes, and then do two separate for-loops
-    for (uint j = 0; j < hmax; j++)
-    {
-      double fval;
-      
-      if (q_bin > j)
-        fval = prototypes[j]; //the prototypes are already transformed
-      else
-      {
-        if (pf != NULL)
-          fval = pf->f( 1, x_i );
-        else
-          fval = x_i;
-      }
-      
-      // pay attention: we use either the quantized prototypes or the REAL feature values, not the quantized ones!
-      T[ dim*hmax + j ] += pow( fval, 2 );
-    }
-  }
-  
-  delete [] prototypes;  
+
+void FastMinKernel::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement
+				    )
+{
+  // no parameterized function was given - use default   
+  this->addMultipleExamples ( newExamples );
 }
+
+void FastMinKernel::addExample( const NICE::SparseVector * example, 
+			          const NICE::ParameterizedFunction *pf
+			        )
+{ 
+  X_sorted.add_feature( *example, pf );
+  n++;
+}
+
+void FastMinKernel::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				           const NICE::ParameterizedFunction *pf
+				         )
+{
+  for ( std::vector< const NICE::SparseVector * >::const_iterator exIt = newExamples.begin();
+        exIt != newExamples.end();
+        exIt++ )
+  {
+    X_sorted.add_feature( **exIt, pf );
+    n++;     
+  } 
+}
+

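Note: the store/restore pair above switches from the old plain key/value dump to tagged blocks. A minimal round-trip sketch, assuming a trained fmk object and a default-constructible FastMinKernel (the file name and function name are illustrative):

    // Sketch only: round-trips a FastMinKernel through the new tagged format.
    #include <fstream>
    #include "gp-hik-core/FastMinKernel.h"

    void persistSketch ( const NICE::FastMinKernel & fmk )
    {
      // writes the tagged blocks n, d, noise, approxScheme, X_sorted
      std::ofstream ofs ( "fmk.model" ); // hypothetical file name
      fmk.store ( ofs );
      ofs.close();

      // restore() scans start tags until the FastMinKernel end tag is
      // reached and aborts on any unknown block
      NICE::FastMinKernel fmkRestored;   // assumes a default constructor
      std::ifstream ifs ( "fmk.model" );
      fmkRestored.restore ( ifs );
      ifs.close();
    }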
+ 78 - 58
FastMinKernel.h

@@ -7,16 +7,24 @@
 #ifndef FASTMINKERNELINCLUDE
 #define FASTMINKERNELINCLUDE
 
+// STL includes
 #include <iostream>
 
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Exception.h>
+#include <core/basics/Persistent.h>
+// 
+// 
 #include <core/vector/MatrixT.h>
 #include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
 #include <core/vector/VVector.h>
-#include <core/basics/Exception.h>
-#include "core/basics/Persistent.h"
 
-#include "FeatureMatrixT.h"
-#include "Quantization.h"
+// gp-hik-core includes
+#include "gp-hik-core/FeatureMatrixT.h"
+#include "gp-hik-core/OnlineLearnable.h"
+#include "gp-hik-core/Quantization.h"
 #include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
 
 namespace NICE {
@@ -29,7 +37,7 @@ namespace NICE {
  */  
   
   /** interface to FastMinKernel implementation*/
-  class FastMinKernel : NICE::Persistent
+  class FastMinKernel : public NICE::Persistent, public OnlineLearnable
   {
 
     protected:
@@ -71,7 +79,7 @@ namespace NICE {
       */
       void hik_prepare_kernel_multiplications(const std::vector<std::vector<double> > & X, NICE::FeatureMatrixT<double> & X_sorted, const int & _dim = -1);
       
-      void hik_prepare_kernel_multiplications ( const std::vector< NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim = -1);
+      void hik_prepare_kernel_multiplications ( const std::vector< const NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim = -1);
       
       void randomPermutation(NICE::Vector & permutation, const std::vector<int> & oldIndices, const int & newSize) const;
       
@@ -105,7 +113,7 @@ namespace NICE {
       * @param X vector of sparse vector pointers
       * @param noise GP noise
       */
-      FastMinKernel( const std::vector< SparseVector * > & X, const double noise, const bool _debug = false, const bool & dimensionsOverExamples=false, const int & _dim = -1);
+      FastMinKernel( const std::vector< const NICE::SparseVector * > & X, const double noise, const bool _debug = false, const bool & dimensionsOverExamples=false, const int & _dim = -1);
 
 #ifdef NICE_USELIB_MATIO
       /**
@@ -352,6 +360,10 @@ namespace NICE {
       */
       double* hikPrepareLookupTableForKVNApproximation(const Quantization & q, const ParameterizedFunction *pf = NULL) const;
       
+    //////////////////////////////////////////
+    // variance computation: sparse inputs
+    //////////////////////////////////////////      
+      
       /**
      * @brief Approximate norm = |k_*|^2 using the minimum kernel trick and exploiting sparsity of the given feature vector. Approximation does not consider mixed terms between dimensions.
       * @author Alexander Freytag
@@ -386,76 +398,84 @@ namespace NICE {
       */      
       void hikComputeKernelVector( const NICE::SparseVector & xstar, NICE::Vector & kstar) const;
       
-      /** Persistent interface */
-      virtual void restore ( std::istream & is, int format = 0 );
-      virtual void store ( std::ostream & os, int format = 0 ) const; 
-      virtual void clear ();
-      
-      // ----------------- INCREMENTAL LEARNING METHODS -----------------------
+    //////////////////////////////////////////
+    // variance computation: non-sparse inputs
+    //////////////////////////////////////////     
       
       /**
-      * @brief Add a new example to the feature-storage. You have to update the corresponding variables explicitely after that.
-      * @author Alexander Freytag
-      * @date 25-04-2012 (dd-mm-yyyy)
-      *
-      * @param _v new feature vector
-      */       
-      void addExample(const NICE::SparseVector & _v, const ParameterizedFunction *pf = NULL);
-      /**
-      * @brief Add a new example to the feature-storage. You have to update the corresponding variables explicitely after that.
+      * @brief Approximate norm = |k_*|^2 using the minimum kernel trick and exploiting sparsity of the given feature vector. Approximation does not consider mixed terms between dimensions.
       * @author Alexander Freytag
-      * @date 25-04-2012 (dd-mm-yyyy)
-      *
-      * @param _v new feature vector
-      */       
-      void addExample(const std::vector<double> & _v, const ParameterizedFunction *pf = NULL);
+      * @date 19-12-2013 (dd-mm-yyyy)
+      * 
+      * @param A pre-computation matrix (VVector) (use the prepare method) 
+      * @param xstar new feature vector (Vector)
+      * @param norm result of the squared norm approximation
+      * @param pf optional feature transformation
+      */
+      void hikComputeKVNApproximation(const NICE::VVector & A, const NICE::Vector & xstar, double & norm, const ParameterizedFunction *pf = NULL ) ;
       
       /**
-      * @brief Updates A and B matrices for fast kernel multiplications and kernel sums. You need to compute the new alpha value and run addExample first!
+      * @brief Approximate norm = |k_*|^2 using a large lookup table created by hikPrepareSquaredKernelVector and hikPrepareSquaredKernelVectorFast or directly using hikPrepareLookupTableForSquaredKernelVector. Approximation does not consider mixed terms between dimensions.
       * @author Alexander Freytag
-      * @date 25-04-2012 (dd-mm-yyyy)
+      * @date 19-12-2013 (dd-mm-yyyy)
       *
-      * @param _v new feature vector
-      * @param alpha new alpha value for the corresponding feature
-      * @param A precomputed matrix A which will be updated accordingly
-      * @param B precomputed matrix B which will be updated accordingly
-      * @param pf optional feature transformation
-      */       
-      void updatePreparationForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, NICE::VVector & A, NICE::VVector & B, const ParameterizedFunction *pf = NULL) const;
+      * @param Tlookup large lookup table
+      * @param q Quantization object
+      * @param xstar feature vector (indirect k_*)
+      * @param norm result of the calculation
+      */
+      void hikComputeKVNApproximationFast(const double *Tlookup, const Quantization & q, const NICE::Vector & xstar, double & norm ) const;      
+      
       /**
-      * @brief Updates LUT T for very fast kernel multiplications and kernel sums. You need to compute the new alpha value and run addExample first!
+      * @brief Compute the kernel vector k_* between training examples and a test example. Runtime: O(n \times D). Does not exploit sparsity - deprecated!
       * @author Alexander Freytag
-      * @date 26-04-2012 (dd-mm-yyyy)
+      * @date 19-12-2013 (dd-mm-yyyy)
       *
-      * @param _v new feature vector
-      * @param alpha new alpha value for the corresponding feature
-      * @param T precomputed lookup table, which will be updated
-      * @param q quantization object to quantize possible test samples
-      * @param pf optional feature transformation
-      */       
-      void updateLookupTableForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, double * T, const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+      * @param xstar feature vector
+      * @param kstar kernel vector
+      */      
+      void hikComputeKernelVector( const NICE::Vector & xstar, NICE::Vector & kstar) const;      
       
+      /** Persistent interface */
+      virtual void restore ( std::istream & is, int format = 0 );
+      virtual void store ( std::ostream & os, int format = 0 ) const; 
+      virtual void clear ();
+      
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+      
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   );
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    );  
+    
+
       /**
-      * @brief Updates matrix A for approximations of the kernel vector norm. You need to run addExample first!
+      * @brief Add a new example to the feature-storage. You have to update the corresponding variables explicitly after that.
       * @author Alexander Freytag
-      * @date 26-04-2012 (dd-mm-yyyy)
+      * @date 02-01-2014 (dd-mm-yyyy)
       *
-      * @param _v new feature vector
-      * @param A precomputed matrix A which will be updated accordingly
-      * @param pf optional feature transformation
+      * @param example new feature vector
       */       
-      void updatePreparationForKVNApproximation(const NICE::SparseVector & _v, NICE::VVector & A, const ParameterizedFunction *pf = NULL) const;
+      void addExample(const NICE::SparseVector * example, const NICE::ParameterizedFunction *pf = NULL);
+      
       /**
-      * @brief Updates LUT T for fast approximations of the kernel vector norm. You need to run addExample first!
+      * @brief Add multiple new examples to the feature-storage. You have to update the corresponding variables explicitly after that.
       * @author Alexander Freytag
-      * @date 26-04-2012 (dd-mm-yyyy)
+      * @date 02-01-2014 (dd-mm-yyyy)
       *
-      * @param _v new feature vector
-      * @param T precomputed lookup table, which will be updated
-      * @param q quantization object to quantize possible test samples
-      * @param pf optional feature transformation
+      * @param newExamples new feature vectors
       */       
-      void updateLookupTableForKVNApproximation(const NICE::SparseVector & _v, double * T, const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+      void addMultipleExamples(const std::vector<const NICE::SparseVector * > & newExamples, const NICE::ParameterizedFunction *pf = NULL);        
+      
+      
+     
 
   };
 

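Note: with this header change, every sparse-input helper gains a NICE::Vector counterpart. A short sketch of how the dense variants line up; the preparation structures A, Tlookup, and q are assumed to come from the existing prepare* methods, and the function name is illustrative:

    // Sketch only: dense (non-sparse) variance helpers added in this commit.
    #include <core/vector/VectorT.h>
    #include <core/vector/VVector.h>
    #include "gp-hik-core/FastMinKernel.h"
    #include "gp-hik-core/Quantization.h"

    void denseSketch ( NICE::FastMinKernel & fmk,
                       const NICE::VVector & A,     // assumed: from the KVN prepare method
                       const double * Tlookup,      // assumed: from hikPrepareLookupTableForKVNApproximation
                       const NICE::Quantization & q,
                       const NICE::Vector & xstar )
    {
      double norm ( 0.0 );
      // per-dimension binary search, O(d log n)
      fmk.hikComputeKVNApproximation ( A, xstar, norm );
      // LUT variant, O(d) if the quantizer is O(1)
      fmk.hikComputeKVNApproximationFast ( Tlookup, q, xstar, norm );

      // full kernel vector k_*, O(n x d), does not exploit sparsity
      NICE::Vector kstar;
      fmk.hikComputeKernelVector ( xstar, kstar );
    }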
+ 8 - 6
FeatureMatrixT.h

@@ -7,23 +7,25 @@
 #ifndef FEATUREMATRIXINCLUDE
 #define FEATUREMATRIXINCLUDE
 
+// STL includes
 #include <vector>
 #include <set>
 #include <map>
 #include <iostream>
 #include <limits>
 
+// NICE-core includes
 #include <core/basics/Exception.h>
-#include "core/basics/Persistent.h"
-
+#include <core/basics/Persistent.h>
+// 
 #include <core/vector/MatrixT.h>
 #include <core/vector/SparseVectorT.h>
-
+// 
 #ifdef NICE_USELIB_MATIO
   #include <core/matlabAccess/MatFileIO.h> 
 #endif
   
-
+// gp-hik-core includes
 #include "SortedVectorSparse.h"
 #include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
 
@@ -95,7 +97,7 @@ template<class T> class FeatureMatrixT : NICE::Persistent
 #endif
 
     /** just another constructor for sparse features */
-    FeatureMatrixT(const std::vector< SparseVector * > & X, const bool dimensionsOverExamples = false, const int & _dim = -1);
+    FeatureMatrixT(const std::vector< const NICE::SparseVector * > & X, const bool dimensionsOverExamples = false, const int & _dim = -1);
     
 #ifdef NICE_USELIB_MATIO
     /**
@@ -301,7 +303,7 @@ template<class T> class FeatureMatrixT : NICE::Persistent
     void set_features(const std::vector<std::vector<T> > & _features, std::vector<std::vector<int> > & permutations, const int & _dim = -1);
     void set_features(const std::vector<std::vector<T> > & _features, std::vector<std::map<int,int> > & permutations, const int & _dim = -1);
     void set_features(const std::vector<std::vector<T> > & _features, const int & _dim = -1);
-    void set_features(const std::vector< NICE::SparseVector * > & _features, const bool dimensionsOverExamples = false, const int & _dim = -1);
+    void set_features(const std::vector< const NICE::SparseVector * > & _features, const bool dimensionsOverExamples = false, const int & _dim = -1);
     
     /**
     * @brief get a permutation vector for each dimension

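Note: the const-pointer change above is the same one that ripples through FastMinKernel: training data is now handed around as std::vector< const NICE::SparseVector * >. A small construction sketch, assuming a default constructor and illustrative names:

    // Sketch only: building a FeatureMatrixT from const sparse-vector pointers.
    #include <vector>
    #include <core/vector/SparseVectorT.h>
    #include "gp-hik-core/FeatureMatrixT.h"

    void buildSketch ( const std::vector< const NICE::SparseVector * > & examples )
    {
      // outer vector iterates over examples (default), dimension deduced (_dim = -1)
      NICE::FeatureMatrixT<double> fm ( examples );

      // alternatively fill an existing object; set the flag to true if the
      // outer vector iterates over dimensions instead of examples
      NICE::FeatureMatrixT<double> fm2;  // assumes a default constructor
      fm2.set_features ( examples, false );
    }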
+ 93 - 40
FeatureMatrixT.tcc

@@ -7,7 +7,7 @@
 // #ifndef FEATUREMATRIX_TCC
 // #define FEATUREMATRIX_TCC
 
-
+// gp-hik-core includes
 #include "FeatureMatrixT.h"
 
 namespace NICE {
@@ -51,7 +51,7 @@ namespace NICE {
     //Constructor reading data from a vector of sparse vector pointers
     template <typename T>
     FeatureMatrixT<T>::
-    FeatureMatrixT(const std::vector< SparseVector * > & X, const bool dimensionsOverExamples, const int & _dim)
+    FeatureMatrixT(const std::vector< const NICE::SparseVector * > & X, const bool dimensionsOverExamples, const int & _dim)
     {
       features.clear();
       
@@ -630,7 +630,7 @@ namespace NICE {
     }
     
     template <typename T>
-    void FeatureMatrixT<T>::set_features(const std::vector< NICE::SparseVector * > & _features, const bool dimensionsOverExamples, const int & _dim)
+    void FeatureMatrixT<T>::set_features(const std::vector< const NICE::SparseVector * > & _features, const bool dimensionsOverExamples, const int & _dim)
     {   
       features.clear();
       if (_features.size() == 0)
@@ -881,40 +881,79 @@ namespace NICE {
     template <typename T>
     void FeatureMatrixT<T>::restore ( std::istream & is, int format )
     {
-      if (is.good())
-      {
-        is.precision (std::numeric_limits<double>::digits10 + 1);
-        std::string tmp;
-        
-        is >> tmp; //classname
-        
-        is >> tmp;
-        is >> n;
-
-        
-        is >> tmp;
-        is >> d;
-        
-        features.resize(d);
-        //now read features for every dimension
-        for (int dim = 0; dim < d; dim++)
-        {
-          NICE::SortedVectorSparse<T> svs;
-          features[dim] = svs;          
-          features[dim].restore(is,format);
-        }
-        
-        if (verbose)
-        {
-          std::cerr << "FeatureMatrixT<T>::restore" << std::endl;
-          std::cerr << "n: " << n << std::endl;          
-          std::cerr << "d: " << d << std::endl;
-          this->print(std::cerr);
-        }
+      bool b_restoreVerbose ( false );
+      if ( is.good() )
+      {
+	if ( b_restoreVerbose ) 
+	  std::cerr << " restore FeatureMatrixT" << std::endl;
+	
+	std::string tmp;
+	is >> tmp; //class name 
+	
+	if ( ! this->isStartTag( tmp, "FeatureMatrixT" ) )
+	{
+	    std::cerr << " WARNING - attempt to restore FeatureMatrixT, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+	    throw;
+	}   
+	    
+	is.precision ( std::numeric_limits<double>::digits10 + 1);
+	
+	bool b_endOfBlock ( false ) ;
+	
+	while ( !b_endOfBlock )
+	{
+	  is >> tmp; // start of block 
+	  
+	  if ( this->isEndTag( tmp, "FeatureMatrixT" ) )
+	  {
+	    b_endOfBlock = true;
+	    continue;
+	  }      
+	  
+	  tmp = this->removeStartTag ( tmp );
+	  
+	  if ( b_restoreVerbose )
+	    std::cerr << " currently restore section " << tmp << " in FeatureMatrixT" << std::endl;
+	  
+	  if ( tmp.compare("n") == 0 )
+	  {
+	    is >> n;        
+	    is >> tmp; // end of block 
+	    tmp = this->removeEndTag ( tmp );
+	  }
+	  else if ( tmp.compare("d") == 0 )
+	  {
+	    is >> d;        
+	    is >> tmp; // end of block 
+	    tmp = this->removeEndTag ( tmp );
+	  } 
+	  else if ( tmp.compare("features") == 0 )
+	  {
+	    //NOTE assumes d to be read first!
+	    features.resize(d);
+	    //now read features for every dimension
+	    for (int dim = 0; dim < d; dim++)
+	    {
+	      NICE::SortedVectorSparse<T> svs;
+	      features[dim] = svs;          
+	      features[dim].restore(is,format);
+	    }
+	    
+	    is >> tmp; // end of block 
+	    tmp = this->removeEndTag ( tmp );
+	  }       
+	  else
+	  {
+	    std::cerr << "WARNING -- unexpected FeatureMatrixT object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	    throw;	
+	  }
+	}
+         
       }
       else
       {
         std::cerr << "FeatureMatrixT<T>::restore -- InStream not initialized - restoring not possible!" << std::endl;
+        throw;
       }
     }
 
@@ -923,16 +962,30 @@ namespace NICE {
     {
       if (os.good())
       {
+	// show starting point
+	os << this->createStartTag( "FeatureMatrixT" ) << std::endl;
+	
         os.precision (std::numeric_limits<double>::digits10 + 1);
-        os << "FeatureMatrixT" << std::endl;
-        os << "n: " << n << std::endl;
-        os << "d: " << d << std::endl;
+	
+	os << this->createStartTag( "n" ) << std::endl;
+	os << n << std::endl;
+	os << this->createEndTag( "n" ) << std::endl;
+	
+	
+	os << this->createStartTag( "d" ) << std::endl;
+	os << d << std::endl;
+	os << this->createEndTag( "d" ) << std::endl;
         
         //now write features for every dimension
-        for (int dim = 0; dim < d; dim++)
-        {
-          features[dim].store(os,format);
-        }
+	os << this->createStartTag( "features" ) << std::endl;
+	for (int dim = 0; dim < d; dim++)
+	{
+	  features[dim].store(os,format);
+	}
+        os << this->createEndTag( "features" ) << std::endl;
+        
+	// done
+	os << this->createEndTag( "FeatureMatrixT" ) << std::endl;       
       }
       else
       {

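Note: restore() above expects the d block before the features block (see the NOTE in the hunk), so store() writes the members in exactly that order. Schematically the stream now looks as follows; the precise tag syntax comes from Persistent's createStartTag/createEndTag helpers and is not spelled out in this diff:

    FeatureMatrixT            (start tag)
      n        ... value ...  (start/end tag pair)
      d        ... value ...
      features ... one SortedVectorSparse block per dimension ...
    FeatureMatrixT            (end tag)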
+ 19 - 8
GMHIKernel.cpp

@@ -199,11 +199,22 @@ void GMHIKernel::setApproximationScheme(const int & _approxScheme)
   this->fmk->setApproximationScheme(_approxScheme);
 }
 
-// ----------------- INCREMENTAL LEARNING METHODS -----------------------
-void GMHIKernel::addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels)
-{
-  // we could add the example to the fmk, but we won't do it here
-  // reason: if we have a balanced learning, we have multiple identical GMHI-objects
-  // if we would add the example here, it would be added as often as we have those objects
-  // therefor we add the example already in the FMKGPHypOpt class
-}
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+// interface specific methods for incremental extensions
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+
+void GMHIKernel::addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement
+			   )
+{
+  //nothing has to be done here; the fmk object already received the new examples in the outer class (FMKGPHyperparameterOptimization)
+}
+
+void GMHIKernel::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement
+				    )
+{
+  //nothing has to be done here; the fmk object already received the new examples in the outer class (FMKGPHyperparameterOptimization)
+}

+ 19 - 3
GMHIKernel.h

@@ -73,14 +73,30 @@ class GMHIKernel : public ImplicitKernelMatrix
     virtual double approxFrobNorm() const;
     virtual void setApproximationScheme(const int & _approxScheme);
     
-    /** Persistent interface */
+    void setFastMinKernel(NICE::FastMinKernel * _fmk){fmk = _fmk;};
+    
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
     virtual void restore ( std::istream & is, int format = 0 ) {};//fmk->restore( is, format );};
     virtual void store ( std::ostream & os, int format = 0 ) const {};//fmk->store( os, format );};
     virtual void clear () {};
     
-    virtual void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels);
+
     
-    void setFastMinKernel(NICE::FastMinKernel * _fmk){fmk = _fmk;};
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////    
+    
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   );
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    );     
      
 };
 

+ 429 - 119
GPHIKClassifier.cpp

@@ -5,11 +5,15 @@
 * @date 02/01/2012
 
 */
+
+// STL includes
 #include <iostream>
 
-#include "core/basics/numerictools.h"
+// NICE-core includes
+#include <core/basics/numerictools.h>
 #include <core/basics/Timer.h>
 
+// gp-hik-core includes
 #include "GPHIKClassifier.h"
 #include "gp-hik-core/parameterizedFunctions/PFAbsExp.h"
 #include "gp-hik-core/parameterizedFunctions/PFExp.h"
@@ -18,43 +22,17 @@
 using namespace std;
 using namespace NICE;
 
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PROTECTED METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
 
-GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & confSection ) 
-{
-  //default settings, may be overwritten lateron
-  gphyper = NULL;
-  pf = NULL;
-  confCopy = NULL;
-  //just a default value
-  uncertaintyPredictionForClassification = false;
-  
-  if ( conf == NULL )
-  {
-     fthrow(Exception, "GPHIKClassifier: the config is NULL -- use a default config and the restore-function instaed!");
-  }
-  else
-    this->init(conf, confSection);
-}
-
-GPHIKClassifier::~GPHIKClassifier()
-{
-  if ( gphyper != NULL )
-    delete gphyper;
-  
-  if (pf != NULL)
-    delete pf;
-
-  if ( confCopy != NULL )
-    delete confCopy;
-}
-
-void GPHIKClassifier::init(const Config *conf, const string & confSection)
+void GPHIKClassifier::init(const Config *conf, const string & s_confSection)
 {
-  double parameterLowerBound = conf->gD(confSection, "parameter_lower_bound", 1.0 );
   double parameterUpperBound = conf->gD(confSection, "parameter_upper_bound", 5.0 );
+  double parameterLowerBound = conf->gD(confSection, "parameter_lower_bound", 1.0 );  
 
-  if (gphyper == NULL)
-    this->gphyper = new FMKGPHyperparameterOptimization;
   this->noise = conf->gD(confSection, "noise", 0.01);
 
   string transform = conf->gS(confSection, "transform", "absexp" );
@@ -74,7 +52,8 @@ void GPHIKClassifier::init(const Config *conf, const string & confSection)
       fthrow(Exception, "Transformation type is unknown " << transform);
     }
   }
-  else{
+  else
+  {
     //we already know the pf from the restore-function
   }
   this->confSection = confSection;
@@ -83,23 +62,23 @@ void GPHIKClassifier::init(const Config *conf, const string & confSection)
   this->uncertaintyPredictionForClassification = conf->gB( confSection, "uncertaintyPredictionForClassification", false );
   
   if (confCopy != conf)
-  {  
+  {
     this->confCopy = new Config ( *conf );
     //we do not want to read until end of file for restoring    
     confCopy->setIoUntilEndOfFile(false);    
   }
    
   //how do we approximate the predictive variance for classification uncertainty?
-  string varianceApproximationString = conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
-  if ( (varianceApproximationString.compare("approximate_rough") == 0) || ((varianceApproximationString.compare("1") == 0)) )
+  string s_varianceApproximation = conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
+  if ( (s_varianceApproximation.compare("approximate_rough") == 0) || ((s_varianceApproximation.compare("1") == 0)) )
   {
     this->varianceApproximation = APPROXIMATE_ROUGH;
   }
-  else if ( (varianceApproximationString.compare("approximate_fine") == 0) || ((varianceApproximationString.compare("2") == 0)) )
+  else if ( (s_varianceApproximation.compare("approximate_fine") == 0) || ((s_varianceApproximation.compare("2") == 0)) )
   {
     this->varianceApproximation = APPROXIMATE_FINE;
   }
-  else if ( (varianceApproximationString.compare("exact") == 0)  || ((varianceApproximationString.compare("3") == 0)) )
+  else if ( (s_varianceApproximation.compare("exact") == 0)  || ((s_varianceApproximation.compare("3") == 0)) )
   {
     this->varianceApproximation = EXACT;
   }
@@ -107,23 +86,79 @@ void GPHIKClassifier::init(const Config *conf, const string & confSection)
   {
     this->varianceApproximation = NONE;
   } 
-  std::cerr << "varianceApproximationStrategy: " << varianceApproximationString  << std::endl;
+  
+  if ( this->verbose )
+    std::cerr << "varianceApproximationStrategy: " << s_varianceApproximation  << std::endl;
+}
+
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+//                 PUBLIC METHODS
+/////////////////////////////////////////////////////
+/////////////////////////////////////////////////////
+GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & s_confSection ) 
+{
+  //default settings, may be overwritten later on
+  gphyper = NULL;
+  pf = NULL;
+  confCopy = NULL;
+  //just a default value
+  uncertaintyPredictionForClassification = false;
+  
+  this->confSection = s_confSection;
+  
+  // if no config was given, we either restore the classifier from an external file, or run ::init with
+  // an empty config (thereby using default values) when the train method is called
+  if ( conf != NULL )
+    this->init(conf, confSection);
 }
 
-void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores )
+GPHIKClassifier::~GPHIKClassifier()
+{
+  if ( gphyper != NULL )
+    delete gphyper;
+  
+  if (pf != NULL)
+    delete pf;
+
+  if ( confCopy != NULL )
+    delete confCopy;
+}
+
+///////////////////// ///////////////////// /////////////////////
+//                         GET / SET
+///////////////////// ///////////////////// ///////////////////// 
+
+std::set<int> GPHIKClassifier::getKnownClassNumbers ( ) const
+{
+  if (gphyper == NULL)
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );  
+  
+  return gphyper->getKnownClassNumbers();
+}
+
+
+///////////////////// ///////////////////// /////////////////////
+//                      CLASSIFIER STUFF
+///////////////////// ///////////////////// /////////////////////
+
+void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores ) const
 {
   double tmpUncertainty;
   this->classify( example, result, scores, tmpUncertainty );
 }
 
-void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores )
+void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores ) const
 {
   double tmpUncertainty;
   this->classify( example, result, scores, tmpUncertainty );
 }
 
-void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores, double & uncertainty )
+void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores, double & uncertainty ) const
 {
+  if (gphyper == NULL)
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );
+  
   scores.clear();
   
   int classno = gphyper->classify ( *example, scores );
@@ -138,9 +173,7 @@ void GPHIKClassifier::classify ( const SparseVector * example,  int & result, Sp
   {
     if (varianceApproximation != NONE)
     {
-      NICE::Vector uncertainties;
-      this->predictUncertainty( example, uncertainties );
-      uncertainty = uncertainties.Max();
+      this->predictUncertainty( example, uncertainty );
     }  
     else
     {
@@ -155,8 +188,11 @@ void GPHIKClassifier::classify ( const SparseVector * example,  int & result, Sp
   }    
 }
 
-void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores, double & uncertainty )
+void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, SparseVector & scores, double & uncertainty ) const
 {
+  if (gphyper == NULL)
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );  
+  
   scores.clear();
   
   int classno = gphyper->classify ( *example, scores );
@@ -166,13 +202,12 @@ void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, Sp
   }
   
   result = scores.maxElement();
-   
+  
   if (uncertaintyPredictionForClassification)
   {
     if (varianceApproximation != NONE)
     {
-      std::cerr << "ERROR: Uncertainty computation is currently not supported for NICE::Vector - use SparseVector instead" << std::endl;
-      uncertainty = std::numeric_limits<double>::max();
+      this->predictUncertainty( example, uncertainty );
     }  
     else
     {
@@ -184,14 +219,23 @@ void GPHIKClassifier::classify ( const NICE::Vector * example,  int & result, Sp
   {
     //do nothing
     uncertainty = std::numeric_limits<double>::max();
-  }    
+  }  
 }
 
 /** training process */
-void GPHIKClassifier::train ( const std::vector< NICE::SparseVector *> & examples, const NICE::Vector & labels )
+void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels )
 {
   if (verbose)
+  {
     std::cerr << "GPHIKClassifier::train" << std::endl;
+  }
+  
+  if ( this->confCopy == NULL )
+  {
+    std::cerr << "WARNING -- no config used so far, initializing values with an empty config now..." << std::endl;
+    NICE::Config tmpConfEmpty ;
+    this->init ( &tmpConfEmpty, this->confSection );
+  }
 
   Timer t;
   t.start();
@@ -201,6 +245,13 @@ void GPHIKClassifier::train ( const std::vector< NICE::SparseVector *> & example
   if (verbose)
     std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
   
+  if (gphyper != NULL)
+     delete gphyper;
+  
+  
+  if ( ( varianceApproximation != APPROXIMATE_FINE) )
+    confCopy->sI ( confSection, "nrOfEigenvaluesToConsiderForVarApprox", 0);
+  
   gphyper = new FMKGPHyperparameterOptimization ( confCopy, pf, fmk, confSection ); 
 
   if (verbose)
@@ -209,14 +260,36 @@ void GPHIKClassifier::train ( const std::vector< NICE::SparseVector *> & example
   // go go go
   gphyper->optimize ( labels );
   if (verbose)
-    std::cerr << "optimization done, now prepare for the uncertainty prediction" << std::endl;
+    std::cerr << "optimization done" << std::endl;
   
-  if ( (varianceApproximation == APPROXIMATE_ROUGH) )
+  if ( ( varianceApproximation != NONE ) )
   {
-    //prepare for variance computation (approximative)
-    gphyper->prepareVarianceApproximation();
+    if ( verbose )
+      std::cerr << "now prepare for the uncertainty prediction" << std::endl;
+    
+    switch (varianceApproximation)    
+    {
+      case APPROXIMATE_ROUGH:
+      {
+        gphyper->prepareVarianceApproximationRough();
+        break;
+      }
+      case APPROXIMATE_FINE:
+      {
+        gphyper->prepareVarianceApproximationFine();
+        break;
+      }    
+      case EXACT:
+      {
+       //nothing to prepare
+        break;
+      }
+      default:
+      {
+       //nothing to prepare
+      }
+    }
   }
-  //for exact variance computation, we do not have to prepare anything
+
 
   // clean up all examples ??
   if (verbose)
@@ -224,10 +297,17 @@ void GPHIKClassifier::train ( const std::vector< NICE::SparseVector *> & example
 }
 
 /** training process */
-void GPHIKClassifier::train ( const std::vector< SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
+void GPHIKClassifier::train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
 { 
   if (verbose)
     std::cerr << "GPHIKClassifier::train" << std::endl;
+  
+  if ( this->confCopy == NULL )
+  {
+    std::cerr << "WARNING -- no config used so far, initializing values with an empty config now..." << std::endl;
+    NICE::Config tmpConfEmpty ;
+    this->init ( &tmpConfEmpty, this->confSection );
+  }  
 
   Timer t;
   t.start();
@@ -236,6 +316,8 @@ void GPHIKClassifier::train ( const std::vector< SparseVector *> & examples, std
   if (verbose)
     std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
   
+  if (gphyper != NULL)
+     delete gphyper;
   gphyper = new FMKGPHyperparameterOptimization ( confCopy, pf, fmk, confSection ); 
 
   if (verbose)
@@ -245,25 +327,39 @@ void GPHIKClassifier::train ( const std::vector< SparseVector *> & examples, std
   if (verbose)
     std::cerr << "optimization done, now prepare for the uncertainty prediction" << std::endl;
   
-  if ( (varianceApproximation == APPROXIMATE_ROUGH) )
+  if ( ( varianceApproximation != NONE ) )
   {
-    //prepare for variance computation (approximative)
-    gphyper->prepareVarianceApproximation();
+    if ( verbose )
+      std::cerr << "now prepare for the uncertainty prediction" << std::endl;
+    
+    switch (varianceApproximation)    
+    {
+      case APPROXIMATE_ROUGH:
+      {
+        gphyper->prepareVarianceApproximationRough();
+        break;
+      }
+      case APPROXIMATE_FINE:
+      {
+        gphyper->prepareVarianceApproximationFine();
+        break;
+      }    
+      case EXACT:
+      {
+       //nothing to prepare
+        break;
+      }
+      default:
+      {
+       //nothing to prepare
+      }
+    }
   }
-  //for exact variance computation, we do not have to prepare anything
 
   // clean up all examples ??
   if (verbose)
     std::cerr << "Learning finished" << std::endl;
 }
 
-void GPHIKClassifier::clear ()
-{
-  if ( gphyper != NULL )
-    delete gphyper;
-  gphyper = NULL;
-}
-
 GPHIKClassifier *GPHIKClassifier::clone () const
 {
   fthrow(Exception, "GPHIKClassifier: clone() not yet implemented" );
@@ -271,107 +367,253 @@ GPHIKClassifier *GPHIKClassifier::clone () const
   return NULL;
 }
   
-void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, NICE::Vector & uncertainties )
+void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const
+{  
+  if (gphyper == NULL)
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );  
+  
+  //we directly store the predictive variance in the variable that will hold the classification uncertainty later on, to save storage
+  switch (varianceApproximation)    
+  {
+    case APPROXIMATE_ROUGH:
+    {
+      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
+      break;
+    }
+    case APPROXIMATE_FINE:
+    {
+      if ( verbose )
+        std::cerr << "predict uncertainty fine" << std::endl;
+      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
+      break;
+    }    
+    case EXACT:
+    {
+      gphyper->computePredictiveVarianceExact( *example, uncertainty );
+      break;
+    }
+    default:
+    {
+      fthrow(Exception, "GPHIKClassifier - your settings disabled the variance approximation needed for uncertainty prediction.");
+//       uncertainty = numeric_limits<double>::max();
+//       break;
+    }
+  }
+}
+
+void GPHIKClassifier::predictUncertainty( const NICE::Vector * example, double & uncertainty ) const
 {  
+  if (gphyper == NULL)
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );  
+  
   //we directly store the predictive variance in the variable that will hold the classification uncertainty later on, to save storage
   switch (varianceApproximation)    
   {
     case APPROXIMATE_ROUGH:
     {
-      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainties );
+      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainty );
       break;
     }
     case APPROXIMATE_FINE:
     {
-      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainties );
+      if ( verbose )
+        std::cerr << "predict uncertainty fine" << std::endl;
+      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainty );
       break;
     }    
     case EXACT:
     {
-      gphyper->computePredictiveVarianceExact( *example, uncertainties );
+      gphyper->computePredictiveVarianceExact( *example, uncertainty );
       break;
     }
     default:
     {
-//       std::cerr << "No Uncertainty Prediction at all" << std::endl;
       fthrow(Exception, "GPHIKClassifier - your settings disabled the variance approximation needed for uncertainty prediction.");
-//       uncertainties.resize( 1 );
-//       uncertainties.set( numeric_limits<double>::max() );
+//       uncertainty = numeric_limits<double>::max();
 //       break;
     }
   }
 }
 
-//---------------------------------------------------------------------
-//                           protected methods
-//---------------------------------------------------------------------
+///////////////////// INTERFACE PERSISTENT /////////////////////
+// interface specific methods for store and restore
+///////////////////// INTERFACE PERSISTENT ///////////////////// 
+
 void GPHIKClassifier::restore ( std::istream & is, int format )
 {
-  if (is.good())
+  //delete everything we knew so far...
+  this->clear();
+  
+  bool b_restoreVerbose ( false );
+#ifdef B_RESTOREVERBOSE
+  b_restoreVerbose = true;
+#endif  
+  
+  if ( is.good() )
   {
-    is.precision (numeric_limits<double>::digits10 + 1);
+    if ( b_restoreVerbose ) 
+      std::cerr << " restore GPHIKClassifier" << std::endl;
+    
+    std::string tmp;
+    is >> tmp; //class name 
     
-    string tmp;
-    is >> tmp;
-    is >> confSection;
+    if ( ! this->isStartTag( tmp, "GPHIKClassifier" ) )
+    {
+        std::cerr << " WARNING - attempt to restore GPHIKClassifier, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+	throw;
+    }   
     
     if (pf != NULL)
     {
       delete pf;
+      pf = NULL;
     }
-    string transform;
-    is >> transform;
-    if ( transform == "absexp" )
+    if ( confCopy != NULL )
     {
-      this->pf = new PFAbsExp ();
-    } else if ( transform == "exp" ) {
-      this->pf = new PFExp ();
-    } else {
-      fthrow(Exception, "Transformation type is unknown " << transform);
+      delete confCopy;
+      confCopy = NULL;
+    }
+    if (gphyper != NULL)
+    {
+      delete gphyper;
+      gphyper = NULL;
     }    
-    pf->restore(is, format);
-            
-    //load every options we determined explicitely
-    confCopy->clear();
-    //we do not want to read until the end of the file
-    confCopy->setIoUntilEndOfFile( false );
-    confCopy->restore(is, format);
+    
+    is.precision (numeric_limits<double>::digits10 + 1);
+    
+    bool b_endOfBlock ( false ) ;
+    
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "GPHIKClassifier" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }      
+      
+      tmp = this->removeStartTag ( tmp );
+      
+      if ( b_restoreVerbose )
+	std::cerr << " currently restoring section " << tmp << " in GPHIKClassifier" << std::endl;
+      
+      if ( tmp.compare("confSection") == 0 )
+      {
+        is >> confSection;        
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );	
+      }
+      else if ( tmp.compare("pf") == 0 )
+      {
+	
+	is >> tmp; // start of block 
+	if ( this->isEndTag( tmp, "pf" ) )
+	{
+	  std::cerr << " ParameterizedFunction object cannot be restored. Aborting..." << std::endl;
+	  throw;
+	} 
+	
+	std::string transform = this->removeStartTag ( tmp );
+	
+
+	if ( transform == "PFAbsExp" )
+	{
+	  this->pf = new PFAbsExp ();
+	} else if ( transform == "PFExp" ) {
+	  this->pf = new PFExp ();
+	} else {
+	  fthrow(Exception, "Transformation type is unknown " << transform);
+	}
+	
+	pf->restore(is, format);
+	
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );	
+      } 
+      else if ( tmp.compare("ConfigCopy") == 0 )
+      {
+	// possibly obsolete safety checks
+	if ( confCopy == NULL )
+	  confCopy = new Config;
+	confCopy->clear();
+	
+	
+	//we do not want to read until the end of the file
+	confCopy->setIoUntilEndOfFile( false );
+	//load all options we determined explicitly
+	confCopy->restore(is, format);
+	
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );	
+      }
+      else if ( tmp.compare("gphyper") == 0 )
+      {
+	if ( gphyper == NULL )
+	  gphyper = new NICE::FMKGPHyperparameterOptimization();
+	
+	//then, load everything that we stored explicitly,
+	// including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
+	gphyper->restore(is, format);  
+		
+	is >> tmp; // end of block 
+	tmp = this->removeEndTag ( tmp );	
+      }       
+      else
+      {
+	std::cerr << "WARNING -- unexpected section " << tmp << " during restoration of GPHIKClassifier -- aborting" << std::endl;
+	throw;	
+      }
+    }
 
+	
     //load all settings as well as default options
-    this->init(confCopy, confSection); 
-  
-    //first read things from the config
-    gphyper->initialize ( confCopy, pf );
-    
-    //then, load everything that we stored explicitely,
-    // including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
-    gphyper->restore(is, format);      
+    if ( b_restoreVerbose )
+      std::cerr << "run this->init" << std::endl;
+    this->init(confCopy, confSection);    
+    if ( b_restoreVerbose )
+      std::cerr << "run gphyper->initialize" << std::endl;
+    gphyper->initialize ( confCopy, pf, NULL, confSection );
   }
   else
   {
     std::cerr << "GPHIKClassifier::restore -- InStream not initialized - restoring not possible!" << std::endl;
+    throw;
   }
 }
 
 void GPHIKClassifier::store ( std::ostream & os, int format ) const
 {
+  if (gphyper == NULL)
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );  
+  
   if (os.good())
   {
+    // show starting point
+    os << this->createStartTag( "GPHIKClassifier" ) << std::endl;    
+    
     os.precision (numeric_limits<double>::digits10 + 1);
     
-    os << "confSection: "<<  confSection << std::endl;
+    os << this->createStartTag( "confSection" ) << std::endl;
+    os << confSection << std::endl;
+    os << this->createEndTag( "confSection" ) << std::endl; 
     
-    os << pf->sayYourName() << std::endl;
+    os << this->createStartTag( "pf" ) << std::endl;
     pf->store(os, format);
-    
+    os << this->createEndTag( "pf" ) << std::endl; 
+
+    os << this->createStartTag( "ConfigCopy" ) << std::endl;
     //we do not want to read until end of file for restoring    
     confCopy->setIoUntilEndOfFile(false);
-    confCopy->store(os,format);  
+    confCopy->store(os,format);
+    os << this->createEndTag( "ConfigCopy" ) << std::endl; 
     
+    os << this->createStartTag( "gphyper" ) << std::endl;
     //store the underlying data
     //will be done in gphyper->store(of,format)
     //store the optimized parameter values and all that stuff
-    gphyper->store(os, format); 
+    gphyper->store(os, format);
+    os << this->createEndTag( "gphyper" ) << std::endl;   
+    
+    
+    // done
+    os << this->createEndTag( "GPHIKClassifier" ) << std::endl;    
   }
   else
   {
@@ -379,16 +621,84 @@ void GPHIKClassifier::store ( std::ostream & os, int format ) const
   }
 }
 
-void GPHIKClassifier::addExample( const NICE::SparseVector * example, const double & label, const bool & performOptimizationAfterIncrement)
+void GPHIKClassifier::clear ()
 {
-  gphyper->addExample( *example, label, performOptimizationAfterIncrement );
+  if ( gphyper != NULL )
+  {
+    delete gphyper;
+    gphyper = NULL;
+  }
+  
+  if (pf != NULL)
+  {
+    delete pf;
+    pf = NULL;
+  }
+
+  if ( confCopy != NULL )
+  {
+    delete confCopy; 
+    confCopy = NULL;
+  } 
 }
 
-void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector *> & newExamples, const NICE::Vector & newLabels, const bool & performOptimizationAfterIncrement)
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+// interface specific methods for incremental extensions
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+
+void GPHIKClassifier::addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement
+			   )
+{
+  // developer notes on the state of the incremental extension:
+  //   done -- option for starting with an empty classifier (train() is called with converted input below)
+  //   done -- option to go from 2 to 3 classes
+  //   done -- option to go from 1 to 2 classes without adding a new alpha vector
+  //   done -- variance matrices checked in the update
+  //   done -- code adapted for addMultipleExamples
+  // TODO add check option for variance update
+  
+  
+  if ( this->gphyper == NULL )
+  {
+    //call train method instead
+    std::cerr << "Classifier not trained yet -- running initial training instead of an incremental update!" << std::endl;
+     
+    std::vector< const NICE::SparseVector *> examplesVec;
+    examplesVec.push_back ( example );
+    
+    NICE::Vector labelsVec ( 1 , label );
+    
+    this->train ( examplesVec, labelsVec );
+  }
+  else
+  {
+    this->gphyper->addExample( example, label, performOptimizationAfterIncrement );
+
+    if ( this->verbose )
+      std::cerr << " --- GPHIKClassifier::addExample done --- " << std::endl;
+  }
+}
+
+void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement
+				    )
 {
   //are new examples available? If not, nothing has to be done
   if ( newExamples.size() < 1)
     return;
-  
-  gphyper->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );
-}
+
+  if ( this->gphyper == NULL )
+  {
+    //call train method instead
+    
+    this->train ( newExamples, newLabels );    
+    
+    if ( this->verbose )
+      std::cerr << "train method successfully called in addMultipleExamples" << std::endl;
+  }
+  else
+  {
+    this->gphyper->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );     
+  }
+}

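Taken together, the reworked classifier can be driven roughly as follows. This is a hedged usage sketch rather than code from the commit; the config setter sB is an assumption (only the setter sI appears above), and the example data is left abstract:

  #include <vector>
  #include "gp-hik-core/GPHIKClassifier.h"

  NICE::Config conf;
  conf.sB ( "GPHIKClassifier", "verbose", true );      // assumed setter, analogous to sI above
  NICE::GPHIKClassifier classifier ( &conf );

  // training data as const pointers, matching the new train() signature
  std::vector< const NICE::SparseVector * > examples;  // filled elsewhere
  NICE::Vector labels;                                 // one label per example
  classifier.train ( examples, labels );

  int result;
  NICE::SparseVector scores;
  double uncertainty;
  classifier.classify ( examples[0], result, scores, uncertainty );

  // online extension via the OnlineLearnable interface; as implemented
  // above, this falls back to train() if no model exists yet
  const NICE::SparseVector * newExample;               // filled elsewhere
  classifier.addExample ( newExample, 1.0, true );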
+ 65 - 21
GPHIKClassifier.h

@@ -8,13 +8,19 @@
 #ifndef _NICE_GPHIKCLASSIFIERINCLUDE
 #define _NICE_GPHIKCLASSIFIERINCLUDE
 
+// STL includes
 #include <string>
 #include <limits>
 
+// NICE-core includes
 #include <core/basics/Config.h>
+#include <core/basics/Persistent.h>
+// 
 #include <core/vector/SparseVectorT.h>
 
-#include "FMKGPHyperparameterOptimization.h"
+// gp-hik-core includes
+#include "gp-hik-core/FMKGPHyperparameterOptimization.h"
+#include "gp-hik-core/OnlineLearnable.h"
 #include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
 
 namespace NICE {
@@ -25,7 +31,7 @@ namespace NICE {
  * @author Erik Rodner, Alexander Freytag
  */
  
-class GPHIKClassifier
+class GPHIKClassifier : public NICE::Persistent, public NICE::OnlineLearnable
 {
 
   protected:
@@ -57,18 +63,27 @@ class GPHIKClassifier
     * @brief initialize the classifier from a given config, reading all relevant settings
     * @param conf config specifying all settings
     * @param s_confSection section of the config to be parsed
     */    
-    void init(const NICE::Config *conf, const std::string & confSection);
+    void init(const NICE::Config *conf, const std::string & s_confSection);
        
 
   public:
 
     /** simple constructor */
-    GPHIKClassifier( const NICE::Config *conf, const std::string & confSection = "GPHIKClassifier" );
+    GPHIKClassifier( const NICE::Config *conf = NULL, const std::string & s_confSection = "GPHIKClassifier" );
       
     /** simple destructor */
     ~GPHIKClassifier();
+    
+    ///////////////////// ///////////////////// /////////////////////
+    //                         GET / SET
+    ///////////////////// ///////////////////// /////////////////////      
+    
+    std::set<int> getKnownClassNumbers ( ) const;    
    
-
+    ///////////////////// ///////////////////// /////////////////////
+    //                      CLASSIFIER STUFF
+    ///////////////////// ///////////////////// /////////////////////      
+    
     /** 
      * @brief classify a given example with the previously learnt model
      * @date 19-06-2012 (dd-mm-yyyy)
@@ -77,7 +92,7 @@ class GPHIKClassifier
      * @param result (int) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      */        
-    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores );
+    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -88,7 +103,7 @@ class GPHIKClassifier
      * @param scores (SparseVector) classification scores for known classes
      * @param uncertainty (double*) predictive variance of the classification result, if computed
      */    
-    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores, double & uncertainty );
+    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores, double & uncertainty ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -99,7 +114,7 @@ class GPHIKClassifier
      * @param result (int) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
      */        
-    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores );
+    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores ) const;
     
     /** 
      * @brief classify a given example with the previously learnt model
@@ -109,9 +124,9 @@ class GPHIKClassifier
      * @param example (non-sparse Vector) to be classified given in a non-sparse representation
      * @param result (int) class number of most likely class
      * @param scores (SparseVector) classification scores for known classes
-     * @param uncertainty (double*) predictive variance of the classification result, if computed
+     * @param uncertainty (double) predictive variance of the classification result, if computed
      */    
-    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores, double & uncertainty );    
+    void classify ( const NICE::Vector * example,  int & result, NICE::SparseVector & scores, double & uncertainty ) const;    
 
     /**
      * @brief train this classifier using a given set of examples and a given set of binary label vectors 
@@ -120,7 +135,7 @@ class GPHIKClassifier
      * @param examples (std::vector< NICE::SparseVector *>) training data given in a sparse representation
      * @param labels (Vector) class labels (multi-class)
      */
-    void train ( const std::vector< NICE::SparseVector *> & examples, const NICE::Vector & labels );
+    void train ( const std::vector< const NICE::SparseVector *> & examples, const NICE::Vector & labels );
     
     /** 
      * @brief train this classifier using a given set of examples and a given set of binary label vectors 
@@ -129,13 +144,8 @@ class GPHIKClassifier
      * @param examples examples to use given in a sparse data structure
      * @param binLabels corresponding binary labels with class no. There is no need here that every examples has only on positive entry in this set (1,-1)
      */
-    void train ( const std::vector< NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
+    void train ( const std::vector< const NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
     
-    /** Persistent interface */
-    void restore ( std::istream & is, int format = 0 );
-    void store ( std::ostream & os, int format = 0 ) const;
-    void clear ();
-
     GPHIKClassifier *clone () const;
 
     /** 
@@ -143,12 +153,46 @@ class GPHIKClassifier
      * @date 19-06-2012 (dd-mm-yyyy)
      * @author Alexander Freytag
      * @param example example for which the classification uncertainty shall be predicted, given in a sparse representation
-     * @param uncertainties contains the resulting classification uncertainties (1 entry for standard setting, m entries for binary-balanced setting)
+     * @param uncertainty contains the resulting classification uncertainty
+     */       
+    void predictUncertainty( const NICE::SparseVector * example, double & uncertainty ) const;
+    
+    /** 
+     * @brief prediction of classification uncertainty
+     * @date 19-12-2013 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example example for which the classification uncertainty shall be predicted, given in a non-sparse representation
+     * @param uncertainty contains the resulting classification uncertainty
      */       
-    void predictUncertainty( const NICE::SparseVector * example, NICE::Vector & uncertainties );
+    void predictUncertainty( const NICE::Vector * example, double & uncertainty ) const;    
+    
+
+
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////   
     
-    void addExample( const NICE::SparseVector * example, const double & label, const bool & performOptimizationAfterIncrement = true);
-    void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples, const NICE::Vector & newLabels, const bool & performOptimizationAfterIncrement = true);
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
+    
+    
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   );
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    );       
+
+
+
 };
 
 }

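Because the constructor now accepts conf = NULL, a trained model can be restored into a fresh object without any config; a minimal round-trip sketch, assuming standard file streams and an already trained classifier:

  #include <fstream>
  #include "gp-hik-core/GPHIKClassifier.h"

  // store the trained classifier
  std::ofstream ofs ( "classifier.model" );
  classifier.store ( ofs );
  ofs.close ();

  // restore into a fresh object -- no config needed, since restore() runs
  // init() internally with the config serialized alongside the model
  NICE::GPHIKClassifier restored;
  std::ifstream ifs ( "classifier.model" );
  restored.restore ( ifs );
  ifs.close ();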
+ 65 - 92
GPLikelihoodApprox.cpp

@@ -5,22 +5,28 @@
 * @date 02/09/2012
 
 */
+
+// STL includes
 #include <iostream>
 
+// NICE-core includes
 #include <core/algebra/CholeskyRobust.h>
+#include <core/algebra/ILSConjugateGradients.h>
+// 
+#include <core/basics/Timer.h>
+// 
 #include <core/vector/Algorithms.h>
 #include <core/vector/Eigen.h>
 
-#include <core/basics/Timer.h>
-#include <core/algebra/ILSConjugateGradients.h>
+//stuff used for verification only
 #include "kernels/GeneralizedIntersectionKernelFunction.h"
 #include "kernels/IntersectionKernelFunction.h"
 
-
-#include "GPLikelihoodApprox.h"
-#include "IKMLinearCombination.h"
-#include "GMHIKernel.h"
-#include "algebra/LogDetApproxBaiAndGolub.h"
+// gp-hik-core includes
+#include "gp-hik-core/GPLikelihoodApprox.h"
+#include "gp-hik-core/IKMLinearCombination.h"
+#include "gp-hik-core/GMHIKernel.h"
+#include "gp-hik-core/algebra/LogDetApproxBaiAndGolub.h"
 
 
 using namespace std;
@@ -28,7 +34,7 @@ using namespace NICE;
 using namespace OPTIMIZATION;
 
 
-GPLikelihoodApprox::GPLikelihoodApprox( const map<int, Vector> & binaryLabels,
+GPLikelihoodApprox::GPLikelihoodApprox( const std::map<int, NICE::Vector> & binaryLabels,
                                         ImplicitKernelMatrix *ikm,
                                         IterativeLinearSolver *linsolver, 
                                         EigValues *eig,
@@ -52,21 +58,37 @@ GPLikelihoodApprox::GPLikelihoodApprox( const map<int, Vector> & binaryLabels,
   this->verifyApproximation = verifyApproximation;
   
   this->nrOfEigenvaluesToConsider = _nrOfEigenvaluesToConsider;
-  
-  lastAlphas = NULL;
-  
+    
   this->verbose = false;
   this->debug = false;
   
-  this->usePreviousAlphas = true;
-
+  this->initialAlphaGuess = NULL;
 }
 
 GPLikelihoodApprox::~GPLikelihoodApprox()
 {
-  //delete the pointer, but not the content (which is stored somewhere else)
-  if (lastAlphas != NULL)
-    lastAlphas = NULL;  
+  //we do not have to delete the memory here, since it will be handled externally...
+  // TODO however, if we ever copy the whole vector, then we also have to delete it here accordingly! Check this!
+  if ( this->initialAlphaGuess != NULL )
+    this->initialAlphaGuess = NULL;
+}
+
+const std::map<int, Vector> & GPLikelihoodApprox::getBestAlphas () const
+{
+  if ( this->min_alphas.size() > 0 )
+  {
+    // did we already compute a locally optimal solution?
+    return this->min_alphas;
+  }
+  else if ( this->initialAlphaGuess != NULL)
+  {
+    std::cerr << "no alpha vectors known so far, taking the initial guess instead" << std::endl;
+    // computation not started, but initial guess was given, so use this one
+    return *(this->initialAlphaGuess);
+  }  
+  
+  // nothing known, min_alphas will be empty
+  return this->min_alphas;
 }
 
 void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax )
@@ -98,9 +120,9 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   cerr << "chol * chol^T: " << ( choleskyMatrix * choleskyMatrix.transpose() )(0,0,4,4) << endl;
 
   double gt_dataterm = 0.0;
-  for ( map< int, NICE::Vector >::const_iterator i = yset.begin(); i != yset.end(); i++ )
+  for ( std::map< int, NICE::Vector >::const_iterator i = yset.begin(); i != yset.end(); i++ )
   {
-    const Vector & y = i->second;
+    const NICE::Vector & y = i->second;
     Vector gt_alpha;
     choleskySolve ( choleskyMatrix, y, gt_alpha );
     cerr << "cholesky error: " << (K*gt_alpha - y).normL2() << endl;
@@ -113,31 +135,23 @@ void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatri
   cerr << "Something of K: " << K(0, 0, 4, 4) << endl;
   cerr << "frob norm: gt:" << K.frobeniusNorm() << endl;
   
-  /*try {
-    Vector *eigenv = eigenvalues ( K ); 
-    cerr << "lambda_max: gt:" << eigenv->Max() << " est:" << lambdaMax << endl; 
-    delete eigenv;
-  } catch (...) {
-    cerr << "NICE eigenvalues function failed!" << endl;
-  }*/
-
+  
   double gt_nlikelihood = gt_logdet + gt_dataterm;
   cerr << "OPTGT: " << mypara << " " << gt_nlikelihood << " " << gt_logdet << " " << gt_dataterm << endl;
 }
 
-void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x)
+void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x, const NICE::Vector & eigenValues )
 {
   Timer t;
-//   NICE::Vector diagonalElements;
   
-//   ikm->getDiagonalElements ( diagonalElements );
+  NICE::Vector diagonalElements; 
+  ikm->getDiagonalElements ( diagonalElements );
 
   // set simple jacobi pre-conditioning
   ILSConjugateGradients *linsolver_cg = dynamic_cast<ILSConjugateGradients *> ( linsolver );
 
-//   //TODO why do we need this?  
-//   if ( linsolver_cg != NULL )
-//     linsolver_cg->setJacobiPreconditioner ( diagonalElements );
+  if ( linsolver_cg != NULL )
+    linsolver_cg->setJacobiPreconditioner ( diagonalElements );
   
 
   // all alpha vectors will be stored!
@@ -153,8 +167,6 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x)
     if (verbose)
     {
       std::cerr << "Solving linear equation system for class " << classCnt << " ..." << std::endl;
-      std::cerr << "Size of the kernel matrix " << ikm->rows() << std::endl;
-      std::cerr << "binary label: " << j->second << std::endl;
     }
 
     /** About finding a good initial solution
@@ -171,23 +183,10 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x)
      * v = y ....which is somehow a weird assumption (cf Kernel PCA)
      *  This reduces the number of iterations by 5 or 8
      */
-    Vector alpha;
+    NICE::Vector alpha;
     
-    if ( (usePreviousAlphas) && (lastAlphas != NULL) )
-    {
-      std::map<int, NICE::Vector>::iterator alphaIt = lastAlphas->begin();
-      alpha = (*lastAlphas)[classCnt];
-    }
-    else  
-    {
-      //TODO hand over the eigenmax
-      alpha = (binaryLabels[classCnt] ); //* (1.0 / eigenmax[0]) );
-    }
+    alpha = (binaryLabels[classCnt] * (1.0 / eigenValues[0]) );
     
-    NICE::Vector initialAlpha;
-    if ( verbose )
-     initialAlpha = alpha;
-
     if ( verbose )
       std::cerr << "Using the standard solver ..." << std::endl;
 
@@ -205,8 +204,8 @@ void GPLikelihoodApprox::computeAlphaDirect(const OPTIMIZATION::matrix_type & x)
 
 double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
 {
-  Vector xv;
- 
+  NICE::Vector xv;
+   
   xv.resize ( x.rows() );
   for ( uint i = 0 ; i < x.rows(); i++ )
     xv[i] = x(i,0);
@@ -215,7 +214,7 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   unsigned long hashValue = xv.getHashValue();
   if (verbose)  
     std::cerr << "Current parameter: " << xv << " (weird hash value is " << hashValue << ")" << std::endl;
-  map<unsigned long, double>::const_iterator k = alreadyVisited.find(hashValue);
+  std::map<unsigned long, double>::const_iterator k = alreadyVisited.find(hashValue);
   
   if ( k != alreadyVisited.end() )
   {
@@ -244,8 +243,8 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   if (verbose)  
     std::cerr << "Calculating eigendecomposition " << ikm->rows() << " x " << ikm->cols() << std::endl;
   t.start();
-  Vector eigenmax;
-  Matrix eigenmaxvectors;
+  NICE::Vector eigenmax;
+  NICE::Matrix eigenmaxvectors;
  
   int rank = nrOfEigenvaluesToConsider;
 
@@ -255,9 +254,7 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   // the current implementation converges very quickly
   //old version: just use the first eigenvalue
   
-  //NOTE
-  // in theory, we have these values already on hand since we've done it in FMKGPHypOpt.
-  // Think about wether to give them as input to this function or not
+  // we have to re-compute the eigenvalues and eigenvectors in all cases, since we changed the hyperparameter and thereby the kernel matrix
   eig->getEigenvalues( *ikm, eigenmax, eigenmaxvectors, rank ); 
   if (verbose)
     std::cerr << "eigenmax: " << eigenmax << std::endl;
@@ -278,12 +275,12 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
   
 
   // all alpha vectors will be stored!
-  map<int, Vector> alphas;
+  std::map<int, NICE::Vector> alphas;
 
   // This has to be done m times for the multi-class case
   if (verbose)
     std::cerr << "run ILS for every bin label. binaryLabels.size(): " << binaryLabels.size() << std::endl;
-  for ( map<int, Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
+  for ( std::map<int, NICE::Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
   {
     // (b) y^T (K+sI)^{-1} y
     int classCnt = j->first;
@@ -307,22 +304,18 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
      * v = y ....which is somehow a weird assumption (cf Kernel PCA)
      *  This reduces the number of iterations by 5 or 8
      */
-    Vector alpha;
-    
-    if ( (usePreviousAlphas) && (lastAlphas != NULL) )
+    NICE::Vector alpha;
+    if ( this->initialAlphaGuess != NULL )
     {
-      std::map<int, NICE::Vector>::iterator alphaIt = lastAlphas->begin();
-      alpha = (*lastAlphas)[classCnt];
+      alpha = this->initialAlphaGuess->find(classCnt)->second;
     }
-    else  
+    else
     {
-      alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );
+      alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );      
     }
     
-    Vector initialAlpha;
-    if ( verbose )
-     initialAlpha = alpha;
 
+    
     if ( verbose )
       cerr << "Using the standard solver ..." << endl;
 
@@ -330,22 +323,6 @@ double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
     linsolver->solveLin ( *ikm, binaryLabels[classCnt], alpha );
     t.stop();
    
-    //TODO This is only important for the incremental learning stuff.
-//     if ( verbose )
-//     {
-//       double initialAlphaNorm ( initialAlpha.normL1() );
-//       //compute the difference
-//       initialAlpha -= alpha;
-//       //take the abs of the differences
-//       initialAlpha.absInplace();
-//       //and compute a final score using a suitable norm
-// //       double difference( initialAlpha.normInf() );
-//       double difference( initialAlpha.normL1() );
-//       std::cerr << "debug -- last entry of new alpha: " << abs(alpha[alpha.size() -1 ]) << std::endl;
-//       std::cerr << "debug -- difference using inf norm: " << difference  << std::endl;
-//       std::cerr << "debug -- relative difference using inf norm: " << difference / initialAlphaNorm  << std::endl;
-//     }
-
 
     if ( verbose )
       std::cerr << "Time used for solving (K + sigma^2 I)^{-1} y: " << t.getLast() << std::endl;
@@ -418,21 +395,17 @@ void GPLikelihoodApprox::setParameterUpperBound(const double & _parameterUpperBo
   parameterUpperBound = _parameterUpperBound;
 }
 
-void GPLikelihoodApprox::setLastAlphas(std::map<int, NICE::Vector> * _lastAlphas)
+void GPLikelihoodApprox::setInitialAlphaGuess(std::map< int, NICE::Vector >* _initialAlphaGuess)
 {
-  lastAlphas = _lastAlphas;
+  this->initialAlphaGuess = _initialAlphaGuess;
 }
 
+
 void GPLikelihoodApprox::setBinaryLabels(const std::map<int, Vector> & _binaryLabels)
 {
   binaryLabels = _binaryLabels;
 }
 
-void GPLikelihoodApprox::setUsePreviousAlphas( const bool & _usePreviousAlphas )
-{
-  this->usePreviousAlphas = _usePreviousAlphas; 
-}
-
 void GPLikelihoodApprox::setVerbose( const bool & _verbose )
 {
   this->verbose = _verbose;

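The seeding logic that replaces the removed usePreviousAlphas switch is worth restating in one place. For y roughly aligned with the dominant eigenvector of K (K v = lambda_max v), (K + sigma^2 I)^{-1} y is approximately y / lambda_max, which motivates the cold-start guess; a consolidated sketch of what evaluate() now does per binary problem, with names as in the code above:

  NICE::Vector alpha;
  if ( this->initialAlphaGuess != NULL )
  {
    // warm start: reuse an externally supplied solution,
    // e.g., the alphas of a previous incremental-learning step
    alpha = this->initialAlphaGuess->find ( classCnt )->second;
  }
  else
  {
    // cold start: alpha = y / lambda_max
    alpha = binaryLabels[classCnt] * ( 1.0 / eigenmax[0] );
  }
  linsolver->solveLin ( *ikm, binaryLabels[classCnt], alpha );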
+ 17 - 14
GPLikelihoodApprox.h

@@ -8,18 +8,21 @@
 #ifndef _NICE_GPLIKELIHOODAPPROXINCLUDE
 #define _NICE_GPLIKELIHOODAPPROXINCLUDE
 
+// STL includes
 #include <map>
 
-#include <core/vector/VectorT.h>
-#include <core/basics/Config.h>
+// NICE-core includes
 #include <core/algebra/EigValues.h>
 #include <core/algebra/IterativeLinearSolver.h>
-
+// 
+#include <core/basics/Config.h>
 #include <core/optimization/blackbox/CostFunction.h>
+// 
+#include <core/vector/VectorT.h>
 
-#include "FastMinKernel.h"
-#include "ImplicitKernelMatrix.h"
-
+// gp-hik-core includes
+#include "gp-hik-core/FastMinKernel.h"
+#include "gp-hik-core/ImplicitKernelMatrix.h"
 #include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
 
 namespace NICE {
@@ -57,7 +60,7 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     void calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax );
 
     //! initial guess for the alpha vectors, e.g., the solution from a previous IL step
-    std::map<int, Vector> * lastAlphas;
+    std::map<int, NICE::Vector> * initialAlphaGuess;
     
     //! alpha vectors of the best solution
     std::map<int, Vector> min_alphas;
@@ -84,8 +87,6 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     /** debug flag for several outputs useful for debugging*/
     bool debug;  
     
-    /** after adding new examples, shall the previous alpha solution be used as an initial guess?*/
-    bool usePreviousAlphas;
 
   public:
 
@@ -111,7 +112,7 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
     *
     * @return void
     */    
-    void computeAlphaDirect(const OPTIMIZATION::matrix_type & x);
+    void computeAlphaDirect(const OPTIMIZATION::matrix_type & x, const NICE::Vector & eigenValues);
     
     /**
     * @brief Evaluate the likelihood for given hyperparameters
@@ -124,18 +125,20 @@ class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
      
     
     // ------ get and set methods ------
-    const Vector & getBestParameters () const { return min_parameter; };
-    const std::map<int, Vector> & getBestAlphas () const { return min_alphas; };
+    const NICE::Vector & getBestParameters () const { return min_parameter; };
+    const std::map<int, Vector> & getBestAlphas () const;
     
     void setParameterLowerBound(const double & _parameterLowerBound);
     void setParameterUpperBound(const double & _parameterUpperBound);
     
-    void setLastAlphas(std::map<int, NICE::Vector> * _lastAlphas);
+    void setInitialAlphaGuess(std::map<int, NICE::Vector> * _initialAlphaGuess);
     void setBinaryLabels(const std::map<int, Vector> & _binaryLabels);
     
-    void setUsePreviousAlphas( const bool & _usePreviousAlphas );
     void setVerbose( const bool & _verbose );
     void setDebug( const bool & _debug );
+    
+    bool getVerbose ( ) { return verbose; } ;
+    bool getDebug ( ) { return debug; } ;
 };
 
 }

+ 83 - 16
IKMLinearCombination.cpp

@@ -3,11 +3,13 @@
 * @brief Combination of several (implicit) kernel matrices, such as noise matrix and gp-hik kernel matrix (Implementation)
 * @author Erik Rodner, Alexander Freytag
 * @date 02/14/2012
-
 */
+
+// STL includes
 #include <iostream>
 
-#include "IKMLinearCombination.h"
+// gp-hik-core includes
+#include "gp-hik-core/IKMLinearCombination.h"
 
 using namespace NICE;
 using namespace std;
@@ -15,28 +17,31 @@ using namespace std;
 
 IKMLinearCombination::IKMLinearCombination()
 {
-  verbose = false;
+  this->verbose = false;
 }
 
 IKMLinearCombination::~IKMLinearCombination()
 {
-  if (matrices.size() != 0)
+  if ( this->matrices.size() != 0)
   {
-    for (int i = 0; i < matrices.size(); i++)
-      delete matrices[i];
+    for (int i = 0; i < this->matrices.size(); i++)
+      delete this->matrices[i];
   }
 }
 
+///////////////////// ///////////////////// /////////////////////
+//                         GET / SET
+///////////////////// ///////////////////// /////////////////////
 
 void IKMLinearCombination::getDiagonalElements ( Vector & diagonalElements ) const
 {
   diagonalElements.resize ( rows() );
   diagonalElements.set(0.0);
   
-  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  for ( std::vector<NICE::ImplicitKernelMatrix *>::const_iterator i = this->matrices.begin(); i != this->matrices.end(); i++ )
   {
-    ImplicitKernelMatrix *ikm = *i;
-    Vector diagonalElementsSingle;
+    NICE::ImplicitKernelMatrix *ikm = *i;
+    NICE::Vector diagonalElementsSingle;
     ikm->getDiagonalElements ( diagonalElementsSingle );
     diagonalElements += diagonalElementsSingle;
   }
@@ -45,9 +50,9 @@ void IKMLinearCombination::getDiagonalElements ( Vector & diagonalElements ) con
 void IKMLinearCombination::getFirstDiagonalElement ( double & diagonalElement ) const
 {
   diagonalElement = 0.0;
-  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  for ( std::vector<NICE::ImplicitKernelMatrix *>::const_iterator i = this->matrices.begin(); i != this->matrices.end(); i++ )
   {
-    ImplicitKernelMatrix *ikm = *i;
+    NICE::ImplicitKernelMatrix *ikm = *i;
     double firstElem;
     ikm->getFirstDiagonalElement(firstElem);
     diagonalElement += firstElem;
@@ -219,12 +224,74 @@ ImplicitKernelMatrix * IKMLinearCombination::getModel(const uint & idx) const
     return NULL;
 }
 
-// ----------------- INCREMENTAL LEARNING METHODS -----------------------
-void IKMLinearCombination::addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels)
+///////////////////// INTERFACE PERSISTENT /////////////////////
+// interface specific methods for store and restore
+///////////////////// INTERFACE PERSISTENT /////////////////////
+
+void IKMLinearCombination::restore ( std::istream & is, int format )
 {
-  for ( vector<ImplicitKernelMatrix *>::iterator i = matrices.begin(); i != matrices.end(); i++ )
+  if (is.good())
   {
-    ImplicitKernelMatrix *ikm = *i;
-    ikm->addExample(x, binLabels);
+    is.precision (std::numeric_limits<double>::digits10 + 1); 
+    
+    std::string tmp;    
+
+    bool b_endOfBlock ( false ) ;
+    
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "IKMLinearCombination" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }                  
+      
+      tmp = this->removeStartTag ( tmp );
+            
+      is >> tmp; // end of block 
+      tmp = this->removeEndTag ( tmp );
+    }
+  }
+}      
+
+void IKMLinearCombination::store ( std::ostream & os, int format ) const
+{
+  if ( os.good() )
+  {
+    // show starting point
+    os << this->createStartTag( "IKMLinearCombination" ) << std::endl;
+      
+    // done
+    os << this->createEndTag( "IKMLinearCombination" ) << std::endl;    
   }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }  
+}
+
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+// interface specific methods for incremental extensions
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+
+void IKMLinearCombination::addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement
+			   )
+{
+  for ( std::vector<NICE::ImplicitKernelMatrix *>::iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    (*i)->addExample( example, label, performOptimizationAfterIncrement );
+  }  
+}
+
+void IKMLinearCombination::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement
+				    )
+{
+  for ( std::vector<NICE::ImplicitKernelMatrix *>::iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    (*i)->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );
+  }  
 }

+ 23 - 5
IKMLinearCombination.h

@@ -8,8 +8,12 @@
 #ifndef _NICE_IKMLINEARCOMBINATIONINCLUDE
 #define _NICE_IKMLINEARCOMBINATIONINCLUDE
 
+// STL includes
 #include <vector>
-#include "ImplicitKernelMatrix.h"
+
+// gp-hik-core includes
+#include "gp-hik-core/ImplicitKernelMatrix.h"
+#include "gp-hik-core/OnlineLearnable.h"
 
 namespace NICE {
 
@@ -67,12 +71,26 @@ class IKMLinearCombination : public ImplicitKernelMatrix
     ImplicitKernelMatrix * getModel(const uint & idx) const;
     inline int getNumberOfModels(){return matrices.size();};
     
-    /** Persistent interface */
-    virtual void restore ( std::istream & is, int format = 0 ) {};
-    virtual void store ( std::ostream & os, int format = 0 ) const {};  
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    virtual void restore ( std::istream & is, int format = 0 ) ;
+    virtual void store ( std::ostream & os, int format = 0 ) const;  
     virtual void clear () {};
     
-    void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels);
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////    
+    
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   );
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    );      
 
 };
 

+ 86 - 110
IKMNoise.cpp

@@ -3,11 +3,13 @@
 * @author Erik Rodner, Alexander Freytag
 * @brief Noise matrix (for model regularization) as an implicit kernel matrix (Implementation)
 * @date 02/14/2012
-
 */
+
+// STL includes
 #include <iostream>
 #include <limits>
 
+// NICE-core includes
 #include "IKMNoise.h"
 
 using namespace NICE;
@@ -18,8 +20,6 @@ IKMNoise::IKMNoise()
   this->size = 0;
   this->noise = 0.1;
   this->optimizeNoise = false;
-  this->np = 0;
-  this->nn = 0;
   this->verbose = false;
 }
 
@@ -28,30 +28,7 @@ IKMNoise::IKMNoise( uint size, double noise, bool optimizeNoise )
   this->size = size;
   this->noise = noise;
   this->optimizeNoise = optimizeNoise;
-  this->np = 0;
-  this->nn = 0;
-  this->verbose = false;
-}
-
-IKMNoise::IKMNoise( const Vector & labels, double noise, bool optimizeNoise )
-{
-  this->size = labels.size();
-  this->noise = noise;
-  this->optimizeNoise = optimizeNoise;
-  this->labels = labels;
-  this->np = 0;
-  this->nn = 0;
   this->verbose = false;
-  for ( uint i = 0 ; i < labels.size(); i++ )
-    if ( labels[i] == 1 ) 
-      this->np++;
-    else
-      this->nn++;
-    
-  if (verbose)
-  {
-    std::cerr << "IKMNoise np : " << np << " nn: " << nn << std::endl;
-  }
 }
 
 
@@ -63,47 +40,16 @@ IKMNoise::~IKMNoise()
 void IKMNoise::getDiagonalElements ( Vector & diagonalElements ) const
 {
   diagonalElements.resize( size );
-  if ( labels.size() == 0 ) {
-    diagonalElements.set( noise );
-  } else {
-    for ( uint i = 0 ; i < labels.size(); i++ )
-      if ( labels[i] == 1 ) {
-        diagonalElements[i] = 2*np*noise/size;
-      } else {
-        diagonalElements[i] = 2*nn*noise/size;
-      }
-  }
+  diagonalElements.set( noise );
 }
 
 void IKMNoise::getFirstDiagonalElement ( double & diagonalElement ) const
 {
-  if ( labels.size() == 0 )
-  {
-    if (verbose)
-    {    
-      std::cerr << "IKMNoise::getFirstDiagonalElement  and labels.size() is zero" << std::endl;
-    }
-    diagonalElement = noise ;
-  }
-  else
-  {
-    if ( labels[0] == 1 )
-    {
-      if (verbose)
-      {          
-        std::cerr << "IKMNoise::getFirstDiagonalElement -- and first entry is +1" << std::endl;
-      }
-      diagonalElement = 2*np*noise/size;
-    } 
-    else
-    {
-      if (verbose)
-      {                
-        std::cerr << "IKMNoise::getFirstDiagonalElement -- and first entry is -1" << std::endl;
-      }
-      diagonalElement = 2*nn*noise/size;
-    }
+  if (verbose)
+  {    
+    std::cerr << "IKMNoise::getFirstDiagonalElement -- returning uniform noise value" << std::endl;
   }
+  diagonalElement = noise;
 }
 
 
@@ -159,17 +105,7 @@ void IKMNoise::multiply (NICE::Vector & y, const NICE::Vector & x) const
 {
   y.resize( rows() );
   
-  if ( labels.size() == 0 )
-  {
-    y = noise * x;
-  } else {
-    for ( uint i = 0 ; i < labels.size(); i++ )
-      if ( labels[i] == 1 ) {
-        y[i] = 2*np*noise/size * x[i];
-      } else {
-        y[i] = 2*nn*noise/size * x[i];
-      }
-  }
+  y = noise * x;
 }
 
 uint IKMNoise::rows () const
@@ -197,50 +133,90 @@ void IKMNoise::restore ( std::istream & is, int format )
   {
     is.precision (std::numeric_limits<double>::digits10 + 1); 
     
-    std::string tmp;
-    is >> tmp; //class name
-    
-    is >> tmp;
-    is >> size;
-    
-    is >> tmp;
-    is >> noise;
-    
-    is >> tmp;
-    is >> optimizeNoise;
-    
-    is >> tmp;
-    is >> np;
-    
-    is >> tmp;
-    is >> nn;
+    std::string tmp;    
+
+    bool b_endOfBlock ( false ) ;
     
-    is >> tmp;
-    is >> labels;
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "IKMNoise" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }
+                  
+      
+      tmp = this->removeStartTag ( tmp );
+      
+      if ( tmp.compare("size") == 0 )
+      {
+          is >> size;
+      }
+      else if ( tmp.compare("noise") == 0 )
+      {
+          is >> noise;
+      }
+      else if ( tmp.compare("optimizeNoise") == 0 )
+      {
+          is >> optimizeNoise;
+      }
+      else
+      {
+	std::cerr << "WARNING -- unexpected section in IKMNoise object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	throw;	
+      }
+      
+      is >> tmp; // end of block 
+      tmp = this->removeEndTag ( tmp );      
+    }
+   }
+  else
+  {
+    std::cerr << "IKMNoise::restore -- InStream not initialized - restoring not possible!" << std::endl;
   }
 }
 
 void IKMNoise::store ( std::ostream & os, int format ) const
 {
-  os << "IKMNoise" << std::endl;
-  os << "size: " << size << std::endl;
-  os << "noise: " << noise << std::endl;
-  os << "optimizeNoise: " <<  optimizeNoise << std::endl;
-  os << "np: " << np  << std::endl;
-  os << "nn: " << nn << std::endl;
-  os << "labels: " << labels << std::endl;
+  // show starting point
+  os << this->createStartTag( "IKMNoise" ) << std::endl;
+  
+  
+  
+  os << this->createStartTag( "size" ) << std::endl;
+  os << size << std::endl;
+  os << this->createEndTag( "size" ) << std::endl;
+  
+  os << this->createStartTag( "noise" ) << std::endl;
+  os << noise << std::endl;
+  os << this->createEndTag( "noise" ) << std::endl;
+  
+  os << this->createStartTag( "optimizeNoise" ) << std::endl;
+  os << optimizeNoise << std::endl;
+  os << this->createEndTag( "optimizeNoise" ) << std::endl; 
+  
+  // done
+  os << this->createEndTag( "IKMNoise" ) << std::endl;
 }
 
-// ----------------- INCREMENTAL LEARNING METHODS -----------------------
-void IKMNoise::addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels)
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+// interface specific methods for incremental extensions
+///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+
+void IKMNoise::addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement
+			   )
 {
-  ++size;
-  if ( (np != 0) && (nn != 0) )
-  {
-    labels = binLabels;
-    if (binLabels[binLabels.size()-1] == 1)
-      ++np;
-    else
-      ++nn;
-  }
+  this->size++;
+}
+
+void IKMNoise::addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement
+				    )
+{
+  this->size += newExamples.size();
 }
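
With the label-dependent branching removed, IKMNoise is now a plain isotropic regularizer, i.e., noise times the identity. A minimal sketch of the resulting behavior (values illustrative, written against the constructor and multiply() shown above):

    #include <core/vector/VectorT.h>
    #include "gp-hik-core/IKMNoise.h"

    void ikmNoiseSketch ()
    {
      // 3x3 noise matrix acting as 0.1 * I
      NICE::IKMNoise noiseMatrix ( 3 /*size*/, 0.1 /*noise*/, false /*optimizeNoise*/ );

      NICE::Vector x ( 3, 1.0 );
      NICE::Vector y;
      noiseMatrix.multiply ( y, x );   // y == ( 0.1, 0.1, 0.1 )
    }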

+ 24 - 13
IKMNoise.h

@@ -23,7 +23,6 @@ class IKMNoise : public ImplicitKernelMatrix
 {
 
   protected:
-    Vector labels;
 
     uint size;
 
@@ -31,9 +30,7 @@ class IKMNoise : public ImplicitKernelMatrix
 
     bool optimizeNoise;
 
-    uint np;
-    uint nn;
-    
+   
     /** give some debug outputs. There is not set function so far... */
     bool verbose;
   
@@ -43,20 +40,19 @@ class IKMNoise : public ImplicitKernelMatrix
     
     IKMNoise( uint size, double noise, bool optimizeNoise );
     
-    IKMNoise( const Vector & labels, double noise, bool optimizeNoise );
       
     virtual ~IKMNoise();
 
-    virtual void getDiagonalElements ( Vector & diagonalElements ) const;
+    virtual void getDiagonalElements ( NICE::Vector & diagonalElements ) const;
     virtual void getFirstDiagonalElement ( double & diagonalElement ) const;
     virtual uint getNumParameters() const;
     
-    virtual void getParameters(Vector & parameters) const;
-    virtual void setParameters(const Vector & parameters);
-    virtual bool outOfBounds(const Vector & parameters) const;
+    virtual void getParameters( NICE::Vector & parameters) const;
+    virtual void setParameters(const NICE::Vector & parameters);
+    virtual bool outOfBounds(const NICE::Vector & parameters) const;
 
-    virtual Vector getParameterLowerBounds() const;
-    virtual Vector getParameterUpperBounds() const;
+    virtual NICE::Vector getParameterLowerBounds() const;
+    virtual NICE::Vector getParameterUpperBounds() const;
 
     /** multiply with a vector: A*x = y */
     virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
@@ -70,12 +66,27 @@ class IKMNoise : public ImplicitKernelMatrix
     virtual double approxFrobNorm() const;
     virtual void setApproximationScheme(const int & _approxScheme) {};
     
-    /** Persistent interface */
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
     virtual void restore ( std::istream & is, int format = 0 );
     virtual void store ( std::ostream & os, int format = 0 ) const; 
     virtual void clear () {};
     
-    void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels);
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////    
+    
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   );
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    );        
+    
 
 };
 

+ 25 - 5
ImplicitKernelMatrix.h

@@ -8,11 +8,16 @@
 #ifndef _NICE_IMPLICITKERNELMATRIXINCLUDE
 #define _NICE_IMPLICITKERNELMATRIXINCLUDE
 
+// STL includes
 #include <iostream>
 
+// NICE-core includes
 #include <core/algebra/GenericMatrix.h>
+// 
+#include <core/basics/Persistent.h>
 
-#include "core/basics/Persistent.h"
+// gp-hik-core includes
+#include "gp-hik-core/OnlineLearnable.h"
 
 namespace NICE {
   
@@ -22,7 +27,7 @@ namespace NICE {
  * @date 02/14/2012
  */
 
-class ImplicitKernelMatrix : public GenericMatrix, NICE::Persistent
+class ImplicitKernelMatrix : public GenericMatrix, public NICE::Persistent, public NICE::OnlineLearnable
 {
 
   protected:
@@ -51,16 +56,31 @@ class ImplicitKernelMatrix : public GenericMatrix, NICE::Persistent
     virtual double approxFrobNorm() const = 0;
     virtual void setApproximationScheme(const int & _approxScheme) = 0;
     
-    /** Persistent interface */
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
     virtual void restore ( std::istream & is, int format = 0 ) = 0;
     virtual void store ( std::ostream & os, int format = 0 )  const = 0;
     virtual void clear () = 0;
     
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////
+    // interface specific methods for incremental extensions
+    ///////////////////// INTERFACE ONLINE LEARNABLE /////////////////////    
+    
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   ) = 0;
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    ) = 0;      
+    
     //high order methods
-    virtual void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels) = 0;
     virtual void  multiply (NICE::Vector &y, const NICE::Vector &x) const = 0;
 };
 
 }
 
-#endif
+#endif

+ 46 - 0
OnlineLearnable.h

@@ -0,0 +1,46 @@
+#ifndef _NICE_ONLINELEARNABLEINCLUDE
+#define _NICE_ONLINELEARNABLEINCLUDE
+
+
+// NICE-core includes
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+namespace NICE {
+
+
+ /** 
+ * @class OnlineLearnable
+ * @brief Interface specifying learning algorithms implementing methods for online updates
+ * @author Alexander Freytag
+ * @date 01-01-2014 (dd-mm-yyyy)
+ */ 
+ 
+class OnlineLearnable {
+
+ 
+  public:
+    // Interface specifications
+    virtual void addExample( const NICE::SparseVector * example, 
+			     const double & label, 
+			     const bool & performOptimizationAfterIncrement = true
+			   ) = 0;
+			   
+    virtual void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples,
+				      const NICE::Vector & newLabels,
+				      const bool & performOptimizationAfterIncrement = true
+				    ) = 0;    
+
+
+    // Provided functions and overloaded stream operators
+    virtual ~OnlineLearnable () {};
+    
+    // just to prevent senseless compiler warnings
+    OnlineLearnable() {};   
+
+};
+
+
+} // namespace
+
+#endif

+ 138 - 53
SortedVectorSparse.h

@@ -7,6 +7,7 @@
 #ifndef SORTEDVECTORSPARSEINCLUDE
 #define SORTEDVECTORSPARSEINCLUDE
 
+// STL includes
 #include <vector>
 #include <cmath>
 #include <map>
@@ -14,10 +15,13 @@
 #include <iostream>
 #include <limits>
 
+// NICE-core includes
 #include <core/basics/Exception.h>
+#include <core/basics/Persistent.h>
+// 
 #include <core/vector/VectorT.h>
 #include <core/vector/SparseVectorT.h>
-#include "core/basics/Persistent.h"
+
 
 namespace NICE {
 
@@ -587,70 +591,151 @@ template<class T> class SortedVectorSparse : NICE::Persistent{
     /** Persistent interface */
     virtual void restore ( std::istream & is, int format = 0 )
     {
-      if (is.good())
+      bool b_restoreVerbose ( false );
+      if ( is.good() )
       {
-        is.precision (std::numeric_limits<double>::digits10 + 1);
-        
-        std::string tmp;
-        is >> tmp; //class name
-        
-        is >> tmp;
-        is >> tolerance;
-               
-        is >> tmp;
-        is >> n;
-               
-        is >> tmp;
-        int size;
-        is >> size;
-        
-        is >> tmp;
-        
-        T origValue;
-        int origIndex;
-        T transformedValue;
-        
-        nzData.clear();
-        for (int i = 0; i < size; i++)
-        {
-         
-          is >> origValue;
-          is >> origIndex;
-          is >> transformedValue;
-        
-          std::pair<T, dataelement > p ( origValue, dataelement ( origIndex, transformedValue ) );
-          elementpointer it = nzData.insert ( p);
-          nonzero_indices.insert ( std::pair<int, elementpointer> ( origIndex, it ) );
-        }
-        
-        if (verbose)
-        {
-          std::cerr << "SortedVectorSparse::restore" << std::endl;      
-          std::cerr << "tolerance: " << tolerance << std::endl;          
-          std::cerr << "n: " << n << std::endl;          
-          std::cerr << "size: " << size << std::endl;          
-        }
+	if ( b_restoreVerbose ) 
+	  std::cerr << " restore SortedVectorSparse" << std::endl;
+	
+	std::string tmp;
+	is >> tmp; //class name 
+	
+	if ( ! this->isStartTag( tmp, "SortedVectorSparse" ) )
+	{
+	    std::cerr << " WARNING - attempt to restore SortedVectorSparse, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+	    throw;
+	}   
+	    
+	is.precision ( std::numeric_limits<double>::digits10 + 1);
+	
+	bool b_endOfBlock ( false ) ;
+	
+	while ( !b_endOfBlock )
+	{
+	  is >> tmp; // start of block 
+	  
+	  if ( this->isEndTag( tmp, "SortedVectorSparse" ) )
+	  {
+	    b_endOfBlock = true;
+	    continue;
+	  }      
+	  
+	  tmp = this->removeStartTag ( tmp );
+	  
+	  if ( b_restoreVerbose )
+	    std::cerr << " currently restoring section " << tmp << " in SortedVectorSparse" << std::endl;
+	  
+	  if ( tmp.compare("tolerance") == 0 )
+	  {
+	    is >> tolerance;        
+	    is >> tmp; // end of block 
+	    tmp = this->removeEndTag ( tmp );
+	  }
+	  else if ( tmp.compare("n") == 0 )
+	  {
+	    is >> n;        
+	    is >> tmp; // end of block 
+	    tmp = this->removeEndTag ( tmp );
+	  }
+	  else if ( tmp.compare("underlying_data_(sorted)") == 0 )
+	  {
+	    is >> tmp; // start of block 
+	    
+	    int nonZeros;
+	    if ( ! this->isStartTag( tmp, "nonZeros" ) )
+	    {
+	      std::cerr << "Attempt to restore SortedVectorSparse, but found no information about nonZeros elements. Aborting..." << std::endl;
+	      throw;
+	    }
+	    else
+	    {
+	      is >> nonZeros;
+	      is >> tmp; // end of block 
+	      tmp = this->removeEndTag ( tmp );     
+	    }
+	    
+	    is >> tmp; // start of block 
+	    
+	    if ( ! this->isStartTag( tmp, "data" ) )
+	    {
+	      std::cerr << "Attempt to restore SortedVectorSparse, but found no data. Aborting..." << std::endl;
+	      throw;
+	    }
+	    else
+	    {	    
+	      T origValue;
+	      int origIndex;
+	      T transformedValue;
+	      
+	      nzData.clear();
+	      for (int i = 0; i < nonZeros; i++)
+	      {
+	      
+		is >> origValue;
+		is >> origIndex;
+		is >> transformedValue;
+	      
+		std::pair<T, dataelement > p ( origValue, dataelement ( origIndex, transformedValue ) );
+		elementpointer it = nzData.insert ( p);
+		nonzero_indices.insert ( std::pair<int, elementpointer> ( origIndex, it ) );
+	      }
+	      
+	      is >> tmp; // end of block 
+	      tmp = this->removeEndTag ( tmp );  
+	    }
+	    
+	    
+	    is >> tmp; // end of block 
+	    tmp = this->removeEndTag ( tmp );	    
+	  }
+	  else
+	  {
+	    std::cerr << "WARNING -- unexpected section in SortedVectorSparse object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	    throw;	
+	  }
+	}        
+
       }
       else
       {
         std::cerr << "SortedVectorSparse::restore -- InStream not initialized - restoring not possible!" << std::endl;
+	throw;
       }      
     };
     virtual void store ( std::ostream & os, int format = 0 ) const
     {
       if (os.good())
       {
+	// show starting point
+	os << this->createStartTag( "SortedVectorSparse" ) << std::endl;
+	
         os.precision (std::numeric_limits<double>::digits10 + 1);
-        os << "SortedVectorSparse" << std::endl;
-        os << "tolerance: " << tolerance << std::endl;
-        os << "n: " << n << std::endl;
-        os << "nonZeros: " << nzData.size() << std::endl;
-        os << "underlying_data_(sorted)" << std::endl;
-        for (const_elementpointer elP = nzData.begin();  elP != nzData.end(); elP++)
-        {
-          os << elP->first << " " << elP->second.first << " " << elP->second.second << " ";
-        }
-        os << std::endl;
+	
+	os << this->createStartTag( "tolerance" ) << std::endl;
+	os << tolerance << std::endl;
+	os << this->createEndTag( "tolerance" ) << std::endl;
+	
+	os << this->createStartTag( "n" ) << std::endl;
+	os << n << std::endl;
+	os << this->createEndTag( "n" ) << std::endl;
+		
+
+        os << this->createStartTag( "underlying_data_(sorted)" ) << std::endl;
+	  os << this->createStartTag( "nonZeros" ) << std::endl;
+	  os << this->getNonZeros() << std::endl;
+	  os << this->createEndTag( "nonZeros" ) << std::endl;
+	  
+	  os << this->createStartTag( "data" ) << std::endl;  
+	  for (const_elementpointer elP = nzData.begin();  elP != nzData.end(); elP++)
+	  {
+	    os << elP->first << " " << elP->second.first << " " << elP->second.second << " ";
+	  }
+	  os << std::endl;
+	  os << this->createEndTag( "data" ) << std::endl;
+	os << this->createEndTag( "underlying_data_(sorted)" ) << std::endl;
+	
+	// done
+	os << this->createEndTag( "SortedVectorSparse" ) << std::endl;	
       }
       else
       {

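For reference, the tag-delimited layout the new store() produces looks roughly as follows (a sketch assuming createStartTag/createEndTag emit <name> / </name> markers; values illustrative, each data entry written as origValue origIndex transformedValue):

    <SortedVectorSparse>
    <tolerance>
    1e-07
    </tolerance>
    <n>
    5
    </n>
    <underlying_data_(sorted)>
    <nonZeros>
    2
    </nonZeros>
    <data>
    0.2 3 0.2 0.7 1 0.7
    </data>
    </underlying_data_(sorted)>
    </SortedVectorSparse>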
+ 1 - 0
corefiles.cmake

@@ -38,5 +38,6 @@ SET(nice_gp-hik-core_HDR
 ./parameterizedFunctions/PFMKL.h
 ./parameterizedFunctions/PFAbsExp.h
 ./parameterizedFunctions/PFWeightedDim.h
+./OnlineLearnable.h
 )
 

+ 854 - 0
matlab/GPHIK.cpp

@@ -0,0 +1,854 @@
+#include <math.h>
+#include <matrix.h>
+#include "mex.h"
+#include "classHandleMtoC.h"
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+#include <core/vector/MatrixT.h>
+#include <core/vector/VectorT.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKClassifier.h"
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+/* Pass analyze_sparse a pointer to a sparse mxArray.  A sparse mxArray
+   only stores its nonzero elements.  The values of the nonzero elements 
+   are stored in the pr and pi arrays.  The tricky part of analyzing
+   sparse mxArray's is figuring out the indices where the nonzero
+   elements are stored.  (See the mxSetIr and mxSetJc reference pages
+   for details.) */
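+
+/* Worked example (illustrative, not from this commit): for the 3x2 matrix
+   [1 0; 0 2; 3 0], column-compressed storage yields pr = {1,3,2},
+   ir = {0,2,1} and jc = {0,2,3}; column c owns the stored entries
+   jc[c] .. jc[c+1]-1, and ir gives the row index of each value. */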
+std::vector< NICE::SparseVector * > convertSparseMatrixToNice(const mxArray *array_ptr)
+{
+  double  *pr;//, *pi;
+  mwIndex  *ir, *jc;
+  mwSize      col, total=0;
+  mwIndex   starting_row_index, stopping_row_index, current_row_index;
+  mwSize      i_numExamples, i_numDim;
+  
+  /* Get the starting positions of all four data arrays. */ 
+  pr = mxGetPr(array_ptr);
+//   pi = mxGetPi(array_ptr);
+  ir = mxGetIr(array_ptr);
+  jc = mxGetJc(array_ptr);
+  
+  // dimensions of the matrix -> feature dimension and number of examples
+  i_numExamples = mxGetM(array_ptr);  
+  i_numDim = mxGetN(array_ptr);
+    
+  // initialize output variable
+  std::vector< NICE::SparseVector * > sparseMatrix;
+  sparseMatrix.resize ( i_numExamples );
+    
+  for ( std::vector< NICE::SparseVector * >::iterator matIt = sparseMatrix.begin(); 
+        matIt != sparseMatrix.end(); matIt++)
+  {
+      *matIt = new NICE::SparseVector( i_numDim );
+  }  
+  
+  // now copy the data
+  for (col=0; col < i_numDim; col++)
+  { 
+    starting_row_index = jc[col]; 
+    stopping_row_index = jc[col+1]; 
+    
+    // empty column?
+    if (starting_row_index == stopping_row_index)
+      continue;
+    else
+    {
+      for ( current_row_index = starting_row_index; 
+            current_row_index < stopping_row_index; 
+	        current_row_index++)
+      {
+          //note: no complex data supported here
+          sparseMatrix[ ir[current_row_index] ]->insert( std::pair<int, double>( col, pr[total++] ) );
+      } // for-loop
+      
+    }
+  } // for-loop over columns
+  
+  return sparseMatrix;
+}
+
+
+// b_adaptIndexMtoC: if true, dim k will be inserted as k, not as k-1 (which would be the default for  M->C)
+NICE::SparseVector convertSparseVectorToNice(const mxArray* array_ptr, const bool & b_adaptIndexMtoC = false )
+{
+  double  *pr, *pi;
+  mwIndex  *ir, *jc;
+  mwSize      col, total=0;
+  mwIndex   starting_row_index, stopping_row_index, current_row_index;
+  mwSize      dimy, dimx;
+  
+  /* Get the starting positions of all four data arrays. */ 
+  pr = mxGetPr(array_ptr);
+  pi = mxGetPi(array_ptr);
+  ir = mxGetIr(array_ptr);
+  jc = mxGetJc(array_ptr);
+  
+  // dimensions of the matrix -> feature dimension and number of examples
+  dimy = mxGetM(array_ptr);  
+  dimx = mxGetN(array_ptr);
+  
+  double* ptr = mxGetPr(array_ptr);
+
+  if(dimx != 1 && dimy != 1)
+    mexErrMsgIdAndTxt("mexnice:error","Vector expected");
+  
+
+  NICE::SparseVector svec( std::max(dimx, dimy) );
+   
+  
+  if ( dimx > 1)
+  {
+    for ( mwSize row=0; row < dimx; row++)
+    { 
+        // empty column?
+        if (jc[row] == jc[row+1])
+        {
+          continue;
+        }
+        else
+        {
+          //note: no complex data supported here
+            double value ( pr[total++] );
+            if ( b_adaptIndexMtoC ) 
+                svec.insert( std::pair<int, double>( row+1,  value ) );
+            else
+                svec.insert( std::pair<int, double>( row,  value ) );
+        }
+    } // for loop over cols      
+  }
+  else
+  {
+    mwSize numNonZero = jc[1]-jc[0];
+    
+    for ( mwSize colNonZero=0; colNonZero < numNonZero; colNonZero++)
+    {
+        //note: no complex data supported here
+        double value ( pr[total++] );
+        if ( b_adaptIndexMtoC ) 
+            svec.insert( std::pair<int, double>( ir[colNonZero]+1, value  ) );
+        else
+            svec.insert( std::pair<int, double>( ir[colNonZero], value  ) );
+    }          
+  }
+
+  return svec;
+}
+
+// b_adaptIndexCtoM: if true, dim k will be inserted as k, not as k+1 (which would be the default for C->M)
+mxArray* convertSparseVectorFromNice( const NICE::SparseVector & scores, const bool & b_adaptIndexCtoM = false)
+{
+    mxArray * matlabSparseVec = mxCreateSparse( scores.getDim() /*m*/, 1/*n*/, scores.size()/*nzmax*/, mxREAL);
+    
+    // To make the returned sparse mxArray useful, you must initialize the pr, ir, jc, and (if it exists) pi arrays.    
+    // mxCreateSparse allocates space for:
+    // 
+    // A pr array of length nzmax.
+    // A pi array of length nzmax, but only if ComplexFlag is mxCOMPLEX in C (1 in Fortran).
+    // An ir array of length nzmax.
+    // A jc array of length n+1.  
+  
+    double* prPtr = mxGetPr(matlabSparseVec);
+    mwIndex * ir = mxGetIr( matlabSparseVec );
+    
+    mwIndex * jc = mxGetJc( matlabSparseVec );
+    jc[1] = scores.size(); jc[0] = 0; 
+    
+    
+    mwSize cnt = 0;
+        
+    for ( NICE::SparseVector::const_iterator myIt = scores.begin(); myIt != scores.end(); myIt++, cnt++ )
+    {
+        // set index
+        if ( b_adaptIndexCtoM ) 
+            ir[cnt] = myIt->first-1;
+        else
+            ir[cnt] = myIt->first;
+        
+        // set value
+        prPtr[cnt] = myIt->second;
+    }
+    
+    return matlabSparseVec;
+}
+
+
+mxArray* convertMatrixFromNice(NICE::Matrix & niceMatrix)
+{
+	mxArray *matlabMatrix = mxCreateDoubleMatrix(niceMatrix.rows(),niceMatrix.cols(),mxREAL);
+	double* matlabMatrixPtr = mxGetPr(matlabMatrix);
+
+	for(int i=0; i<niceMatrix.rows(); i++)
+    {
+		for(int j=0; j<niceMatrix.cols(); j++)
+		{
+			matlabMatrixPtr[i + j*niceMatrix.rows()] = niceMatrix(i,j);
+		}
+    }
+	return matlabMatrix;
+}
+
+NICE::Matrix convertMatrixToNice(const mxArray* matlabMatrix)
+{
+	//todo: do not assume double
+
+  const mwSize *dims;
+  int dimx, dimy, numdims;
+    //figure out dimensions
+  dims = mxGetDimensions(matlabMatrix);
+  numdims = mxGetNumberOfDimensions(matlabMatrix);
+  dimy = (int)dims[0]; dimx = (int)dims[1];
+  double* ptr = mxGetPr(matlabMatrix);
+
+  NICE::Matrix niceMatrix(ptr, dimy, dimx, NICE::Matrix::external); 
+
+  return niceMatrix;
+}
+
+mxArray* convertVectorFromNice(NICE::Vector & niceVector)
+{
+	//cout << "start convertVectorFromNice" << endl;
+	mxArray *matlabVector = mxCreateDoubleMatrix(niceVector.size(), 1, mxREAL);
+	double* matlabVectorPtr = mxGetPr(matlabVector);
+
+	for(int i=0;i<niceVector.size(); i++)
+    {
+        matlabVectorPtr[i] = niceVector[i];
+    }
+	return matlabVector;
+}
+
+NICE::Vector convertVectorToNice(const mxArray* matlabMatrix)
+{
+	//todo: do not assume double
+
+  const mwSize *dims;
+  int dimx, dimy, numdims;
+    //figure out dimensions
+  dims = mxGetDimensions(matlabMatrix);
+  numdims = mxGetNumberOfDimensions(matlabMatrix);
+  dimy = (int)dims[0]; dimx = (int)dims[1];
+  double* ptr = mxGetPr(matlabMatrix);
+
+  if(dimx != 1 && dimy != 1)
+    mexErrMsgIdAndTxt("mexnice:error","Vector expected");
+
+  int dim = max(dimx, dimy);    
+
+  NICE::Vector niceVector(dim, 0.0);
+  
+  for(int i=0;i<dim;i++)
+  {
+      niceVector(i) = ptr[i];
+  }
+
+  return niceVector;
+}
+
+
+
+std::string convertMatlabToString(const mxArray *matlabString)
+{
+  if(!mxIsChar(matlabString))
+    mexErrMsgIdAndTxt("mexnice:error","Expected string");
+
+  char *cstring = mxArrayToString(matlabString);
+  std::string s(cstring);
+  mxFree(cstring);
+  return s;
+}
+
+
+int convertMatlabToInt32(const mxArray *matlabInt32)
+{
+  if(!mxIsInt32(matlabInt32))
+    mexErrMsgIdAndTxt("mexnice:error","Expected int32");
+
+  int* ptr = (int*)mxGetData(matlabInt32);
+  return ptr[0];
+}
+
+double convertMatlabToDouble(const mxArray *matlabDouble)
+{
+  if(!mxIsDouble(matlabDouble))
+    mexErrMsgIdAndTxt("mexnice:error","Expected double");
+
+  double* ptr = (double*)mxGetData(matlabDouble);
+  return ptr[0];
+}
+
+NICE::Config parseParameters(const mxArray *prhs[], int nrhs)
+{
+  NICE::Config conf;
+  
+  // if first argument is the filename of an existing config file,
+  // read the config accordingly
+  
+  int i_start ( 0 );
+  std::string variable = convertMatlabToString(prhs[i_start]);
+  if(variable == "conf")
+  {
+      conf = NICE::Config ( convertMatlabToString( prhs[i_start+1] )  );
+      i_start = i_start+2;
+  }
+  
+  // now run over all given parameter specifications
+  // and add them to the config
+  for( int i=i_start; i < nrhs; i+=2 )
+  {
+    std::string variable = convertMatlabToString(prhs[i]);
+    if(variable == "ils_verbose")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "true" && value != "false")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_verbose\'. \'true\' or \'false\' expected.");
+      if(value == "true")
+        conf.sB("GPHIKClassifier", variable, true);
+      else
+        conf.sB("GPHIKClassifier", variable, false);
+    }
+
+    if(variable == "ils_max_iterations")
+    {
+      int value = convertMatlabToInt32(prhs[i+1]);
+      if(value < 1)
+        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'ils_max_iterations\'.");
+      conf.sI("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "ils_method")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "CG" && value != "CGL" && value != "SYMMLQ" && value != "MINRES")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'ils_method\'. \'CG\', \'CGL\', \'SYMMLQ\' or \'MINRES\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "ils_min_delta")
+    {
+      double value = convertMatlabToDouble(prhs[i+1]);
+      if(value < 0.0)
+        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'ils_min_delta\'.");
+      conf.sD("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "ils_min_residual")
+    {
+      double value = convertMatlabToDouble(prhs[i+1]);
+      if(value < 0.0)
+        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'ils_min_residual\'.");
+      conf.sD("GPHIKClassifier", variable, value);
+    }
+
+
+    if(variable == "optimization_method")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "greedy" && value != "downhillsimplex" && value != "none")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'optimization_method\'. \'greedy\', \'downhillsimplex\' or \'none\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "use_quantization")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "true" && value != "false")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'use_quantization\'. \'true\' or \'false\' expected.");
+      if(value == "true")
+        conf.sB("GPHIKClassifier", variable, true);
+      else
+        conf.sB("GPHIKClassifier", variable, false);
+    }
+
+    if(variable == "num_bins")
+    {
+      int value = convertMatlabToInt32(prhs[i+1]);
+      if(value < 1)
+        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'num_bins\'.");
+      conf.sI("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "transform")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "absexp" && value != "exp" && value != "MKL" && value != "WeightedDim")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'transform\'. \'absexp\', \'exp\' , \'MKL\' or \'WeightedDim\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+
+    if(variable == "verboseTime")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "true" && value != "false")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'verboseTime\'. \'true\' or \'false\' expected.");
+      if(value == "true")
+        conf.sB("GPHIKClassifier", variable, true);
+      else
+        conf.sB("GPHIKClassifier", variable, false);
+    }
+
+    if(variable == "verbose")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "true" && value != "false")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'verbose\'. \'true\' or \'false\' expected.");
+      if(value == "true")
+        conf.sB("GPHIKClassifier", variable, true);
+      else
+        conf.sB("GPHIKClassifier", variable, false);
+    }
+
+    if(variable == "noise")
+    {
+      double value = convertMatlabToDouble(prhs[i+1]);
+      if(value < 0.0)
+        mexErrMsgIdAndTxt("mexnice:error","Expected parameter value larger than 0 for \'noise\'.");
+      conf.sD("GPHIKClassifier", variable, value);
+    }
+
+
+    if(variable == "optimize_noise")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "true" && value != "false")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'optimize_noise\'. \'true\' or \'false\' expected.");
+      if(value == "true")
+        conf.sB("GPHIKClassifier", variable, true);
+      else
+        conf.sB("GPHIKClassifier", variable, false);
+    }
+    
+    if(variable == "varianceApproximation")
+    {
+      string value = convertMatlabToString(prhs[i+1]);
+      if(value != "approximate_fine" && value != "approximate_rough" && value != "exact" && value != "none")
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'varianceApproximation\'. \'approximate_fine\', \'approximate_rough\', \'none\' or \'exact\' expected.");
+        conf.sS("GPHIKClassifier", variable, value);
+    }
+    
+    if(variable == "nrOfEigenvaluesToConsiderForVarApprox")
+    {
+      double value = convertMatlabToDouble(prhs[i+1]);
+      conf.sI("GPHIKClassifier", variable, (int) value);
+    }    
+    
+  }
+
+
+  return conf;
+}
+
+// MAIN MATLAB FUNCTION
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
+{    
+    // get the command string specifying what to do
+    if (nrhs < 1)
+        mexErrMsgTxt("No commands and options passed... Aborting!");        
+    
+    if( !mxIsChar( prhs[0] ) )
+        mexErrMsgTxt("First argument needs to be the command, ie.e, the class method to call... Aborting!");        
+    
+    std::string cmd = convertMatlabToString( prhs[0] );
+      
+        
+    // create object
+    if ( !strcmp("new", cmd.c_str() ) )
+    {
+        // check output variable
+        if (nlhs != 1)
+            mexErrMsgTxt("New: One output expected.");
+        
+        // read config settings
+        NICE::Config conf = parseParameters(prhs+1,nrhs-1);
+        
+        // create class instance
+        NICE::GPHIKClassifier * classifier = new NICE::GPHIKClassifier ( &conf );
+        
+         
+        // handle to the C++ instance
+        plhs[0] = convertPtr2Mat<NICE::GPHIKClassifier>( classifier );
+        return;
+    }
+    
+    // in all other cases, there should be a second input,
+    // which should be the class instance handle
+    if (nrhs < 2)
+      mexErrMsgTxt("Second input should be a class instance handle.");
+    
+    // delete object
+    if ( !strcmp("delete", cmd.c_str() ) )
+    {
+        // Destroy the C++ object
+        destroyObject<NICE::GPHIKClassifier>(prhs[1]);
+        return;
+    }
+    
+    // get the class instance pointer from the second input
+    // every following function needs the classifier object
+    NICE::GPHIKClassifier * classifier = convertMat2Ptr<NICE::GPHIKClassifier>(prhs[1]);
+    
+    
+    ////////////////////////////////////////
+    //  Check which class method to call  //
+    ////////////////////////////////////////
+    
+    
+    // standard train - assumes initialized object
+    if (!strcmp("train", cmd.c_str() ))
+    {
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+        {
+            mexErrMsgTxt("Train: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+          
+        std::vector< NICE::SparseVector *> examplesTrain;
+        NICE::Vector yMultiTrain;  
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            examplesTrain = convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {
+            NICE::Matrix dataTrain;
+            dataTrain = convertMatrixToNice(prhs[2]);
+            
+            //----------------- convert data to sparse data structures ---------
+            examplesTrain.resize( dataTrain.rows() );
+
+                    
+            std::vector< NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+            for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+            {
+                *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+            }            
+        }
+          
+          yMultiTrain = convertVectorToNice(prhs[3]);
+          
+//           std::cerr << " DATA AFTER CONVERSION: \n" << std::endl;
+//           int lineIdx(0);
+//           for ( std::vector< NICE::SparseVector *>::const_iterator exTrainIt = examplesTrain.begin();
+//                 exTrainIt != examplesTrain.end(); exTrainIt++, lineIdx++)
+//           {
+//               std::cerr << "\n lineIdx: " << lineIdx << std::endl;
+//               (*exTrainIt)->store( std::cerr );
+//               
+//           }
+
+          // test assumption
+          {
+            if( yMultiTrain.Min() < 0)
+              mexErrMsgIdAndTxt("mexnice:error","Class labels smaller 0 are not allowed");
+          }
+
+
+          //----------------- train our classifier -------------
+          classifier->train ( examplesTrain , yMultiTrain );
+
+          //----------------- clean up -------------
+          for(int i=0;i<examplesTrain.size();i++)
+              delete examplesTrain[i];
+        
+        return;
+    }
+    
+    
+    // Classify    
+    if ( !strcmp("classify", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Classify: Unexpected arguments.");
+        }
+        
+        //------------- read the data --------------
+
+        int result;
+        NICE::SparseVector scores;
+        double uncertainty;        
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( convertSparseVectorToNice( prhs[2] ) );
+            classifier->classify ( example,  result, scores, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( convertVectorToNice(prhs[2]) ); 
+            classifier->classify ( example,  result, scores, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+          
+          
+
+          // output
+          plhs[0] = mxCreateDoubleScalar( result ); 
+          
+          
+          if(nlhs >= 2)
+          {
+            plhs[1] = convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
+          }
+          if(nlhs >= 3)
+          {
+            plhs[2] = mxCreateDoubleScalar( uncertainty );          
+          }
+          return;
+    }
+    
+    // Uncertainty prediction    
+    if ( !strcmp("uncertainty", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( (nlhs < 0) || (nrhs < 2) )
+        {
+            mexErrMsgTxt("Uncertainty: Unexpected arguments.");
+        }
+        
+        double uncertainty;        
+        
+        //------------- read the data --------------
+
+        if ( mxIsSparse( prhs[2] ) )
+        {
+            NICE::SparseVector * example;
+            example = new NICE::SparseVector ( convertSparseVectorToNice( prhs[2] ) );
+            classifier->predictUncertainty( example, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+        else
+        {
+            NICE::Vector * example;
+            example = new NICE::Vector ( convertVectorToNice(prhs[2]) ); 
+            classifier->predictUncertainty( example, uncertainty );
+            
+            //----------------- clean up -------------
+            delete example;            
+        }
+        
+       
+
+          // output
+          plhs[0] = mxCreateDoubleScalar( uncertainty );                    
+          return;
+    }    
+    
+    
+    // Test    
+    if ( !strcmp("test", cmd.c_str() ) )
+    {        
+        // Check parameters
+        if (nlhs < 0 || nrhs < 4)
+            mexErrMsgTxt("Test: Unexpected arguments.");
+        //------------- read the data --------------
+        
+        
+        bool dataIsSparse ( mxIsSparse( prhs[2] ) );
+        
+        std::vector< NICE::SparseVector *> dataTest_sparse;
+        NICE::Matrix dataTest_dense;
+
+        if ( dataIsSparse )
+        {
+            dataTest_sparse = convertSparseMatrixToNice( prhs[2] );
+        }
+        else
+        {    
+            dataTest_dense = convertMatrixToNice(prhs[2]);          
+        }        
+
+          NICE::Vector yMultiTest;
+          yMultiTest = convertVectorToNice(prhs[3]);
+
+          
+          // ------------------------------------------
+          // ------------- PREPARATION --------------
+          // ------------------------------------------   
+          
+          // determine classes known during training and corresponding mapping
+          // thereby allow for non-continuous class labels
+          std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+          
+          int noClassesKnownTraining ( classesKnownTraining.size() );
+          std::map<int,int> mapClNoToIdxTrain;
+          std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+          for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+              mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+          
+          // determine classes known during testing and corresponding mapping
+          // thereby allow for non-continuous class labels
+          std::set<int> classesKnownTest;
+          classesKnownTest.clear();
+          
+  
+          // determine which classes we have in our label vector
+          // -> MATLAB: myClasses = unique(y);
+          for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+          {
+            if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+            {
+              classesKnownTest.insert ( *it );
+            }
+          }          
+          
+          int noClassesKnownTest ( classesKnownTest.size() );  
+          std::map<int,int> mapClNoToIdxTest;
+          std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
+          for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
+              mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );          
+          
+
+
+          int i_numTestSamples;
+          
+          if ( dataIsSparse ) 
+              i_numTestSamples = dataTest_sparse.size();
+          else
+              i_numTestSamples = (int) dataTest_dense.rows();
+          
+          NICE::Matrix confusionMatrix( noClassesKnownTraining, noClassesKnownTest, 0.0);
+          NICE::Matrix scores( i_numTestSamples, noClassesKnownTraining, 0.0);
+          
+          
+
+          // ------------------------------------------
+          // ------------- CLASSIFICATION --------------
+          // ------------------------------------------          
+          
+          NICE::Timer t;
+          double testTime (0.0);
+          
+
+
+          for (int i = 0; i < i_numTestSamples; i++)
+          {
+             //----------------- convert data to sparse data structures ---------
+            
+
+             int result;
+             NICE::SparseVector exampleScoresSparse;
+
+             if ( dataIsSparse )
+             {                
+                // and classify
+                t.start();
+                classifier->classify( dataTest_sparse[ i ], result, exampleScoresSparse );
+                t.stop();
+                testTime += t.getLast();
+             }
+             else
+             {
+                 NICE::Vector example ( dataTest_dense.getRow(i) );
+                // and classify
+                t.start();
+                classifier->classify( &example, result, exampleScoresSparse );
+                t.stop();
+                testTime += t.getLast();                
+             }
+
+             confusionMatrix(  mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+             int scoreCnt ( 0 );
+             for ( NICE::SparseVector::const_iterator scoreIt = exampleScoresSparse.begin(); scoreIt != exampleScoresSparse.end(); scoreIt++, scoreCnt++ )
+                scores(i,scoreCnt) = scoreIt->second;
+                
+          }
+          
+          std::cerr << "Time for testing: " << testTime << std::endl;          
+          
+          // clean up
+          if ( dataIsSparse )
+          {
+              for ( std::vector<NICE::SparseVector *>::iterator it = dataTest_sparse.begin(); it != dataTest_sparse.end(); it++) 
+                  delete *it;
+          }
+          
+
+
+          confusionMatrix.normalizeColumnsL1();
+          //std::cerr << confusionMatrix << std::endl;
+
+          double recRate = confusionMatrix.trace()/confusionMatrix.rows();
+          //std::cerr << "average recognition rate: " << recRate << std::endl;
+
+          
+          plhs[0] = mxCreateDoubleScalar( recRate );
+
+          if(nlhs >= 2)
+            plhs[1] = convertMatrixFromNice(confusionMatrix);
+          if(nlhs >= 3)
+            plhs[2] = convertMatrixFromNice(scores);          
+          
+          
+        return;
+    }
+    
+    // store the classifier    
+    if ( !strcmp("store", cmd.c_str() ) || !strcmp("save", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( nrhs < 3 )
+            mexErrMsgTxt("store: no destination given.");        
+               
+        std::string s_destination = convertMatlabToString( prhs[2] );
+          
+        std::filebuf fb;
+        fb.open ( s_destination.c_str(), ios::out );
+        std::ostream os(&fb);
+        //
+        classifier->store( os );
+        //   
+        fb.close();        
+            
+        return;
+    }
+    
+    // load classifier from external file    
+    if ( !strcmp("restore", cmd.c_str() ) || !strcmp("load", cmd.c_str() ) )
+    {
+        // Check parameters
+        if ( nrhs < 3 )
+            mexErrMsgTxt("restore: no destination given.");        
+               
+        std::string s_destination = convertMatlabToString( prhs[2] );
+        
+        std::cerr << " attempting to restore the classifier from " << s_destination << std::endl;
+          
+        std::filebuf fbIn;
+        fbIn.open ( s_destination.c_str(), ios::in );
+        std::istream is (&fbIn);
+        //
+        classifier->restore( is );
+        //   
+        fbIn.close();        
+            
+        return;
+    }    
+    
+    
+    // Got here, so command not recognized
+    
+    std::string errorMsg (cmd.c_str() );
+    errorMsg += " -- command not recognized.";
+    mexErrMsgTxt( errorMsg.c_str() );
+
+}
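
For readers who prefer plain C++ over the mex layer, the train/classify sequence the wrapper performs boils down to the following sketch (written against the calls used above; config keys and data values are illustrative):

    #include <vector>
    #include <core/basics/Config.h>
    #include <core/vector/SparseVectorT.h>
    #include <core/vector/VectorT.h>
    #include "gp-hik-core/GPHIKClassifier.h"

    int main ()
    {
      // settings end up in the "GPHIKClassifier" section, as in parseParameters
      NICE::Config conf;
      conf.sD( "GPHIKClassifier", "noise", 0.1 );

      NICE::GPHIKClassifier * classifier = new NICE::GPHIKClassifier ( &conf );

      // two toy examples with non-negative class labels
      std::vector< NICE::SparseVector * > examplesTrain;
      examplesTrain.push_back( new NICE::SparseVector( NICE::Vector( 2, 0.25 ) ) );
      examplesTrain.push_back( new NICE::SparseVector( NICE::Vector( 2, 0.75 ) ) );
      NICE::Vector yMultiTrain ( 2 );
      yMultiTrain[0] = 0;
      yMultiTrain[1] = 1;

      classifier->train ( examplesTrain, yMultiTrain );

      int result;
      NICE::SparseVector scores;
      classifier->classify ( examplesTrain[0], result, scores );

      // clean up
      for ( size_t i = 0; i < examplesTrain.size(); i++ )
        delete examplesTrain[i];
      delete classifier;

      return 0;
    }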

+ 5 - 0
matlab/Makefile

@@ -0,0 +1,5 @@
+NICEFLAGS1=$(shell pkg-config libgp-hik-core --cflags --libs)
+NICEFLAGS=$(subst -fopenmp,,$(NICEFLAGS1))
+
+default:
+	/home/matlab/7.14/bin/mex ${NICEFLAGS} -largeArrayDims GPHIK.cpp 

+ 142 - 0
matlab/classHandleMtoC.h

@@ -0,0 +1,142 @@
+/** 
+* @file classHandleMtoC.h
+* @brief Generic class to pass C++ objects to matlab (Interface and inline implementations)
+* @author Alexander Freytag
+* @date 19-12-2013 (dd-mm-yyyy)
+
+*/
+#ifndef _NICE_CLASSHANDLEMTOCINCLUDE
+#define _NICE_CLASSHANDLEMTOCINCLUDE
+
+#include "mex.h"
+#include <stdint.h>
+#include <iostream>
+#include <string>
+#include <cstring>
+#include <typeinfo>
+
+#define CLASS_HANDLE_SIGNATURE 0xFF00F0A3
+
+  /** 
+  * @class ClassHandle
+  * @brief Generic class to pass C++ objects to matlab
+  * @author Alexander Freytag
+  */
+template<class objectClass> class ClassHandle
+{
+  private:
+      //! unique signature to verify that this handle refers to a valid object
+      uint32_t i_mySignature;
+      
+      //! typeid.name of the object class we refer to
+      std::string s_myName;
+      
+      //! the actual pointer to our C++ object
+      objectClass* p_myPtr;  
+  
+  public:
+    
+    /**
+    * @brief standard constructor
+    *
+    * @param ptr pointer to the c++ object
+    */    
+      ClassHandle ( objectClass* p_ptr ) : p_myPtr(p_ptr), s_myName( typeid(objectClass).name() )
+      {
+        i_mySignature = CLASS_HANDLE_SIGNATURE;
+      }
+      
+    /**
+    * @brief standard destructor
+    */        
+      ~ClassHandle()
+      {
+          // reset internal variables
+          i_mySignature = 0;
+          
+          // clean up data
+          delete p_myPtr;
+      }
+      
+    /**
+    * @brief check whether the class handle was initialized properly, i.e., we point to an actual object
+    */       
+      bool isValid()
+      { 
+        return ( (i_mySignature == CLASS_HANDLE_SIGNATURE) && !strcmp( s_myName.c_str(), typeid(objectClass).name() )   );
+      }
+      
+    /**
+    * @brief get the pointer to the actual object
+    */       
+      objectClass * getPtrToObject()
+      { 
+        return p_myPtr;
+      }
+
+
+};
+
+
+////////////////////////////////////////////
+//           conversion methods           //
+////////////////////////////////////////////
+
+/**
+* @brief convert handle to C++ object into matlab usable data
+*/ 
+template<class objectClass> inline mxArray *convertPtr2Mat(objectClass *ptr)
+{
+    // prevent the user from clearing the mex file! Otherwise, memory leaks might be caused
+    mexLock();
+    
+    // allocate memory
+    mxArray *out = mxCreateNumericMatrix(1, 1, mxUINT64_CLASS, mxREAL);
+    
+    // convert handle to matlab usable data
+    *((uint64_t *)mxGetData(out)) = reinterpret_cast<uint64_t>(new ClassHandle<objectClass>(ptr));
+    
+    return out;
+}
+
+/**
+* @brief convert matlab usable data referring to an object into handle to C++ object
+*/ 
+template<class objectClass> inline ClassHandle<objectClass> *convertMat2HandlePtr(const mxArray *in)
+{
+    // check that the given pointer actually points to a real object
+    if ( ( mxGetNumberOfElements(in) != 1 )     ||
+         ( mxGetClassID(in) != mxUINT64_CLASS ) ||
+           mxIsComplex(in)
+       )
+        mexErrMsgTxt("Input must be a real uint64 scalar.");
+        
+    ClassHandle<objectClass> *ptr = reinterpret_cast<ClassHandle<objectClass> *>(*((uint64_t *)mxGetData(in)));
+    
+    if (!ptr->isValid())
+        mexErrMsgTxt("Handle not valid.");
+    
+    return ptr;
+}
+
+/**
+* @brief convert matlab usable data referring to an object into direct pointer to the underlying C++ object
+*/ 
+template<class objectClass> inline objectClass *convertMat2Ptr(const mxArray *in)
+{
+    return convertMat2HandlePtr<objectClass>(in)->getPtrToObject();
+}
+
+/**
+* @brief delete the C++ object referenced by the given matlab handle and unlock the mex file
+*/ 
+template<class objectClass> inline void destroyObject(const mxArray *in)
+{
+    // clean up
+    delete convertMat2HandlePtr<objectClass>(in);
+    
+    // storage is freed, so users can safely clear the mex file again at any time...
+    mexUnlock();
+}
+
+#endif // _NICE_CLASSHANDLEMTOCINCLUDE
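
Taken together, the intended round trip inside the mex dispatcher looks like this (sketch only; assumes the mex build environment and the GPHIK.cpp command handling shown above):

    // "new": wrap a freshly created C++ object into a uint64 handle for MATLAB
    NICE::GPHIKClassifier * classifier = new NICE::GPHIKClassifier ( &conf );
    plhs[0] = convertPtr2Mat<NICE::GPHIKClassifier>( classifier );

    // any later command: unwrap the handle MATLAB passes back in
    NICE::GPHIKClassifier * obj = convertMat2Ptr<NICE::GPHIKClassifier>( prhs[1] );

    // "delete": free the wrapped object and unlock the mex file
    destroyObject<NICE::GPHIKClassifier>( prhs[1] );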

+ 75 - 18
parameterizedFunctions/PFAbsExp.h

@@ -1,13 +1,19 @@
 /** 
 * @file PFAbsExp.h
-* @author Erik Rodner
+* @author Erik Rodner, Alexander Freytag
 * @brief Parameterized Function: absolute value and exponential operation -- pow(fabs(x), exponent) (Interface + Implementation)
 * @date 01/04/2012
 */
 #ifndef _NICE_PFABSEXPINCLUDE
 #define _NICE_PFABSEXPINCLUDE
 
+// STL includes
 #include <math.h>
+
+// NICE-core includes
+#include <core/vector/VectorT.h>
+
+// gp-hik-core includes
 #include "ParameterizedFunction.h"
 
 namespace NICE {
@@ -15,7 +21,7 @@ namespace NICE {
  /** 
  * @class PFAbsExp
  * @brief Parameterized Function: absolute value and exponential operation -- pow(fabs(x), exponent)
- * @author Erik Rodner
+ * @author Erik Rodner, Alexander Freytag
  */
  
 class PFAbsExp : public ParameterizedFunction
@@ -40,16 +46,13 @@ class PFAbsExp : public ParameterizedFunction
   ~PFAbsExp(){};
     
   double f ( uint index, double x ) const { 
-/*        std::cerr << "upperBound: " << upperBound << std::endl;
-    std::cerr << "lowerBound: " << lowerBound << std::endl;
-    std::cerr << "m_parameters: " << m_parameters << std::endl;   */ 
     return pow(fabs(x),m_parameters[0]); 
   }
 
   bool isOrderPreserving() const { return true; };
 
-  Vector getParameterUpperBounds() const { return Vector(1, upperBound); };
-  Vector getParameterLowerBounds() const { return Vector(1, lowerBound); };
+  Vector getParameterUpperBounds() const { return NICE::Vector(1, upperBound); };
+  Vector getParameterLowerBounds() const { return NICE::Vector(1, lowerBound); };
   
   void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
   void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
@@ -59,27 +62,81 @@ class PFAbsExp : public ParameterizedFunction
   {
     if (is.good())
     {
-      is.precision (std::numeric_limits<double>::digits10 + 1);
+      is.precision (std::numeric_limits<double>::digits10 + 1); 
+      
+      std::string tmp;    
+
+      bool b_endOfBlock ( false ) ;
+      
+      while ( !b_endOfBlock )
+      {
+	is >> tmp; // start of block 
+	
+	if ( this->isEndTag( tmp, "PFAbsExp" ) )
+	{
+	  b_endOfBlock = true;
+	  continue;
+	}
+		    
+	
+	tmp = this->removeStartTag ( tmp );
+	
+	if ( tmp.compare("upperBound") == 0 )
+	{
+	  is >> upperBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );
+	}
+	else if ( tmp.compare("lowerBound") == 0 )
+	{
+	  is >> lowerBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );	    
+	}
+	else if ( tmp.compare("ParameterizedFunction") == 0 )
+	{
+	  // restore parent object
+	  ParameterizedFunction::restore(is);
+	}	
+	else
+	{
+	  std::cerr << "WARNING -- unexpected section in PFAbsExp object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	  throw;	
+	}      
+      }
       
-      std::string tmp;
-      is >> tmp;
-      is >> upperBound;
 
-      is >> tmp;
-      is >> lowerBound;      
     }
-    ParameterizedFunction::restore(is);
-    
+    else
+    {
+      std::cerr << "PFAbsExp::restore -- InStream not initialized - restoring not possible!" << std::endl;
+    }   
   };
+  
   virtual void store ( std::ostream & os, int format = 0 ) const
   {
     if (os.good())
     {
+      // show starting point
+      os << this->createStartTag( "PFAbsExp" ) << std::endl;      
+      
       os.precision (std::numeric_limits<double>::digits10 + 1); 
-      os << "upperBound: " << std::endl <<  upperBound << std::endl;
-      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+
+      os << this->createStartTag( "upperBound" ) << std::endl;
+      os << upperBound << std::endl;
+      os << this->createEndTag( "upperBound" ) << std::endl; 
+      
+      os << this->createStartTag( "lowerBound" ) << std::endl;
+      os << lowerBound << std::endl;
+      os << this->createEndTag( "lowerBound" ) << std::endl;
+      
+      // store parent object
+      ParameterizedFunction::store(os);      
+      
+      // done
+      os << this->createEndTag( "PFAbsExp" ) << std::endl; 
     }
-    ParameterizedFunction::store(os);
+
   };
   
   virtual void clear () {};

+ 72 - 14
parameterizedFunctions/PFExp.h

@@ -1,12 +1,18 @@
 /** 
 * @file PFExp.h
-* @author Erik Rodner
+* @author Erik Rodner, Alexander Freytag
 * @brief Parameterized Function: exponential operation -- exp(fabs(x), exponent) (Interface + Implementation)
 */
 #ifndef _NICE_PFEXPINCLUDE
 #define _NICE_PFEXPINCLUDE
 
+// STL includes
 #include <math.h>
+
+// NICE-core includes
+#include <core/vector/VectorT.h>
+
+// gp-hik-core includes
 #include "ParameterizedFunction.h"
 
 namespace NICE {
@@ -14,7 +20,7 @@ namespace NICE {
  /** 
  * @class PFExp
  * @brief Parameterized Function: exponential operation -- exp(fabs(x), exponent)
- * @author Erik Rodner
+ * @author Erik Rodner, Alexander Freytag
  */
  
 class PFExp : public ParameterizedFunction
@@ -42,8 +48,8 @@ class PFExp : public ParameterizedFunction
 
   bool isOrderPreserving() const { return true; };
 
-  Vector getParameterUpperBounds() const { return Vector(1, upperBound); };
-  Vector getParameterLowerBounds() const { return Vector(1, lowerBound); };
+  Vector getParameterUpperBounds() const { return NICE::Vector(1, upperBound); };
+  Vector getParameterLowerBounds() const { return NICE::Vector(1, lowerBound); };
   
   void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
   void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
@@ -53,26 +59,78 @@ class PFExp : public ParameterizedFunction
   {
     if (is.good())
     {
-      is.precision (std::numeric_limits<double>::digits10 + 1);
+      is.precision (std::numeric_limits<double>::digits10 + 1); 
       
-      std::string tmp;
-      is >> tmp;
-      is >> upperBound;
+      std::string tmp;    
 
-      is >> tmp;
-      is >> lowerBound;      
+      bool b_endOfBlock ( false ) ;
+      
+      while ( !b_endOfBlock )
+      {
+	is >> tmp; // start of block 
+	
+	if ( this->isEndTag( tmp, "PFExp" ) )
+	{
+	  b_endOfBlock = true;
+	  continue;
+	}
+		    
+	
+	tmp = this->removeStartTag ( tmp );
+	
+	if ( tmp.compare("upperBound") == 0 )
+	{
+	  is >> upperBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );	    
+	}
+	else if ( tmp.compare("lowerBound") == 0 )
+	{
+	  is >> lowerBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );    	    
+	}
+	else if ( tmp.compare("ParameterizedFunction") == 0 )
+	{
+	  // restore parent object
+	  ParameterizedFunction::restore(is);
+	}
+	else
+	{
+	  std::cerr << "WARNING -- unexpected PFExp object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	  throw;	
+	}  
+      }
+    }
+    else
+    {
+      std::cerr << "PFExp::restore -- InStream not initialized - restoring not possible!" << std::endl;
     }
-    ParameterizedFunction::restore(is);
   };
   virtual void store ( std::ostream & os, int format = 0 ) const
   {
     if (os.good())
     {
+      // show starting point
+      os << this->createStartTag( "PFExp" ) << std::endl;      
+      
       os.precision (std::numeric_limits<double>::digits10 + 1); 
-      os << "upperBound: " << std::endl <<  upperBound << std::endl;
-      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+
+      os << this->createStartTag( "upperBound" ) << std::endl;
+      os << upperBound << std::endl;
+      os << this->createEndTag( "upperBound" ) << std::endl; 
+      
+      os << this->createStartTag( "lowerBound" ) << std::endl;
+      os << lowerBound << std::endl;
+      os << this->createEndTag( "lowerBound" ) << std::endl;
+      
+      // store parent object
+      ParameterizedFunction::store(os); 
+      
+      // done
+      os << this->createEndTag( "PFExp" ) << std::endl;         
     }
-    ParameterizedFunction::store(os);
+
   };
   virtual void clear () {};
   

+ 131 - 12
parameterizedFunctions/PFMKL.h

@@ -7,7 +7,13 @@
 #ifndef _NICE_PFMULTIPLEKERNELLEARNINGINCLUDE
 #define _NICE_PFMULTIPLEKERNELLEARNINGINCLUDE
 
+// STL includes
 #include <math.h>
+
+// NICE-core includes
+#include <core/vector/VectorT.h>
+
+// NICE-core includes
 #include "ParameterizedFunction.h"
 
 namespace NICE {
@@ -58,8 +64,8 @@ class PFMKL : public ParameterizedFunction
 
   bool isOrderPreserving() const { return true; };
 
-  Vector getParameterUpperBounds() const { return Vector(m_parameters.size(), upperBound); };
-  Vector getParameterLowerBounds() const { return Vector(m_parameters.size(), lowerBound); };
+  Vector getParameterUpperBounds() const { return NICE::Vector(m_parameters.size(), upperBound); };
+  Vector getParameterLowerBounds() const { return NICE::Vector(m_parameters.size(), lowerBound); };
   
   void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
   void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
@@ -69,26 +75,139 @@ class PFMKL : public ParameterizedFunction
   {
     if (is.good())
     {
-      is.precision (std::numeric_limits<double>::digits10 + 1);
+      is.precision (std::numeric_limits<double>::digits10 + 1); 
       
-      std::string tmp;
-      is >> tmp;
-      is >> upperBound;
+      std::string tmp;    
 
-      is >> tmp;
-      is >> lowerBound;   
+      bool b_endOfBlock ( false ) ;
+      
+      while ( !b_endOfBlock )
+      {
+	is >> tmp; // start of block 
+	
+	if ( this->isEndTag( tmp, "PFMKL" ) )
+	{
+	  b_endOfBlock = true;
+	  continue;
+	}
+		    
+	
+	tmp = this->removeStartTag ( tmp );
+	
+	if ( tmp.compare("upperBound") == 0 )
+	{
+	  is >> upperBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );	    
+	}
+	else if ( tmp.compare("lowerBound") == 0 )
+	{
+	  is >> lowerBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );	    
+	}
+	else if ( tmp.compare("steps") == 0 )
+	{
+	    is >> tmp; // start of block 
+	    
+	    int numberOfSteps;
+	    if ( ! this->isStartTag( tmp, "numberOfSteps" ) )
+	    {
+	      std::cerr << "Attempt to restore PFMKL, but found no information about numberOfSteps elements. Aborting..." << std::endl;
+	      throw;
+	    }
+	    else
+	    {
+	      is >> numberOfSteps;
+	      is >> tmp; // end of block 
+	      tmp = this->removeEndTag ( tmp );     
+	    }
+	    
+	    is >> tmp; // start of block 
+	    
+	    if ( ! this->isStartTag( tmp, "stepInfo" ) )
+	    {
+	      std::cerr << "Attempt to restore PFMKL, but found no stepInfo. Aborting..." << std::endl;
+	      throw;
+	    }
+	    else
+	    {
+	      steps.clear();
+	      
+	      for ( int tmpCnt = 0; tmpCnt < numberOfSteps; tmpCnt++)
+	      {
+		int tmpStep;
+		is >> tmpStep;
+		steps.insert ( tmpStep ); 
+	      }
+	      
+	      is >> tmp; // end of block 
+	      tmp = this->removeEndTag ( tmp ); 	      
+	    }
+	    
+	    is >> tmp; // end of the enclosing "steps" block 
+	    tmp = this->removeEndTag ( tmp );
+	}
+	else if ( tmp.compare("ParameterizedFunction") == 0 )
+	{
+	  // restore parent object
+	  ParameterizedFunction::restore(is);
+	}	
+	else
+	{
+	  std::cerr << "WARNING -- unexpected PFMKL object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	  throw;	
+	}
+      }
+    }
+    else
+    {
+      std::cerr << "PFMKL::restore -- InStream not initialized - restoring not possible!" << std::endl;
     }
-    ParameterizedFunction::restore(is);
   };  
   virtual void store ( std::ostream & os, int format = 0 ) const
   {
     if (os.good())
     {
+      // show starting point
+      os << this->createStartTag( "PFMKL" ) << std::endl;      
+      
       os.precision (std::numeric_limits<double>::digits10 + 1); 
-      os << "upperBound: " << std::endl <<  upperBound << std::endl;
-      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+
+      os << this->createStartTag( "upperBound" ) << std::endl;
+      os << upperBound << std::endl;
+      os << this->createEndTag( "upperBound" ) << std::endl; 
+      
+      os << this->createStartTag( "lowerBound" ) << std::endl;
+      os << lowerBound << std::endl;
+      os << this->createEndTag( "lowerBound" ) << std::endl;
+      
+      os << this->createStartTag( "steps" ) << std::endl;
+	os << this->createStartTag( "numberOfSteps" ) << std::endl;
+	os << steps.size() << std::endl;
+	os << this->createEndTag( "numberOfSteps" ) << std::endl;    
+	
+        os << this->createStartTag( "stepInfo" ) << std::endl;
+        for ( std::set<int>::const_iterator mySetIt = steps.begin(); mySetIt != steps.end(); mySetIt++)
+	  os << *mySetIt << " ";
+        os << std::endl;
+	os << this->createEndTag( "stepInfo" ) << std::endl;
+      os << this->createEndTag( "steps" ) << std::endl;      
+
+      // store parent object
+      ParameterizedFunction::store(os);       
+      
+      // done
+      os << this->createEndTag( "PFMKL" ) << std::endl; 
     }
-    ParameterizedFunction::store(os);
   };  
   virtual void clear () {};
   

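The steps set of PFMKL gets a nested block of its own: the element count is written first so that restore() knows how many integers to read back into the set. A sketch of the resulting stream fragment, assuming the same angle-bracket tag syntax and illustrative values:

    <steps>
    <numberOfSteps>
    3
    </numberOfSteps>
    <stepInfo>
    50 150 300 
    </stepInfo>
    </steps>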
+ 72 - 19
parameterizedFunctions/PFWeightedDim.h

@@ -1,13 +1,19 @@
 /** 
 * @file PFWeightedDim.h
 * @brief Parameterized Function: weights for each dimension (Interface + Implementation)
-* @author Erik Rodner
+* @author Erik Rodner, Alexander Freytag
 
 */
 #ifndef _NICE_PFWEIGHTEDDIMINCLUDE
 #define _NICE_PFWEIGHTEDDIMINCLUDE
 
+// STL includes
 #include <math.h>
+
+// NICE-core includes
+#include <core/vector/VectorT.h>
+
+// NICE-core includes
 #include "ParameterizedFunction.h"
 
 namespace NICE {
@@ -15,7 +21,7 @@ namespace NICE {
  /** 
  * @class PFWeightedDim
  * @brief Parameterized Function: weights for each dimension
- * @author Erik Rodner
+ * @author Erik Rodner, Alexander Freytag
  */
  
 class PFWeightedDim : public ParameterizedFunction
@@ -24,7 +30,6 @@ class PFWeightedDim : public ParameterizedFunction
 
     double upperBound;
     double lowerBound;
-    uint dimension;
 
   public:
 
@@ -33,7 +38,6 @@ class PFWeightedDim : public ParameterizedFunction
             double uB = std::numeric_limits<double>::max() ) : 
             ParameterizedFunction(dimension) 
   { 
-    this->dimension = dimension;
     upperBound = uB;
     lowerBound = lB;
     if ( uB < 1.0 )
@@ -48,8 +52,8 @@ class PFWeightedDim : public ParameterizedFunction
 
   bool isOrderPreserving() const { return true; };
 
-  Vector getParameterUpperBounds() const { return Vector(m_parameters.size(), upperBound); };
-  Vector getParameterLowerBounds() const { return Vector(m_parameters.size(), lowerBound); };
+  Vector getParameterUpperBounds() const { return NICE::Vector(m_parameters.size(), upperBound); };
+  Vector getParameterLowerBounds() const { return NICE::Vector(m_parameters.size(), lowerBound); };
   
   void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
   void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
@@ -59,30 +63,79 @@ class PFWeightedDim : public ParameterizedFunction
   {
     if (is.good())
     {
-      is.precision (std::numeric_limits<double>::digits10 + 1);
+      is.precision (std::numeric_limits<double>::digits10 + 1); 
       
-      std::string tmp;
-      is >> tmp;
-      is >> upperBound;
+      std::string tmp;    
 
-      is >> tmp;
-      is >> lowerBound;   
+      bool b_endOfBlock ( false ) ;
       
-      is >> tmp;
-      is >> dimension;
+      while ( !b_endOfBlock )
+      {
+	is >> tmp; // start of block 
+	
+	if ( this->isEndTag( tmp, "PFWeightedDim" ) )
+	{
+	  b_endOfBlock = true;
+	  continue;
+	}
+		    
+	
+	tmp = this->removeStartTag ( tmp );
+	
+	if ( tmp.compare("upperBound") == 0 )
+	{
+	  is >> upperBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp );	    
+	}
+	else if ( tmp.compare("lowerBound") == 0 )
+	{
+	  is >> lowerBound;
+	  is >> tmp; // end of block 
+	  tmp = this->removeEndTag ( tmp ); 	    
+	}
+	else if ( tmp.compare("ParameterizedFunction") == 0 )
+	{
+	  // restore parent object
+	  ParameterizedFunction::restore(is);
+	}	
+	else
+	{
+	  std::cerr << "WARNING -- unexpected PFWeightedDim object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	  throw;	
+	}
+      }
+    }
+    else
+    {
+      std::cerr << "PFWeightedDim::restore -- InStream not initialized - restoring not possible!" << std::endl;
     }
-    ParameterizedFunction::restore(is);
   };  
   virtual void store ( std::ostream & os, int format = 0 ) const
   {
     if (os.good())
     {
+      // show starting point
+      os << this->createStartTag( "PFWeightedDim" ) << std::endl;      
+      
       os.precision (std::numeric_limits<double>::digits10 + 1); 
-      os << "upperBound: " << std::endl <<  upperBound << std::endl;
-      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
-      os << "dimension: " << std::endl << dimension << std::endl;
+
+      os << this->createStartTag( "upperBound" ) << std::endl;
+      os << upperBound << std::endl;
+      os << this->createEndTag( "upperBound" ) << std::endl; 
+      
+      os << this->createStartTag( "lowerBound" ) << std::endl;
+      os << lowerBound << std::endl;
+      os << this->createEndTag( "lowerBound" ) << std::endl;   
+      
+
+      // store parent object
+      ParameterizedFunction::store(os); 
+      
+      // done
+      os << this->createEndTag( "PFWeightedDim" ) << std::endl;
     }
-    ParameterizedFunction::store(os);
+
   };  
   virtual void clear () {};
   

+ 51 - 8
parameterizedFunctions/ParameterizedFunction.cpp

@@ -1,12 +1,14 @@
 /** 
 * @file ParameterizedFunction.cpp
 * @brief Simple parameterized multi-dimensional function (Implementation)
-* @author Erik Rodner
+* @author Erik Rodner, Alexander Freytag
 * @date 01/04/2012
-
 */
+
+// STL includes
 #include <iostream>
 
+// NICE-core includes
 #include "ParameterizedFunction.h"
 
 using namespace NICE;
@@ -36,11 +38,42 @@ void ParameterizedFunction::restore ( std::istream & is, int format )
 {
   if (is.good())
   {
-    is.precision (numeric_limits<double>::digits10 + 1);
+    is.precision (std::numeric_limits<double>::digits10 + 1); 
+    
+    std::string tmp;    
+
+    bool b_endOfBlock ( false ) ;
     
-    string tmp;
-    is >> tmp;
-    is >> m_parameters;
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "ParameterizedFunction" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }
+                  
+      
+      tmp = this->removeStartTag ( tmp );
+      
+      if ( tmp.compare("m_parameters") == 0 )
+      {
+          is >> m_parameters;
+      }
+      else
+      {
+	std::cerr << "WARNING -- unexpected ParameterizedFunction object -- " << tmp << " -- for restoration... aborting" << std::endl;
+	throw;	
+      }
+      
+      is >> tmp; // end of block 
+      tmp = this->removeEndTag ( tmp );      
+    }
+   }
+  else
+  {
+    std::cerr << "ParameterizedFunction::restore -- InStream not initialized - restoring not possible!" << std::endl;
   }
 }
 
@@ -48,7 +81,17 @@ void ParameterizedFunction::store ( std::ostream & os, int format ) const
 {
   if (os.good())
   {
-    os.precision (numeric_limits<double>::digits10 + 1); 
-    os << "m_parameters: " << std::endl << m_parameters << std::endl;
+    // show starting point
+    os << this->createStartTag( "ParameterizedFunction" ) << std::endl;
+    
+    os.precision (std::numeric_limits<double>::digits10 + 1);
+    
+    os << this->createStartTag( "m_parameters" ) << std::endl;
+    os << m_parameters << std::endl;
+    os << this->createEndTag( "m_parameters" ) << std::endl;   
+    
+    
+    // done
+    os << this->createEndTag( "ParameterizedFunction" ) << std::endl;   
   }
 };

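A minimal round-trip check for the new persistence code, sketched here for PFAbsExp with illustrative values. Note that the parsing loops above treat the very first token they read as an inner block tag, so the opening tag of the object itself is expected to have been consumed already by the caller that dispatched on the type:

    #include <sstream>
    #include <string>
    #include "gp-hik-core/parameterizedFunctions/PFAbsExp.h"

    int main ()
    {
      NICE::PFAbsExp pfOut ( 1.2 );   // exponent parameter, illustrative value
      std::stringstream ss;
      pfOut.store ( ss );             // writes <PFAbsExp> ... </PFAbsExp>

      std::string tag;
      ss >> tag;                      // consume the opening <PFAbsExp> token

      NICE::PFAbsExp pfIn ( 1.0 );    // parameters are overwritten during restore
      pfIn.restore ( ss );            // reads until the matching end tag

      return 0;
    }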
+ 8 - 5
parameterizedFunctions/ParameterizedFunction.h

@@ -1,16 +1,19 @@
 /** 
 * @file ParameterizedFunction.h
 * @brief Simple parameterized multi-dimensional function (Interface)
-* @author Erik Rodner
+* @author Erik Rodner, Alexander Freytag
 * @date 01/04/2012
 */
 #ifndef _NICE_PARAMETERIZEDFUNCTIONINCLUDE
 #define _NICE_PARAMETERIZEDFUNCTIONINCLUDE
 
+// STL includes
 #include <vector>
 #include <limits>
 
-#include "core/basics/Persistent.h"
+// NICE-core includes
+#include <core/basics/Persistent.h>
+// 
 #include <core/vector/VectorT.h>
 #include <core/vector/SparseVectorT.h>
 
@@ -25,15 +28,15 @@ namespace NICE {
  * (1) f(0) = 0
  * (2) f is monotonically increasing
  *
- * @author Erik Rodner
+ * @author Erik Rodner, Alexander Freytag
  */
-class ParameterizedFunction : NICE::Persistent
+class ParameterizedFunction : public NICE::Persistent
 {
 
   protected:
 
     /** parameters of the function */
-    Vector m_parameters;
+    NICE::Vector m_parameters;
 
 
   public:

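Changing the inheritance from the implicit private default to public NICE::Persistent is what allows parameterized functions to be handled through the base interface, e.g. by generic serialization code. A small sketch of what this enables, assuming Persistent declares store() as a virtual const member as the overrides suggest:

    #include <iostream>
    #include <core/basics/Persistent.h>
    #include "gp-hik-core/parameterizedFunctions/PFAbsExp.h"

    // works for any Persistent object now that the base class is public
    void storeAny ( const NICE::Persistent & obj, std::ostream & os )
    {
      obj.store ( os );   // virtual dispatch to the most derived store()
    }

    int main ()
    {
      NICE::PFAbsExp pf ( 1.0 );
      storeAny ( pf, std::cout );   // ill-formed with private inheritance
      return 0;
    }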
+ 2 - 0
progfiles.cmake

@@ -2,6 +2,8 @@
 set(nice_segmentation_PROGFILES_SRC 
 ./progs/toyExample.cpp
 ./progs/completeEvaluationFastMinkernel.cpp
+./progs/classifyDatasetGPHIK.cpp
+./progs/toyExampleStoreRestore.cpp
 )
 
 set(nice_segmentation_PROGFILES_HDR

+ 6 - 6
progs/classifyDatasetGPHIK.cpp

@@ -19,7 +19,7 @@
 #include "gp-hik-core/GPHIKClassifier.h"
 
 
-void readSparseExamples ( const std::string & fn,  std::vector< NICE::SparseVector * > & examples, NICE::Vector & labels )
+void readSparseExamples ( const std::string & fn,  std::vector< const NICE::SparseVector * > & examples, NICE::Vector & labels )
 {
   // initially cleaning of variables
   examples.clear();
@@ -103,7 +103,7 @@ int main (int argc, char* argv[])
   // ========================================================================  
    
   // read training data
-  std::vector< NICE::SparseVector * > examplesTrain;
+  std::vector< const NICE::SparseVector * > examplesTrain;
   NICE::Vector labelsTrain;
   
   std::string s_fn_trainingSet = conf.gS("main", "trainset");
@@ -125,7 +125,7 @@ int main (int argc, char* argv[])
   // ========================================================================
   
   // read test data
-  std::vector< NICE::SparseVector * > examplesTest;
+  std::vector< const NICE::SparseVector * > examplesTest;
   NICE::Vector labelsTest;
   
   std::string s_fn_testSet = conf.gS("main", "testset");
@@ -145,7 +145,7 @@ int main (int argc, char* argv[])
   
   NICE::Matrix confusion ( i_noClassesTest, i_noClassesTrain, 0.0 );
   
-  for (std::vector< NICE::SparseVector *>::const_iterator itTestExamples = examplesTest.begin(); itTestExamples != examplesTest.end(); itTestExamples++, idx++)
+  for (std::vector< const NICE::SparseVector *>::const_iterator itTestExamples = examplesTest.begin(); itTestExamples != examplesTest.end(); itTestExamples++, idx++)
   {
     int classno_groundtruth = labelsTest( idx );
     int classno_predicted;
@@ -171,13 +171,13 @@ int main (int argc, char* argv[])
   // ========================================================================
   
   // release memory of feature vectors from training set
-  for (std::vector< NICE::SparseVector *>::const_iterator itTrainExamples = examplesTrain.begin(); itTrainExamples != examplesTrain.end(); itTrainExamples++ )
+  for (std::vector< const NICE::SparseVector *>::const_iterator itTrainExamples = examplesTrain.begin(); itTrainExamples != examplesTrain.end(); itTrainExamples++ )
   {
     delete *itTrainExamples;
   }
   
   // release memory of feature vectors from test set
-  for (std::vector< NICE::SparseVector *>::const_iterator itTestExamples = examplesTest.begin(); itTestExamples != examplesTest.end(); itTestExamples++ )
+  for (std::vector< const NICE::SparseVector *>::const_iterator itTestExamples = examplesTest.begin(); itTestExamples != examplesTest.end(); itTestExamples++ )
   {
     delete *itTestExamples;
   }

+ 125 - 37
progs/toyExample.cpp

@@ -5,14 +5,17 @@
 * @date 19-10-2012
 */
 
+// STL includes
 #include <iostream>
 #include <vector>
 
+// NICE-core includes
 #include <core/basics/Config.h>
 #include <core/basics/Timer.h>
 #include <core/vector/MatrixT.h>
 #include <core/vector/VectorT.h>
 
+// gp-hik-core includes
 #include "gp-hik-core/GPHIKClassifier.h"
 
 using namespace std; //C basics
@@ -23,6 +26,7 @@ int main (int argc, char* argv[])
   
   Config conf ( argc, argv );
   std::string trainData = conf.gS( "main", "trainData", "progs/toyExampleSmallScaleTrain.data" );
+  bool b_debug = conf.gB( "main", "debug", false );
 
   
   //------------- read the training data --------------
@@ -31,34 +35,56 @@ int main (int argc, char* argv[])
   NICE::Vector yBinTrain;
   NICE::Vector yMultiTrain;  
 
-  std::ifstream ifsTrain ( trainData.c_str() , ios::in );
-
-  if (ifsTrain.good() )
-  {
-    ifsTrain >> dataTrain;
-    ifsTrain >> yBinTrain;
-    ifsTrain >> yMultiTrain;
-    ifsTrain.close();  
+  if ( b_debug )
+  { 
+    dataTrain.resize(6,3);
+    dataTrain.set(0);
+    dataTrain(0,0) = 0.2; dataTrain(0,1) = 0.3; dataTrain(0,2) = 0.5;
+    dataTrain(1,0) = 0.3; dataTrain(1,1) = 0.2; dataTrain(1,2) = 0.5;    
+    dataTrain(2,0) = 0.9; dataTrain(2,1) = 0.0; dataTrain(2,2) = 0.1;
+    dataTrain(3,0) = 0.8; dataTrain(3,1) = 0.1; dataTrain(3,2) = 0.1;    
+    dataTrain(4,0) = 0.1; dataTrain(4,1) = 0.1; dataTrain(4,2) = 0.8;
+    dataTrain(5,0) = 0.1; dataTrain(5,1) = 0.0; dataTrain(5,2) = 0.9;    
+    
+    yMultiTrain.resize(6);
+    yMultiTrain[0] = 1; yMultiTrain[1] = 1;
+    yMultiTrain[2] = 2; yMultiTrain[3] = 2;
+    yMultiTrain[4] = 3; yMultiTrain[5] = 3;
   }
   else 
   {
-    std::cerr << "Unable to read training data, aborting." << std::endl;
-    return -1;
+    std::ifstream ifsTrain ( trainData.c_str() , ios::in );
+
+    if (ifsTrain.good() )
+    {
+      ifsTrain >> dataTrain;
+      ifsTrain >> yBinTrain;
+      ifsTrain >> yMultiTrain;
+      ifsTrain.close();  
+    }
+    else 
+    {
+      std::cerr << "Unable to read training data, aborting." << std::endl;
+      return -1;
+    }
   }
   
   //----------------- convert data to sparse data structures ---------
-  std::vector< NICE::SparseVector *> examplesTrain;
+  std::vector< const NICE::SparseVector *> examplesTrain;
   examplesTrain.resize( dataTrain.rows() );
   
-  std::vector< NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
   for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
   {
     *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
   }
   
+  std::cerr << "Number of training examples: " << examplesTrain.size() << std::endl;
+  
   //----------------- train our classifier -------------
-  conf.sB("GPHIKClassifier", "verbose", false);
+//   conf.sB("GPHIKClassifier", "verbose", false);
   GPHIKClassifier * classifier  = new GPHIKClassifier ( &conf );  
+    
   classifier->train ( examplesTrain , yMultiTrain );
   
   // ------------------------------------------
@@ -68,54 +94,116 @@ int main (int argc, char* argv[])
   
   //------------- read the test data --------------
   
+  
   NICE::Matrix dataTest;
   NICE::Vector yBinTest;
-  NICE::Vector yMultiTest;  
-  
-  std::string testData = conf.gS( "main", "testData", "progs/toyExampleTest.data" );  
-  std::ifstream ifsTest ( testData.c_str(), ios::in );
-  if (ifsTest.good() )
-  {
-    ifsTest >> dataTest;
-    ifsTest >> yBinTest;
-    ifsTest >> yMultiTest;
-    ifsTest.close();  
+  NICE::Vector yMultiTest; 
+    
+  if ( b_debug )
+  { 
+    dataTest.resize(1,3);
+    dataTest.set(0);
+    dataTest(0,0) = 0.3; dataTest(0,1) = 0.4; dataTest(0,2) = 0.3;
+    
+    yMultiTest.resize(1);
+    yMultiTest[0] = 1;
   }
   else 
-  {
-    std::cerr << "Unable to read test data, aborting." << std::endl;
-    return -1;
+  {  
+    std::string testData = conf.gS( "main", "testData", "progs/toyExampleTest.data" );  
+    std::ifstream ifsTest ( testData.c_str(), ios::in );
+    if (ifsTest.good() )
+    {
+      ifsTest >> dataTest;
+      ifsTest >> yBinTest;
+      ifsTest >> yMultiTest;
+      ifsTest.close();  
+    }
+    else 
+    {
+      std::cerr << "Unable to read test data, aborting." << std::endl;
+      return -1;
+    }
   }
   
-  //TODO adapt this to the actual number of classes
-  NICE::Matrix confusionMatrix(3, 3, 0.0);
+  // ------------------------------------------
+  // ------------- PREPARATION --------------
+  // ------------------------------------------   
+  
+  // determine classes known during training and corresponding mapping
+  // thereby allowing for non-continuous class labels
+  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  
+  int noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map<int,int> mapClNoToIdxTrain;
+  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  
+  // determine classes known during testing and corresponding mapping
+  // thereby allowing for non-continuous class labels
+  std::set<int> classesKnownTest;
+  classesKnownTest.clear();
+  
+
+  // determine which classes we have in our label vector
+  // -> MATLAB: myClasses = unique(y);
+  for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+  {
+    if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+    {
+      classesKnownTest.insert ( *it );
+    }
+  }          
+  
+  int noClassesKnownTest ( classesKnownTest.size() );  
+  std::map<int,int> mapClNoToIdxTest;
+  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
+  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+          
+  
+  NICE::Matrix confusionMatrix( noClassesKnownTraining, noClassesKnownTest, 0.0);
   
   NICE::Timer t;
   double testTime (0.0);
   
-  for (int i = 0; i < (int)dataTest.rows(); i++)
+  double uncertainty;
+  
+  int i_loopEnd  ( (int)dataTest.rows() );
+  
+  
+  for (int i = 0; i < i_loopEnd ; i++)
   {
-    //----------------- convert data to sparse data structures ---------
-    NICE::SparseVector * example =  new NICE::SparseVector( dataTest.getRow(i) );
-       
-    int result;
+    NICE::Vector example ( dataTest.getRow(i) );
     NICE::SparseVector scores;
-   
+    int result;
+    
     // and classify
     t.start();
-    classifier->classify( example, result, scores );
+    classifier->classify( &example, result, scores );
     t.stop();
     testTime += t.getLast();
     
-    confusionMatrix(result, yMultiTest[i]) += 1.0;
+    if ( b_debug )
+    {
+      std::cerr << " scores.size(): " << scores.size() << std::endl;
+      scores.store(std::cerr);
+      
+      classifier->predictUncertainty( &example, uncertainty );
+      std::cerr << " uncertainty: " << uncertainty << std::endl;
+    }
+    
+    confusionMatrix( mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
   }
   
+
   std::cerr << "Time for testing: " << testTime << std::endl;
   
   confusionMatrix.normalizeColumnsL1();
   std::cerr << confusionMatrix << std::endl;
 
-  std::cerr << "average recognition rate: " << confusionMatrix.trace()/confusionMatrix.rows() << std::endl;
+  std::cerr << "average recognition rate: " << confusionMatrix.trace()/confusionMatrix.cols() << std::endl;
   
   
   return 0;

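The preparation block above maps potentially non-continuous class labels to dense matrix indices before filling the confusion matrix. A worked instance with hypothetical labels makes the indexing concrete:

    #include <map>
    #include <set>

    int main ()
    {
      // training labels {1, 5, 7} are mapped to matrix indices {0, 1, 2}
      std::set<int> classes;
      classes.insert ( 1 ); classes.insert ( 5 ); classes.insert ( 7 );

      std::map<int,int> mapClNoToIdx;
      int idx ( 0 );
      for ( std::set<int>::const_iterator it = classes.begin(); it != classes.end(); it++, idx++ )
        mapClNoToIdx.insert ( std::pair<int,int> ( *it, idx ) );

      // predicting class 7 on a ground-truth class 5 example increments
      // confusionMatrix( mapClNoToIdx.find(7)->second, mapClNoToIdx.find(5)->second ),
      // i.e. the entry (2, 1)
      return 0;
    }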
+ 204 - 0
progs/toyExampleStoreRestore.cpp

@@ -0,0 +1,204 @@
+/** 
+* @file toyExampleStoreRestore.cpp
+* @brief Toy example demonstrating how to store a trained GPHIK classifier to disk and restore it afterwards
+* @author Alexander Freytag
+* @date 21-12-2013
+*/
+
+// STL includes
+#include <iostream>
+#include <vector>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKClassifier.h"
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+int main (int argc, char* argv[])
+{  
+  
+  NICE::Config conf ( argc, argv );
+  std::string trainData = conf.gS( "main", "trainData", "progs/toyExampleSmallScaleTrain.data" );
+  NICE::GPHIKClassifier * classifier;  
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yBinTrain;
+  NICE::Vector yMultiTrain; 
+
+  std::ifstream ifsTrain ( trainData.c_str() , ios::in );
+
+  if (ifsTrain.good() )
+  {
+    ifsTrain >> dataTrain;
+    ifsTrain >> yBinTrain;
+    ifsTrain >> yMultiTrain;
+    ifsTrain.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read training data, aborting." << std::endl;
+    return -1;
+  } 
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows() );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }  
+  
+  // TRAIN CLASSIFIER FROM SCRATCH
+  
+  classifier = new GPHIKClassifier ( &conf );  
+    
+  classifier->train ( examplesTrain , yMultiTrain );
+  
+  
+  // TEST STORING ABILITIES
+  
+  std::string s_destination_save ( "myClassifier.txt" ); // relative to the current working directory
+  
+  std::filebuf fbOut;
+  fbOut.open ( s_destination_save.c_str(), ios::out );
+  std::ostream os (&fbOut);
+  //
+  classifier->store( os );
+  //   
+  fbOut.close(); 
+  
+  
+  // TEST RESTORING ABILITIES
+    
+  NICE::GPHIKClassifier * classifierRestored = new GPHIKClassifier;  
+      
+  std::string s_destination_load ( "myClassifier.txt" ); // must match the path used for storing
+  
+  std::filebuf fbIn;
+  fbIn.open ( s_destination_load.c_str(), ios::in );
+  std::istream is (&fbIn);
+  //
+  classifierRestored->restore( is );
+  //   
+  fbIn.close();   
+  
+  // TEST both classifiers to produce equal results
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yBinTest;
+  NICE::Vector yMultiTest; 
+
+  std::string testData = conf.gS( "main", "testData", "progs/toyExampleTest.data" );  
+  std::ifstream ifsTest ( testData.c_str(), ios::in );
+  if (ifsTest.good() )
+  {
+    ifsTest >> dataTest;
+    ifsTest >> yBinTest;
+    ifsTest >> yMultiTest;
+    ifsTest.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read test data, aborting." << std::endl;
+    return -1;
+  }
+  
+  // ------------------------------------------
+  // ------------- PREPARATION --------------
+  // ------------------------------------------   
+  
+  // determine classes known during training and corresponding mapping
+  // thereby allowing for non-continuous class labels
+  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  
+  int noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map<int,int> mapClNoToIdxTrain;
+  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  
+  // determine classes known during testing and corresponding mapping
+  // thereby allowing for non-continuous class labels
+  std::set<int> classesKnownTest;
+  classesKnownTest.clear();
+  
+
+  // determine which classes we have in our label vector
+  // -> MATLAB: myClasses = unique(y);
+  for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+  {
+    if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+    {
+      classesKnownTest.insert ( *it );
+    }
+  }          
+  
+  int noClassesKnownTest ( classesKnownTest.size() );  
+  std::map<int,int> mapClNoToIdxTest;
+  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
+  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+          
+  
+  NICE::Matrix confusionMatrix         ( noClassesKnownTraining, noClassesKnownTest, 0.0);
+  NICE::Matrix confusionMatrixRestored ( noClassesKnownTraining, noClassesKnownTest, 0.0);
+  
+  NICE::Timer t;
+  double testTime (0.0);
+  
+  double uncertainty;
+  
+  int i_loopEnd  ( (int)dataTest.rows() );
+  
+  
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example ( dataTest.getRow(i) );
+    NICE::SparseVector scores;
+    int result;
+    
+    // classify with trained classifier 
+    t.start();
+    classifier->classify( &example, result, scores );
+    t.stop();
+    testTime += t.getLast();
+     
+    
+    confusionMatrix( mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+
+    // classify with restored classifier 
+    t.start();
+    classifierRestored->classify( &example, result, scores );
+    t.stop();
+    testTime += t.getLast();  
+    
+    confusionMatrixRestored( mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+    
+    
+  }  
+  
+  confusionMatrix.normalizeColumnsL1();
+  std::cerr << confusionMatrix << std::endl;
+
+  std::cerr << "average recognition rate: " << confusionMatrix.trace()/confusionMatrix.cols() << std::endl;
+
+  confusionMatrixRestored.normalizeColumnsL1();
+  std::cerr << confusionMatrixRestored << std::endl;
+
+  std::cerr << "average recognition rate of restored classifier: " << confusionMatrixRestored.trace()/confusionMatrixRestored.cols() << std::endl;
+  
+  
+  return 0;
+}

+ 3 - 0
testfiles.cmake

@@ -3,10 +3,13 @@ SET(nice_gp-hik-core_TESTFILES_SRC
 ./tests/TestVectorSorter.cpp
 ./tests/TestFeatureMatrixT.cpp
 ./tests/TestFastHIK.cpp
+./tests/TestGPHIKOnlineLearnable.cpp
 )
 
 SET(nice_gp-hik-core_TESTFILES_HDR
 ./tests/TestFastHIK.h
 ./tests/TestVectorSorter.h
 ./tests/TestFeatureMatrixT.h
+./tests/TestGPHIKOnlineLearnable.h
+./tests/TestGPHIKPersistent.h
 )

+ 32 - 486
tests/TestFastHIK.cpp

@@ -15,28 +15,6 @@
 
 #include "TestFastHIK.h"
 
-
-const bool verbose = false;
-const bool verboseStartEnd = true;
-const bool solveLinWithoutRand = false;
-const uint n = 20;//1500;//1500;//10;
-const uint d = 5;//200;//2;
-const uint numBins = 11;//1001;//1001;
-const uint solveLinMaxIterations = 1000;
-const double sparse_prob = 0.6;
-const bool smallTest = false;
-
-using namespace NICE;
-using namespace std;
-
-CPPUNIT_TEST_SUITE_REGISTRATION( TestFastHIK );
-
-void TestFastHIK::setUp() {
-}
-
-void TestFastHIK::tearDown() {
-}
-
 bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
 {
   bool result(true);
@@ -54,7 +32,6 @@ bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const doub
       break;
     } 
     
-//     std::cerr << "itA->size(): " << itA->size() << "itB->size(): " << itB->size() << std::endl;
     for(uint i = 0; (i < itA->size()) && (i < itB->size()); i++)
     {
       if (fabs((*itA)[i] - (*itB)[i]) > tolerance)
@@ -68,7 +45,6 @@ bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const doub
           break;        
     itA++;
     itB++;
-//     std::cerr << "foo" << std::endl;
   }
   
   return result;
@@ -91,6 +67,27 @@ bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const
   return result;
 }
 
+const bool verbose = false;
+const bool verboseStartEnd = true;
+const bool solveLinWithoutRand = false;
+const uint n = 30;        // e.g. 1500 for larger-scale tests
+const uint d = 5;         // e.g. 200 for higher-dimensional tests
+const uint numBins = 11;  // e.g. 1001 for a finer quantization
+const uint solveLinMaxIterations = 1000;
+const double sparse_prob = 0.6;
+const bool smallTest = false;
+
+using namespace NICE;
+using namespace std;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestFastHIK );
+
+void TestFastHIK::setUp() {
+}
+
+void TestFastHIK::tearDown() {
+}
+
 void TestFastHIK::testKernelMultiplication() 
 {
   if (verboseStartEnd)
@@ -639,13 +636,13 @@ void TestFastHIK::testLinSolve()
   if (verboseStartEnd)
     std::cerr << "================== TestFastHIK::testLinSolve ===================== " << std::endl;
 
-  Quantization q ( numBins );
+  NICE::Quantization q ( numBins );
 
   // data is generated, such that there is no approximation error
-  vector< vector<double> > dataMatrix;
+  std::vector< std::vector<double> > dataMatrix;
   for ( uint i = 0; i < d ; i++ )
   {
-    vector<double> v;
+    std::vector<double> v;
     v.resize(n);
     for ( uint k = 0; k < n; k++ ) {
       if ( drand48() < sparse_prob ) {
@@ -659,28 +656,27 @@ void TestFastHIK::testLinSolve()
   }
   
   if ( verbose ) {
-    cerr << "data matrix: " << endl;
+    std::cerr << "data matrix: " << std::endl;
     printMatrix ( dataMatrix );
-    cerr << endl;
+    std::cerr << std::endl;
   }
 
   double noise = 1.0;
-  FastMinKernel fmk ( dataMatrix, noise );
+  NICE::FastMinKernel fmk ( dataMatrix, noise );
   
-  ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
+  NICE::ParameterizedFunction *pf = new NICE::PFAbsExp ( 1.0 );
   fmk.applyFunctionToFeatureMatrix( pf );
-//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
 
-  Vector y ( n );  
+  NICE::Vector y ( n );  
   for ( uint i = 0; i < y.size(); i++ )
     y[i] = sin(i);
   
-  Vector alpha;
-  Vector alphaRandomized;
+  NICE::Vector alpha;
+  NICE::Vector alphaRandomized;
 
   std::cerr << "solveLin with randomization" << std::endl;
   // tic
-  Timer t;
+  NICE::Timer t;
   t.start();
   //let's try to do 10.000 iterations and sample in each iteration 30 examples randomly
   fmk.solveLin(y,alphaRandomized,q,pf,true,solveLinMaxIterations,30);
@@ -789,454 +785,4 @@ void TestFastHIK::testKernelVector()
   
 }
 
-void TestFastHIK::testAddExample()
-{
-  if (verboseStartEnd)
-    std::cerr << "================== TestFastHIK::testAddExample ===================== " << std::endl;  
-  
-  std::vector< std::vector<double> > dataMatrix;
-  int dim = 3;
-  int number = 5;
-  
-  if (!smallTest)
-  {
-    dim = d;
-    number = n;
-  }
-  
-  if (smallTest)
-  {
-    dataMatrix.resize(3);
-    //we explicitely give some values which can easily be verified
-    dataMatrix[0].push_back(0.2);dataMatrix[0].push_back(0.1);dataMatrix[0].push_back(0.0);dataMatrix[0].push_back(0.0);dataMatrix[0].push_back(0.4); 
-    dataMatrix[1].push_back(0.3);dataMatrix[1].push_back(0.6);dataMatrix[1].push_back(1.0);dataMatrix[1].push_back(0.4);dataMatrix[1].push_back(0.3);
-    dataMatrix[2].push_back(0.5);dataMatrix[2].push_back(0.3);dataMatrix[2].push_back(0.0);dataMatrix[2].push_back(0.6);dataMatrix[2].push_back(0.3);
-  }
-  else
-  {
-    // randomly generate features
-    generateRandomFeatures ( dim, number, dataMatrix );
-
-    // and make them sparse
-    int nrZeros(0);
-    for ( int i = 0 ; i < dim; i++ )
-    {
-      for ( int k = 0; k < number; k++ )
-        if ( drand48() < sparse_prob ) 
-        {
-          dataMatrix[i][k] = 0.0;
-          nrZeros++;
-        }
-    }    
-  }
-  
-  if ( verbose ) {
-    std::cerr << "data matrix: " << std::endl;
-    printMatrix ( dataMatrix );
-    std::cerr << endl;
-  }
-  
-  double noise = 1.0;
-  //check the features stored in the fmk
-  FastMinKernel fmk ( dataMatrix, noise );  
-  NICE::Vector alpha;
-  
-  ParameterizedFunction *pf = new PFAbsExp( 1.2 ); //1.0 is okay
-  fmk.applyFunctionToFeatureMatrix( pf );
-//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );  
-  
-  std::cerr << "generate alpha" << std::endl;
-  
-  if (smallTest)
-  {
-    //we explicitely give some values which can easily be verified
-    alpha = Vector(5,1.0);
-    alpha[0] = 0.1;alpha[1] = 0.2;alpha[2] = 0.4;alpha[3] = 0.8;alpha[4] = 1.6;
-  }
-  else
-  {  // randomly generate features
-     alpha = Vector::UniformRandom( number, 0.0, 1.0, 0 );
-  }
-  
-  
-  std::cerr << "generate xStar" << std::endl;
-  std::vector<double> xStar;
-  if (smallTest)
-  {
-    // we check the following cases: largest elem in dim, smallest elem in dim, zero element
-    // remember to adapt the feature in some lines apart as well    
-    xStar.push_back(0.9);xStar.push_back(0.0);xStar.push_back(0.1);
-  }
-  else
-  {
-    // again: random sampling
-    for ( int i = 0 ; i < dim; i++ )
-    {
-      if ( drand48() < sparse_prob ) 
-        xStar.push_back(0.0);
-      else
-        xStar.push_back(drand48());
-    }
-  }
-  NICE::Vector xStarVec (xStar);
-  NICE::SparseVector xStarSV (xStarVec);
-  
-  // check the alpha-preparations
-  NICE::VVector A;
-  NICE::VVector B;
-  fmk.hik_prepare_alpha_multiplications( alpha, A, B );
-  
-  //check the quantization and LUT construction
-  Quantization q ( numBins );  
-  //direct
-//   double * LUT = fmk.hikPrepareLookupTable(alpha, q);
-  //indirect
-  double * LUT = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, pf );
-  
-  //check for kernel vector norm approximation
-  NICE::VVector AForKVN;
-  fmk.hikPrepareKVNApproximation(AForKVN);
-  
-  //check the LUTs for fast kernel vector norm approximation
-  //direct
-  double* LUT_kernelVectorNormDirect = fmk.hikPrepareLookupTableForKVNApproximation(q, pf );
-  //indirect
-  double* LUT_kernelVectorNorm = fmk.hikPrepareKVNApproximationFast( AForKVN, q, pf );
-  
-  bool LUTKVN_equal( compareLUTs( LUT_kernelVectorNorm, LUT_kernelVectorNormDirect, q.size()*dim ) );
-  
-  if (verbose)
-  {
-    if (LUTKVN_equal == false)
-    {
-      std::cerr << "LUTKVN is not equal :( " << std::endl;
-        std::cerr << "LUT_kernelVectorNorm: " << std::endl;
-        for ( uint i = 0; i < q.size()*dim; i++ )
-        {
-          if ( (i % q.size()) == 0)
-            std::cerr << std::endl;
-          std::cerr << LUT_kernelVectorNorm[i] << " ";
-        }
-        std::cerr << "LUT_kernelVectorNormDirect: "<< std::endl;
-        for ( uint i = 0; i < q.size()*dim; i++ )
-        {
-          if ( (i % q.size()) == 0)
-            std::cerr << std::endl;
-          std::cerr << LUT_kernelVectorNormDirect[i] << " ";
-        }      
-    }
-  }
-  CPPUNIT_ASSERT( LUTKVN_equal == true );
-  
-  if (verbose)
-    std::cerr << "start the incremental learning part" << std::endl;
-
-  // ------  Incremental Learning -----
-  
-  double newAlpha;
-  if (smallTest) 
-    newAlpha = 3.2;
-  else
-    newAlpha = drand48();
-  alpha.append(newAlpha);
-   
-  // add an example
-  if (verbose)
-    std::cerr << "addExample" << std::endl;  
-  fmk.addExample( xStarSV, pf );  
-  
-  // update the alpha preparation
-  if (verbose)  
-    std::cerr << "update Alpha Preparation" << std::endl;
-  fmk.updatePreparationForAlphaMultiplications( xStarSV, newAlpha, A, B, pf );
-  
-  // update the LUT for fast multiplications
-  if (verbose)  
-    std::cerr << "update LUT" << std::endl;
-  fmk.updateLookupTableForAlphaMultiplications( xStarSV, newAlpha, LUT, q, pf );
-  
-  //update VVector for Kernel vector norm
-  if (verbose)  
-    std::cerr << "update VVector for Kernel vector norm" << std::endl;
-  fmk.updatePreparationForKVNApproximation( xStarSV, AForKVN, pf );
-  
-  // update LUT for kernel vector norm
-  if (verbose)  
-    std::cerr << "update LUT for kernel vector norm" << std::endl;
-  fmk.updateLookupTableForKVNApproximation( xStarSV, LUT_kernelVectorNorm, q, pf );
-  
-  //and batch retraining  
-  if (verbose)  
-    std::cerr << "perform batch retraining " << std::endl;  
-  for ( int i = 0 ; i < dim; i++ )
-    dataMatrix[i].push_back(xStar[i]);
-  
-  FastMinKernel fmk2 ( dataMatrix, noise );
-  fmk2.applyFunctionToFeatureMatrix( pf );
-  
-  NICE::VVector A2;
-  NICE::VVector B2;
-  if (verbose)  
-    std::cerr << "prepare alpha multiplications" << std::endl;
-  fmk2.hik_prepare_alpha_multiplications( alpha, A2, B2 );
- 
-  // compare the content of the data matrix
-  if (verbose)  
-    std::cerr << "do the comparison of the resulting feature matrices" << std::endl;
-  if (verbose)
-  {
-    std::cerr << "fmk.featureMatrix().print()" << std::endl;
-    fmk.featureMatrix().print(std::cerr);
-  
-    std::cerr << "fmk2.featureMatrix().print()" << std::endl;
-    fmk2.featureMatrix().print(std::cerr);
-  }  
-  
-  CPPUNIT_ASSERT(fmk.featureMatrix() == fmk2.featureMatrix());
-
-  //compare the preparation for alpha multiplications
-  if (verbose)  
-    std::cerr << "do the comparison of the resulting matrices A and B" << std::endl;
-  CPPUNIT_ASSERT(compareVVector(A, A2));  
-  CPPUNIT_ASSERT(compareVVector(B, B2));
-  
-  if (verbose)
-  {
-    std::cerr << "compare the preparation for alpha multiplications" << std::endl;
-    std::cerr << "A: " << std::endl;
-    A.store(std::cerr);
-    std::cerr << "A2: " << std::endl;
-    A2.store(std::cerr);
-    std::cerr << "B: " << std::endl;
-    B.store(std::cerr);
-    std::cerr << "B2: " << std::endl;
-    B2.store(std::cerr);
-  }  
-  
-  // compare the resulting LUTs
-  if (verbose)
-    std::cerr << "prepare LUT" << std::endl;
-  double * LUT2 = fmk2.hikPrepareLookupTable( alpha, q, pf );    
-  if (verbose)
-    std::cerr << "do the comparison of the resulting LUTs" << std::endl;  
-  bool LUTequal( compareLUTs( LUT, LUT2, q.size()*dim) );
-  
-  if (verbose)
-  {
-    if ( LUTequal )
-      std::cerr << "LUTs are equal :) " << std::endl;
-    else
-    {
-      std::cerr << "LUTs are not equal :( " << std::endl;
-      std::cerr << "new feature vector: " << xStarVec << std::endl;
-      
-      std::cerr << "newAlpha: " << newAlpha <<  " alpha " << alpha << std::endl;
-      std::cerr << "LUT: " << std::endl;
-      for ( uint i = 0; i < q.size()*dim; i++ )
-      {
-        if ( (i % q.size()) == 0)
-          std::cerr << std::endl;
-        std::cerr << LUT[i] << " ";
-      }
-      std::cerr << "LUT2: "<< std::endl;
-      for ( uint i = 0; i < q.size()*dim; i++ )
-      {
-        if ( (i % q.size()) == 0)
-          std::cerr << std::endl;
-        std::cerr << LUT2[i] << " ";
-      }     
-    }
-  }
-  CPPUNIT_ASSERT( LUTequal );
-  
-  //check for kernel vector norm approximation
-  NICE::VVector A2ForKVN;
-  fmk2.hikPrepareKVNApproximation( A2ForKVN );
-  bool KVN_equal ( compareVVector(AForKVN, A2ForKVN) );
- 
-  if (verbose)
-  {
-    if ( KVN_equal )
-      std::cerr << "VVectors for kernel vector norm are equal :) " << std::endl;
-    else
-    {
-      std::cerr << "VVectors for vector norm are not equal :( " << std::endl;
-      std::cerr << "new feature vector: " << xStarVec << std::endl;
-      
-      std::cerr << "AForKVN: " << std::endl;
-      AForKVN.store(std::cerr);
-      
-      std::cerr << "A2ForKVN: "<< std::endl;
-      A2ForKVN.store(std::cerr);
-    }
-  }  
-  
-  CPPUNIT_ASSERT( KVN_equal );  
-  
-  //check for kernel vector norm approximation with LUTs
-  if (verbose)
-    std::cerr << "prepare LUT for kernel vector norm" << std::endl;
-  double* LUT2_kernelVectorNorm = fmk2.hikPrepareLookupTableForKVNApproximation( q, pf );  
-  if (verbose)
-    std::cerr << "do the comparison of the resulting LUTs for kernel vector norm computation" << std::endl;
-  bool LUT_KVN_equal( compareLUTs ( LUT_kernelVectorNorm, LUT2_kernelVectorNorm, q.size()*dim ) );
-  
-  if (verbose)
-  {
-    if ( LUT_KVN_equal )
-      std::cerr << "LUTs for kernel vector norm are equal :) " << std::endl;
-    else
-    {
-      std::cerr << "LUTs kernel vector norm are not equal :( " << std::endl;
-      std::cerr << "new feature vector: " << xStarVec << std::endl;
-      
-      std::cerr << "LUT_kernelVectorNorm: " << std::endl;
-      for ( int i = 0; i < q.size()*dim; i++ )
-      {
-        if ( (i % q.size()) == 0)
-          std::cerr << std::endl;
-        std::cerr << LUT_kernelVectorNorm[i] << " ";
-      }
-      std::cerr << std::endl << "LUT2_kernelVectorNorm: "<< std::endl;
-      for ( uint i = 0; i < q.size()*dim; i++ )
-      {
-        if ( (i % q.size()) == 0)
-          std::cerr << std::endl;
-        std::cerr << LUT2_kernelVectorNorm[i] << " ";
-      }     
-    }
-  }  
-  
-  CPPUNIT_ASSERT( LUT_KVN_equal );
-  
-  delete [] LUT;
-  delete [] LUT2;
-  
-  delete [] LUT_kernelVectorNorm;
-  delete [] LUT2_kernelVectorNorm;
-  
-  if (verboseStartEnd)
-    std::cerr << "================== TestFastHIK::testAddExample done ===================== " << std::endl;  
-}
-
-void TestFastHIK::testAddMultipleExamples()
-{
-  if (verboseStartEnd)
-    std::cerr << "================== TestFastHIK::testAddMultipleExamples ===================== " << std::endl;  
-  
-  std::vector< std::vector<double> > dataMatrix;
-  int dim = d;
-  int number = n;
-
-  // randomly generate features
-  generateRandomFeatures ( dim, number, dataMatrix );
-
-  // and make them sparse
-  int nrZeros(0);
-  for ( int i = 0 ; i < dim; i++ )
-  {
-    for ( int k = 0; k < number; k++ )
-      if ( drand48() < sparse_prob ) 
-      {
-        dataMatrix[i][k] = 0.0;
-        nrZeros++;
-      }
-  }    
-  
-  if ( verbose ) {
-    std::cerr << "data matrix: " << std::endl;
-    printMatrix ( dataMatrix );
-    std::cerr << endl;
-  }
-  
-  double noise = 1.0;
-  //check the features stored in the fmk
-  FastMinKernel fmk ( dataMatrix, noise );  
-  NICE::Vector alpha;
-  
-  ParameterizedFunction *pf = new PFAbsExp( 1.0 ); //1.0 is okay
-  fmk.applyFunctionToFeatureMatrix( pf );
-  
-  std::cerr << "generate alpha" << std::endl;  
-  
-  // randomly generate features
-  alpha = Vector::UniformRandom( number, 0.0, 1.0, 0 );
-   
-/*  // check the alpha-preparations
-  NICE::VVector A;
-  NICE::VVector B;
-  fmk.hik_prepare_alpha_multiplications( alpha, A, B );*/  
-  
-  if (verbose)
-    std::cerr << "start the incremental learning part" << std::endl;
-
-  // ------  Incremental Learning -----
-    
-  std::cerr << "generate xStar" << std::endl;
-  std::vector<NICE::SparseVector > newExamples;
-  int nrOfNewExamples(5);
-  // again: random sampling
-  for (int i = 0; i < nrOfNewExamples; i++)
-  {
-    NICE::Vector xStar(dim);
-    for ( int j = 0 ; j < dim; j++ )
-    {
-      if ( drand48() < sparse_prob ) 
-      {
-        xStar[j] = 0.0;
-        dataMatrix[j].push_back(0.0);
-      }
-      else
-      {
-        double tmp(drand48());
-        xStar[j] = tmp;
-        dataMatrix[j].push_back(tmp);
-      }
-    }
-    
-    NICE::SparseVector xStarSV (xStar);
-    newExamples.push_back(xStarSV);
-  }    
-
-  // add an example
-  if (verbose)
-    std::cerr << "addExample" << std::endl;  
-  for (int i = 0; i < nrOfNewExamples; i++)
-  {
-    fmk.addExample( newExamples[i], pf );  
-  }
-  
-  int oldSize(alpha.size());
-  alpha.resize( oldSize + nrOfNewExamples);
-  for (int i = 0; i < nrOfNewExamples; i++)
-  {
-    alpha[oldSize + i] = drand48();
-  }
-   
-  
-  // update the alpha preparation
-  if (verbose)  
-    std::cerr << "update Alpha Preparation" << std::endl;
-  // check the alpha-preparations
-  NICE::VVector A;
-  NICE::VVector B;
-  fmk.hik_prepare_alpha_multiplications( alpha, A, B );   
-  
-  FastMinKernel fmk2 ( dataMatrix, noise );
-  fmk2.applyFunctionToFeatureMatrix( pf );  
-  
-  NICE::VVector A2;
-  NICE::VVector B2;
-  fmk2.hik_prepare_alpha_multiplications( alpha, A2, B2 );
-  
-  bool equalA = compareVVector( A, A2 );
-  bool equalB = compareVVector( B, B2 );
-  
-  CPPUNIT_ASSERT(equalA == true);
-  CPPUNIT_ASSERT(equalB == true);  
-  
-  if (verboseStartEnd)
-    std::cerr << "================== TestFastHIK::testAddMultipleExamples done ===================== " << std::endl;  
-}
-
 #endif

+ 0 - 4
tests/TestFastHIK.h

@@ -19,8 +19,6 @@ class TestFastHIK : public CppUnit::TestFixture {
     CPPUNIT_TEST(testLUTUpdate);
     CPPUNIT_TEST(testLinSolve);
     CPPUNIT_TEST(testKernelVector);
-    CPPUNIT_TEST(testAddExample);
-    CPPUNIT_TEST(testAddMultipleExamples);
     
     CPPUNIT_TEST_SUITE_END();
   
@@ -40,8 +38,6 @@ class TestFastHIK : public CppUnit::TestFixture {
     void testLUTUpdate();
     void testLinSolve();
     void testKernelVector();
-    void testAddExample();
-    void testAddMultipleExamples();
 
 };
 

+ 1 - 1
tests/TestFeatureMatrixT.cpp

@@ -182,7 +182,7 @@ void TestFeatureMatrixT::testMatlabIO()
 #endif //#ifdef NICE_USELIB_MATIO
 
   if (verboseStartEnd)
-    std::cerr << "================== TestFeatureMatrixT::testMatlabIO done===================== " << std::endl;
+    std::cerr << "================== TestFeatureMatrixT::testMatlabIO done ===================== " << std::endl;
   
 }
 

+ 382 - 0
tests/TestGPHIKOnlineLearnable.cpp

@@ -0,0 +1,382 @@
+/** 
+ * @file TestGPHIKOnlineLearnable.cpp
+ * @brief CppUnit test case to verify that the GPHIKClassifier methods inherited from OnlineLearnable (addExample and addMultipleExamples) work as desired.
+ * @author Alexander Freytag
+ * @date 21-12-2013
+*/
+
+#ifdef NICE_USELIB_CPPUNIT
+
+// STL includes
+#include <iostream>
+#include <vector>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKClassifier.h"
+
+#include "TestGPHIKOnlineLearnable.h"
+
+using namespace std; //C basics
+using namespace NICE;  // nice-core
+
+const bool verboseStartEnd = true;
+
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKOnlineLearnable );
+
+void TestGPHIKOnlineLearnable::setUp() {
+}
+
+void TestGPHIKOnlineLearnable::tearDown() {
+}
+
+
+
+void readData ( const std::string filename, NICE::Matrix & data, NICE::Vector & yBin, NICE::Vector & yMulti )
+{
+ std::ifstream ifs ( filename.c_str() , ios::in );
+
+  if ( ifs.good() )
+  {
+    ifs >> data;
+    ifs >> yBin;
+    ifs >> yMulti;
+    ifs.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read data from file " << filename << " -- aborting." << std::endl;
+    CPPUNIT_ASSERT ( ifs.good() );
+  }    
+}
+
+void prepareLabelMappings (std::map<int,int> & mapClNoToIdxTrain, const GPHIKClassifier * classifier, std::map<int,int> & mapClNoToIdxTest, const NICE::Vector & yMultiTest)
+{
+  // determine classes known during training and corresponding mapping
+  // thereby allowing for non-continuous class labels
+  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  
+  int noClassesKnownTraining ( classesKnownTraining.size() );
+  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  
+  // determine classes known during testing and corresponding mapping
+  // thereby allowing for non-continuous class labels
+  std::set<int> classesKnownTest;
+  classesKnownTest.clear();
+  
+
+  // determine which classes we have in our label vector
+  // -> MATLAB: myClasses = unique(y);
+  for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+  {
+    if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+    {
+      classesKnownTest.insert ( *it );
+    }
+  }          
+  
+  int noClassesKnownTest ( classesKnownTest.size() );  
+  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
+  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  );   
+}
+
+void evaluateClassifier ( NICE::Matrix & confusionMatrix, 
+                          const NICE::GPHIKClassifier * classifier, 
+                          const NICE::Matrix & data,
+                          const NICE::Vector & yMulti,
+                          const std::map<int,int> & mapClNoToIdxTrain,
+                          const std::map<int,int> & mapClNoToIdxTest
+                        ) 
+{
+  int i_loopEnd  ( (int)data.rows() );  
+  
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example ( data.getRow(i) );
+    NICE::SparseVector scores;
+    int result;    
+    
+    // classify with incrementally trained classifier 
+    classifier->classify( &example, result, scores );
+    
+    confusionMatrix( mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMulti[i])->second ) += 1.0;
+  }
+}
+
+void TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty ===================== " << std::endl;  
+  
+  NICE::Config conf;
+  
+  conf.sB ( "GPHIKClassifier", "eig_verbose", false);
+  conf.sS ( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  
+  std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yBinTrain;
+  NICE::Vector yMultiTrain; 
+  
+  readData ( s_trainData, dataTrain, yBinTrain, yMultiTrain );
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows() );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }
+  
+  //create classifier object
+  NICE::GPHIKClassifier * classifier;
+  classifier = new NICE::GPHIKClassifier ( &conf );  
+  bool performOptimizationAfterIncrement ( false );
+
+  // add training samples, but without running training method first
+  classifier->addMultipleExamples ( examplesTrain, yMultiTrain, performOptimizationAfterIncrement );
+  
+  // create second object trained in the standard way
+  NICE::GPHIKClassifier * classifierScratch = new NICE::GPHIKClassifier ( &conf );
+  classifierScratch->train ( examplesTrain, yMultiTrain );
+  
+    
+  // TEST both classifiers to produce equal results
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yBinTest;
+  NICE::Vector yMultiTest; 
+  
+  std::string s_testData = conf.gS( "main", "testData", "toyExampleTest.data" );  
+  
+  readData ( s_testData, dataTest, yBinTest, yMultiTest );
+
+    
+  // ------------------------------------------
+  // ------------- PREPARATION --------------
+  // ------------------------------------------   
+  
+  // determine classes known during training/testing and corresponding mapping
+  // thereby allow for non-continuous class labels  
+  std::map<int,int> mapClNoToIdxTrain;
+  std::map<int,int> mapClNoToIdxTest;
+  prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
+  
+  
+  NICE::Matrix confusionMatrix         ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
+  NICE::Matrix confusionMatrixScratch  ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
+  
+    
+  // ------------------------------------------
+  // ------------- CLASSIFICATION --------------
+  // ------------------------------------------  
+  evaluateClassifier ( confusionMatrix, classifier, dataTest, yMultiTest,
+                          mapClNoToIdxTrain,mapClNoToIdxTest ); 
+  
+  evaluateClassifier ( confusionMatrixScratch, classifierScratch, dataTest, yMultiTest,
+                          mapClNoToIdxTrain,mapClNoToIdxTest );  
+  
+    
+  // post-process confusion matrices
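+  // hint: after L1-normalizing the columns, each diagonal entry is the recall of
+  // the corresponding class, so trace/noClasses yields the average recognition rate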
+  confusionMatrix.normalizeColumnsL1();
+  double arr ( confusionMatrix.trace()/confusionMatrix.cols() );
+
+  confusionMatrixScratch.normalizeColumnsL1();
+  double arrScratch ( confusionMatrixScratch.trace()/confusionMatrixScratch.cols() );
+
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( arr, arrScratch, 1e-8);
+  
+  // don't waste memory
+  
+  delete classifier;
+  delete classifierScratch;  
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  }
+  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningStartEmpty done ===================== " << std::endl;   
+}
+
+void TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary ===================== " << std::endl;  
+  
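+  // NOTE: the actual test body for the OCC-to-binary scenario is not implemented yet
+  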
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningOCCtoBinary done ===================== " << std::endl;   
+}
+
+void TestGPHIKOnlineLearnable::testOnlineLearningBinarytoMultiClass()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningBinarytoMultiClass ===================== " << std::endl;   
+  
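+  // NOTE: the actual test body for the binary-to-multi-class scenario is not implemented yet
+  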
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningBinarytoMultiClass done ===================== " << std::endl;   
+}
+
+void TestGPHIKOnlineLearnable::testOnlineLearningMultiClass()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningMultiClass ===================== " << std::endl;  
+  
+  NICE::Config conf;
+  
+  conf.sB ( "GPHIKClassifier", "eig_verbose", false);
+  conf.sS ( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  
+  std::string s_trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yBinTrain;
+  NICE::Vector yMultiTrain; 
+  
+  readData ( s_trainData, dataTrain, yBinTrain, yMultiTrain );
+
+  //----------------- convert data to sparse data structures ---------
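+  // note: we deliberately convert only the first rows()-1 examples -- the last one is added incrementally below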
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows()-1 );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows()-1; i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }  
+  
+  // TRAIN INITIAL CLASSIFIER FROM SCRATCH
+  NICE::GPHIKClassifier * classifier;
+  classifier = new NICE::GPHIKClassifier ( &conf );
+
+  // use all but the last example for training and add the last one later on
+  NICE::Vector yMultiRelevantTrain  ( yMultiTrain.getRangeRef( 0, yMultiTrain.size()-2  ) );
+  
+  classifier->train ( examplesTrain , yMultiRelevantTrain );
+  
+  
+  // RUN INCREMENTAL LEARNING
+  
+  bool performOptimizationAfterIncrement ( false );
+  
+  NICE::SparseVector * exampleToAdd = new NICE::SparseVector ( dataTrain.getRow( (int)dataTrain.rows()-1 ) );
+  // note: the label index has to match the row index of the added example, i.e., the last row
+  classifier->addExample ( exampleToAdd, yMultiTrain[ (int)dataTrain.rows()-1 ], performOptimizationAfterIncrement );
+  
+  std::cerr << "label of example to add: " << yMultiTrain[ (int)dataTrain.rows()-1 ] << std::endl;
+  
+  // TRAIN SECOND CLASSIFIER FROM SCRATCH USING THE SAME OVERALL AMOUNT OF EXAMPLES
+  examplesTrain.push_back(  exampleToAdd );
+
+  NICE::GPHIKClassifier * classifierScratch = new NICE::GPHIKClassifier ( &conf );
+  classifierScratch->train ( examplesTrain, yMultiTrain );
+  
+  std::cerr << "trained both classifiers - now start evaluating them" << std::endl;
+  
+  
+  // STORE BOTH CLASSIFIERS TO FILES
+  // note: the two files are only written out here, their contents are not compared automatically
+  std::string s_destination_save_IL ( "myClassifierIL.txt" );
+  
+  std::filebuf fbOut;
+  fbOut.open ( s_destination_save_IL.c_str(), ios::out );
+  std::ostream os (&fbOut);
+  //
+  classifier->store( os );
+  //   
+  fbOut.close(); 
+  
+  std::string s_destination_save_scratch ( "myClassifierScratch.txt" );
+  
+  std::filebuf fbOutScratch;
+  fbOutScratch.open ( s_destination_save_scratch.c_str(), ios::out );
+  std::ostream osScratch (&fbOutScratch);
+  //
+  classifierScratch->store( osScratch );
+  //   
+  fbOutScratch.close(); 
+  
+  
+  // TEST that both classifiers produce equal results
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yBinTest;
+  NICE::Vector yMultiTest; 
+  
+  std::string s_testData = conf.gS( "main", "testData", "toyExampleTest.data" );  
+  
+  readData ( s_testData, dataTest, yBinTest, yMultiTest );
+
+    
+  // ------------------------------------------
+  // ------------- PREPARATION --------------
+  // ------------------------------------------   
+  
+  // determine classes known during training/testing and corresponding mapping
+  // thereby allow for non-continuous class labels  
+  std::map<int,int> mapClNoToIdxTrain;
+  std::map<int,int> mapClNoToIdxTest;
+  prepareLabelMappings (mapClNoToIdxTrain, classifier, mapClNoToIdxTest, yMultiTest);
+  
+  
+  NICE::Matrix confusionMatrix         ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
+  NICE::Matrix confusionMatrixScratch    ( mapClNoToIdxTrain.size(), mapClNoToIdxTest.size(), 0.0);
+    
+  // ------------------------------------------
+  // ------------- CLASSIFICATION --------------
+  // ------------------------------------------  
+  evaluateClassifier ( confusionMatrix, classifier, dataTest, yMultiTest,
+                          mapClNoToIdxTrain,mapClNoToIdxTest ); 
+  
+  evaluateClassifier ( confusionMatrixScratch, classifierScratch, dataTest, yMultiTest,
+                          mapClNoToIdxTrain,mapClNoToIdxTest );  
+  
+    
+  // post-process confusion matrices
+  confusionMatrix.normalizeColumnsL1();
+  double arr ( confusionMatrix.trace()/confusionMatrix.cols() );
+
+  confusionMatrixScratch.normalizeColumnsL1();
+  double arrScratch ( confusionMatrixScratch.trace()/confusionMatrixScratch.cols() );
+
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( arr, arrScratch, 1e-8);
+  
+  // don't waste memory
+  
+  delete classifier;
+  delete classifierScratch;
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  } 
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKOnlineLearnable::testOnlineLearningMultiClass done ===================== " << std::endl;  
+  
+}
+
+#endif

+ 38 - 0
tests/TestGPHIKOnlineLearnable.h

@@ -0,0 +1,38 @@
+#ifndef _TESTGPHIKONLINELEARNABLE_H
+#define _TESTGPHIKONLINELEARNABLE_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <gp-hik-core/GPHIKClassifier.h>
+
+/**
+ * CppUnit-Testcase. 
+ * @brief CppUnit-Testcase to verify that the GPHIKClassifier methods inherited from OnlineLearnable (addExample and addMultipleExamples) work as desired.
+ * @author Alexander Freytag
+ * @date 03-11-2014 (dd-mm-yyyy)
+ */
+class TestGPHIKOnlineLearnable : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestGPHIKOnlineLearnable );
+      CPPUNIT_TEST(testOnlineLearningStartEmpty);
+      CPPUNIT_TEST(testOnlineLearningOCCtoBinary);
+      CPPUNIT_TEST(testOnlineLearningBinarytoMultiClass);
+      CPPUNIT_TEST(testOnlineLearningMultiClass);
+      
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    void testOnlineLearningStartEmpty();    
+    
+    void testOnlineLearningOCCtoBinary();
+    
+    void testOnlineLearningBinarytoMultiClass();
+
+    void testOnlineLearningMultiClass();
+};
+
+#endif // _TESTGPHIKONLINELEARNABLE_H

+ 225 - 0
tests/TestGPHIKPersistent.cpp

@@ -0,0 +1,225 @@
+/** 
+ * @file TestGPHIKPersistent.cpp
+ * @brief CppUnit-Testcase to verify that GPHIKClassifier methods inherited from Persistent (store and restore) work as desired.
+ * @author Alexander Freytag
+ * @date 21-12-2013
+*/
+
+#ifdef NICE_USELIB_CPPUNIT
+
+// STL includes
+#include <iostream>
+#include <vector>
+
+// NICE-core includes
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+
+// gp-hik-core includes
+#include "gp-hik-core/GPHIKClassifier.h"
+
+#include "TestGPHIKPersistent.h"
+
+using namespace std;   // C++ standard library
+using namespace NICE;  // nice-core
+
+const bool verboseStartEnd = true;
+
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKPersistent );
+
+void TestGPHIKPersistent::setUp() {
+}
+
+void TestGPHIKPersistent::tearDown() {
+}
+void TestGPHIKPersistent::testPersistentMethods()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKPersistent::testPersistentMethods ===================== " << std::endl;  
+  
+  NICE::Config conf;
+  std::string trainData = conf.gS( "main", "trainData", "toyExampleSmallScaleTrain.data" );
+  NICE::GPHIKClassifier * classifier;  
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yBinTrain;
+  NICE::Vector yMultiTrain; 
+
+  std::ifstream ifsTrain ( trainData.c_str() , ios::in );
+
+  if ( ifsTrain.good() )
+  {
+    ifsTrain >> dataTrain;
+    ifsTrain >> yBinTrain;
+    ifsTrain >> yMultiTrain;
+    ifsTrain.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read training data from file " << trainData << " -- aborting." << std::endl;
+    CPPUNIT_ASSERT ( ifsTrain.good() );
+  } 
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< const NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows() );
+  
+  std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }  
+  
+  // TRAIN CLASSIFIER FROM SCRATCH
+  
+  classifier = new GPHIKClassifier ( &conf );  
+    
+  classifier->train ( examplesTrain , yMultiTrain );
+  
+  
+  // TEST STORING ABILITIES
+  
+  std::string s_destination_save ( "myClassifier.txt" );
+  
+  std::filebuf fbOut;
+  fbOut.open ( s_destination_save.c_str(), ios::out );
+  std::ostream os (&fbOut);
+  //
+  classifier->store( os );
+  //   
+  fbOut.close(); 
+  
+  
+  // TEST RESTORING ABILITIES
+    
+  NICE::GPHIKClassifier * classifierRestored = new GPHIKClassifier;  
+      
+  std::string s_destination_load ( "myClassifier.txt" );
+  
+  std::filebuf fbIn;
+  fbIn.open ( s_destination_load.c_str(), ios::in );
+  std::istream is (&fbIn);
+  //
+  classifierRestored->restore( is );
+  //   
+  fbIn.close();   
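+  
+  // the restored object was default-constructed and receives its complete state
+  // from the stream; the classification loop below checks that it behaves
+  // identically to the original classifier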
+  
+  
+  // TEST that both classifiers produce equal results
+  
+  //------------- read the test data --------------
+  
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yBinTest;
+  NICE::Vector yMultiTest; 
+
+  std::string testData = conf.gS( "main", "testData", "toyExampleTest.data" );  
+  std::ifstream ifsTest ( testData.c_str(), ios::in );
+  if ( ifsTest.good() )
+  {
+    ifsTest >> dataTest;
+    ifsTest >> yBinTest;
+    ifsTest >> yMultiTest;
+    ifsTest.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read test data, aborting." << std::endl;
+    CPPUNIT_ASSERT ( ifsTest.good() );
+  }
+  
+  // ------------------------------------------
+  // ------------- PREPARATION --------------
+  // ------------------------------------------   
+  
+  // determine classes known during training and corresponding mapping
+  // thereby allow for non-continuous class labels
+  std::set<int> classesKnownTraining = classifier->getKnownClassNumbers();
+  
+  int noClassesKnownTraining ( classesKnownTraining.size() );
+  std::map<int,int> mapClNoToIdxTrain;
+  std::set<int>::const_iterator clTrIt = classesKnownTraining.begin();
+  for ( int i=0; i < noClassesKnownTraining; i++, clTrIt++ )
+      mapClNoToIdxTrain.insert ( std::pair<int,int> ( *clTrIt, i )  );
+  
+  // determine classes known during testing and corresponding mapping
+  // thereby allow for non-continuous class labels
+  std::set<int> classesKnownTest;
+  classesKnownTest.clear();
+  
+
+  // determine which classes we have in our label vector
+  // -> MATLAB: myClasses = unique(y);
+  for ( NICE::Vector::const_iterator it = yMultiTest.begin(); it != yMultiTest.end(); it++ )
+  {
+    if ( classesKnownTest.find ( *it ) == classesKnownTest.end() )
+    {
+      classesKnownTest.insert ( *it );
+    }
+  }          
+  
+  int noClassesKnownTest ( classesKnownTest.size() );  
+  std::map<int,int> mapClNoToIdxTest;
+  std::set<int>::const_iterator clTestIt = classesKnownTest.begin();
+  for ( int i=0; i < noClassesKnownTest; i++, clTestIt++ )
+      mapClNoToIdxTest.insert ( std::pair<int,int> ( *clTestIt, i )  ); 
+          
+  
+  NICE::Matrix confusionMatrix         ( noClassesKnownTraining, noClassesKnownTest, 0.0);
+  NICE::Matrix confusionMatrixRestored ( noClassesKnownTraining, noClassesKnownTest, 0.0);
+  
+  int i_loopEnd  ( (int)dataTest.rows() );
+  
+  
+  for (int i = 0; i < i_loopEnd ; i++)
+  {
+    NICE::Vector example ( dataTest.getRow(i) );
+    NICE::SparseVector scores;
+    int result;
+    
+    // classify with trained classifier 
+    classifier->classify( &example, result, scores );
+       
+    
+    confusionMatrix( mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+
+    // classify with restored classifier 
+    scores.clear();
+    classifierRestored->classify( &example, result, scores );
+    
+    confusionMatrixRestored( mapClNoToIdxTrain.find(result)->second, mapClNoToIdxTest.find(yMultiTest[i])->second ) += 1.0;
+    
+    
+  }  
+    
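+  // average recognition rate: mean of the diagonal of the column-normalized
+  // confusion matrix, i.e., the mean class-wise recall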
+  confusionMatrix.normalizeColumnsL1();
+  double arr ( confusionMatrix.trace()/confusionMatrix.cols() );
+
+  confusionMatrixRestored.normalizeColumnsL1();
+  double arrRestored ( confusionMatrixRestored.trace()/confusionMatrixRestored.cols() );
+
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( arr, arrRestored, 1e-8);
+  
+  // don't waste memory -- free the classifiers and the training examples
+  
+  delete classifier;
+  delete classifierRestored;
+  
+  for (std::vector< const NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin(); exTrainIt != examplesTrain.end(); exTrainIt++)
+  {
+    delete *exTrainIt;
+  } 
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKPersistent::testPersistentMethods done ===================== " << std::endl;  
+  
+}
+
+#endif

+ 30 - 0
tests/TestGPHIKPersistent.h

@@ -0,0 +1,30 @@
+#ifndef _TESTGPHIKPERSISTENT_H
+#define _TESTGPHIKPERSISTENT_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <gp-hik-core/GPHIKClassifier.h>
+
+/**
+ * CppUnit-Testcase. 
+ * @brief CppUnit-Testcase to verify that GPHIKClassifier methods inherited from Persistent (store and restore) work as desired.
+ * @author Alexander Freytag
+ * @date 21-12-2013
+ */
+class TestGPHIKPersistent : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestGPHIKPersistent );
+      CPPUNIT_TEST(testPersistentMethods);
+      
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+
+    void testPersistentMethods();
+};
+
+#endif // _TESTGPHIKPERSISTENT_H

+ 64 - 0
tests/toyExampleSmallScaleTrain.data

@@ -0,0 +1,64 @@
+60 x 49
+0.0259695 0.0105403 0.0110604 0.0160947 0.00950027 0.0188586 0.0118317 0.0126629 0.00929102 0.0130691 0.00773724 0.0117242 0.0186207 0.00753209 0.0112492 0.00827115 0.00863989 0.014142 0.00777097 0.0128102 0.0114344 0.00784775 0.0160248 0.0107051 0.00827794 0.00769294 0.00802757 0.00909618 0.0119556 0.0140857 0.0100281 0.00829676 0.01237 0.00871181 0.01287 0.0101447 0.00948488 0.00805543 0.00947402 0.00882249 0.00796149 0.0172038 0.0114972 0.0125575 0.00895035 0.0116704 0.0145875 0.00764121 0.0151904 
+0.0271022 0.011318 0.00996974 0.0110196 0.00925289 0.0109691 0.0108103 0.0108613 0.0116696 0.00787904 0.0109383 0.00769676 0.00829481 0.00969134 0.00929602 0.0130977 0.0177721 0.0186529 0.00898793 0.00846386 0.00851917 0.0142691 0.00784852 0.0179733 0.00924451 0.0112334 0.0105803 0.013269 0.01111 0.0107221 0.0134643 0.00755481 0.0162522 0.00919093 0.0112732 0.0108264 0.00968506 0.0159471 0.0106169 0.017662 0.00839274 0.0103854 0.00808056 0.011202 0.0145698 0.0101001 0.0143089 0.0134676 0.0107477 
+0.0230633 0.0114753 0.00784154 0.00929423 0.00889608 0.0116286 0.0150744 0.0123752 0.0122833 0.010329 0.0124179 0.0140873 0.0126277 0.0111987 0.0103069 0.00949489 0.0146096 0.00890901 0.00783397 0.0112901 0.013117 0.00811653 0.00767978 0.0122787 0.00962363 0.0200206 0.011309 0.0105445 0.00752052 0.0122045 0.00849636 0.0113249 0.00950693 0.00775034 0.0129373 0.010784 0.0114393 0.0119846 0.0088356 0.00808703 0.0103027 0.0111316 0.0077251 0.0119095 0.00794289 0.0154611 0.0132691 0.0132304 0.0142578 
+0.0239072 0.00923091 0.0108958 0.0105975 0.0148438 0.0115835 0.0139692 0.00877158 0.00861952 0.016659 0.0123196 0.0199786 0.0134272 0.00917523 0.00795431 0.019383 0.0159615 0.0100525 0.00801319 0.0117417 0.00912759 0.0101336 0.00795618 0.0111197 0.0095205 0.0110377 0.0137116 0.0111116 0.0120717 0.0139439 0.0166161 0.0142446 0.00807274 0.00767242 0.0149445 0.0119278 0.013909 0.00800654 0.0104336 0.0131392 0.00808004 0.013861 0.00981833 0.0118265 0.0126384 0.0096132 0.0134302 0.0162862 0.0119054 
+0.0223962 0.0127152 0.00925128 0.0129104 0.00813518 0.0136416 0.0122769 0.0114984 0.0128947 0.00802646 0.00793451 0.00908589 0.0137168 0.00981374 0.0124787 0.0109228 0.0106603 0.0149199 0.00805851 0.00907345 0.011635 0.0144065 0.0131782 0.00814107 0.00788798 0.0169886 0.0138701 0.0125473 0.0075731 0.0134445 0.0179868 0.0129148 0.0148743 0.00902368 0.0182203 0.00754253 0.0160189 0.0109286 0.00852182 0.0159449 0.00850632 0.00917183 0.0176302 0.0150493 0.0106994 0.0141455 0.011174 0.00956637 0.0159048 
+0.0304368 0.00985075 0.00924785 0.00869394 0.0140252 0.0100286 0.0161972 0.0158769 0.0108712 0.00941128 0.0100974 0.0138416 0.00780139 0.00798227 0.0115011 0.0118319 0.0096153 0.00843833 0.0123238 0.00848155 0.0117371 0.01002 0.00968988 0.00976564 0.0102043 0.00838277 0.0199357 0.0101832 0.00840926 0.00891564 0.00995065 0.0111052 0.0101932 0.0129618 0.0135528 0.0115114 0.00822212 0.0105505 0.0133001 0.0105868 0.0126539 0.0100525 0.00810846 0.00946776 0.0081359 0.008567 0.00906365 0.0202011 0.0115662 
+0.0241134 0.0143089 0.00763809 0.0103048 0.0147389 0.0116759 0.0143365 0.00955372 0.0136892 0.0112737 0.0131253 0.0128577 0.0136494 0.00796086 0.0117908 0.0114703 0.0119028 0.00813583 0.0144375 0.00849329 0.0126432 0.0141733 0.0078145 0.0144172 0.00884723 0.0151062 0.008119 0.00825625 0.0101327 0.00828455 0.00826487 0.0114902 0.0122419 0.00858807 0.0128025 0.0150127 0.01324 0.00885035 0.0110488 0.011903 0.00945571 0.017715 0.00826096 0.00909538 0.0103109 0.0098135 0.00834678 0.016477 0.0128185 
+0.0256172 0.00805043 0.0158787 0.0123214 0.0124122 0.013729 0.0137244 0.00867987 0.0135993 0.00762924 0.00890781 0.0144716 0.00900865 0.00791389 0.0147116 0.0107582 0.0110171 0.012628 0.00907872 0.00827167 0.0120265 0.00826881 0.0151552 0.0113008 0.0116872 0.00987471 0.0148357 0.00969207 0.0142194 0.0111191 0.00852744 0.0129664 0.0123777 0.00889003 0.00941833 0.00866721 0.0112646 0.0138308 0.010994 0.00828151 0.0119669 0.0113105 0.0198909 0.0111647 0.0151214 0.00941323 0.0157938 0.00781977 0.0104112 
+0.0234519 0.0128553 0.00967367 0.00816595 0.0104776 0.00946706 0.00879412 0.0133896 0.0180495 0.0107266 0.0163932 0.00757559 0.0105604 0.0166635 0.0135827 0.0134133 0.0101368 0.00822928 0.00839668 0.0129781 0.0084781 0.0108501 0.00910018 0.0083439 0.00794161 0.00977823 0.0155439 0.0195073 0.0100602 0.0105728 0.0139483 0.0114243 0.0191313 0.00922986 0.00896528 0.0127137 0.0100254 0.00847564 0.00751717 0.0118792 0.0112046 0.0140585 0.0108731 0.0103888 0.00972049 0.0172753 0.00852407 0.00983208 0.0120798 
+0.0244513 0.00916373 0.0129416 0.00941483 0.0161396 0.00909459 0.00879067 0.00823309 0.0186154 0.00810004 0.00980972 0.0139805 0.0110224 0.0152112 0.00918473 0.00759198 0.0121576 0.00774559 0.0131761 0.0162813 0.0100207 0.0136941 0.00990834 0.0128233 0.0139276 0.00916473 0.0158321 0.00826871 0.0150633 0.0148289 0.0160133 0.0119603 0.0115088 0.0119952 0.0185482 0.0159011 0.012829 0.00832603 0.013801 0.00838511 0.0113083 0.0130678 0.00842253 0.00917392 0.00795815 0.0149192 0.0113192 0.00929634 0.0113226 
+0.0270594 0.00807159 0.00991026 0.0105547 0.0148077 0.0128059 0.0122153 0.0143011 0.00932909 0.0151779 0.0127106 0.00992389 0.0183903 0.0156485 0.0157064 0.0164128 0.0116402 0.00943215 0.0105756 0.00813119 0.0112977 0.0131093 0.0140546 0.0183521 0.010916 0.0138169 0.0102092 0.013503 0.00970905 0.00830962 0.0181664 0.00841476 0.0101295 0.0111494 0.00986364 0.0178961 0.0174996 0.0137928 0.0163185 0.0157177 0.017789 0.0110789 0.0117625 0.0113533 0.0087363 0.0103995 0.0153326 0.0196703 0.0113684 
+0.024149 0.0105725 0.00887366 0.00989522 0.0153129 0.0112372 0.0173714 0.0107147 0.0101773 0.0167279 0.0090978 0.0079373 0.0103145 0.0152955 0.012309 0.0210111 0.0080108 0.00998822 0.0157671 0.00933217 0.00820574 0.0187734 0.0177674 0.009834 0.00918752 0.0179541 0.0186268 0.00756802 0.0109617 0.00983035 0.0112832 0.00861512 0.0245526 0.00975396 0.0123681 0.0132452 0.00788526 0.0131673 0.018516 0.0118614 0.0147822 0.0079077 0.0167211 0.0108169 0.00820817 0.0117829 0.00824693 0.0122202 0.0116028 
+0.0207545 0.0125084 0.0108505 0.0104894 0.0128691 0.00994967 0.00832289 0.0130178 0.0150846 0.0160362 0.00750938 0.0108468 0.0160085 0.0127219 0.0144578 0.0124182 0.0105225 0.0101767 0.0107503 0.0144781 0.00883719 0.0102866 0.00984082 0.00817868 0.0163922 0.00778454 0.00821095 0.00961263 0.0127874 0.013317 0.0104098 0.0120479 0.0175316 0.00782725 0.00831699 0.0125904 0.015201 0.0105327 0.00952129 0.0111621 0.00871013 0.0141928 0.00771442 0.00870064 0.00921121 0.00832019 0.0095926 0.0152257 0.00842376 
+0.0313984 0.007982 0.00995331 0.00981045 0.00876915 0.0126173 0.00999689 0.00851341 0.00934417 0.00899455 0.0100468 0.0108085 0.00850033 0.0086989 0.0091643 0.00956867 0.0089853 0.00985235 0.0190657 0.010688 0.00827333 0.0099693 0.0134224 0.0174105 0.0152557 0.00929907 0.0161777 0.0150555 0.00806505 0.00850974 0.0096831 0.0104143 0.0118908 0.00930829 0.0125519 0.011415 0.00957852 0.00792941 0.00892285 0.0101932 0.0121507 0.0139159 0.0122315 0.00979685 0.0107431 0.00871917 0.0141024 0.00864975 0.0106822 
+0.0223997 0.0080833 0.0119063 0.0123641 0.00763316 0.0119364 0.00935 0.0110724 0.0114145 0.0095334 0.0119481 0.009164 0.0193016 0.0196916 0.00848646 0.00863559 0.0168786 0.0128225 0.0113463 0.00856433 0.00803072 0.0111494 0.012586 0.0175893 0.0178392 0.0103538 0.00967134 0.00806565 0.0128146 0.0199558 0.0162451 0.0099261 0.0121075 0.0158896 0.00812237 0.00779004 0.00800657 0.0109748 0.00877823 0.00833482 0.015287 0.00981958 0.012355 0.00902026 0.0118177 0.00912929 0.0143999 0.0107564 0.0103279 
+0.0227656 0.00873123 0.00783844 0.0155652 0.00943663 0.0100323 0.00815993 0.0101672 0.0132211 0.0153565 0.0105628 0.0158833 0.0152073 0.00801318 0.0109822 0.00822322 0.0110543 0.0120444 0.00895155 0.0150476 0.012667 0.00974858 0.0103141 0.00822587 0.00752527 0.0115945 0.00912847 0.0111587 0.00892064 0.0105066 0.0152925 0.00898104 0.0084449 0.0135059 0.0115401 0.00794416 0.00754046 0.00796978 0.0101548 0.00950692 0.0128038 0.0151233 0.00849278 0.00925549 0.00973895 0.0134272 0.0143091 0.00965296 0.00872116 
+0.0220867 0.00952122 0.0142567 0.00857123 0.0112302 0.00998641 0.0104499 0.0134628 0.00992557 0.0138055 0.0172484 0.00923038 0.00773323 0.0141641 0.0107527 0.0104726 0.0129019 0.00930548 0.00855496 0.0115993 0.0166085 0.0102857 0.00924597 0.0110356 0.0116808 0.00765342 0.00769771 0.00903775 0.0141177 0.00807407 0.0120771 0.00976994 0.0188901 0.0127345 0.00857711 0.0182242 0.0117 0.0091719 0.0172657 0.00978154 0.00936656 0.0093512 0.0121914 0.0165407 0.00926855 0.0129943 0.00847156 0.00906975 0.0107144 
+0.0219103 0.017343 0.0196978 0.0104879 0.0105515 0.00999576 0.00928981 0.0149162 0.0090834 0.0113828 0.0109014 0.0153987 0.00770247 0.00823608 0.0128081 0.00914814 0.00751693 0.0107139 0.0125536 0.0102118 0.0116546 0.0123145 0.00896101 0.0118348 0.0104389 0.0133669 0.0111383 0.0153073 0.00830141 0.00823326 0.0108762 0.00976553 0.00819337 0.00757586 0.0104232 0.0150566 0.0112633 0.00918437 0.0088266 0.0131901 0.0148984 0.0114526 0.0173006 0.0126395 0.0134126 0.00977013 0.00799374 0.0209565 0.0158383 
+0.024387 0.0100473 0.0085098 0.0110007 0.0102743 0.0145895 0.0142096 0.00932722 0.0115369 0.00849483 0.00933373 0.00822497 0.0101023 0.0129909 0.0124775 0.0171622 0.00770938 0.0136457 0.00845559 0.00977008 0.00957626 0.00809545 0.0189998 0.00803223 0.00904886 0.00753141 0.00798637 0.0117644 0.00897732 0.00867011 0.0091308 0.0118075 0.00975002 0.0132517 0.0159158 0.0167903 0.0115717 0.00884695 0.016522 0.00820418 0.0075003 0.0130556 0.00946251 0.00784477 0.00826967 0.013128 0.0123578 0.0170744 0.0188824 
+0.0288534 0.0134006 0.0110453 0.0140292 0.015017 0.0128366 0.00788706 0.0178822 0.0127398 0.00967744 0.00767562 0.00849709 0.0133257 0.00838418 0.0171635 0.0132189 0.0105086 0.0139051 0.00804162 0.0101824 0.0154833 0.0156085 0.00862076 0.0130732 0.014416 0.00788632 0.0104821 0.0128714 0.00923762 0.010608 0.0136589 0.0134713 0.0138101 0.00986637 0.00790978 0.0112465 0.0159947 0.00901536 0.0101493 0.0134912 0.00978309 0.015527 0.00969498 0.0126513 0.0111776 0.00755319 0.0184152 0.00789488 0.00930304 
+0.00880662 0.0216464 0.0210872 0.0105126 0.0127658 0.0112209 0.0115129 0.0113154 0.011329 0.0134951 0.00901304 0.00848824 0.0177882 0.0190767 0.0136747 0.010352 0.0152489 0.0109627 0.010929 0.0127208 0.0107025 0.0114791 0.00890777 0.0104932 0.0129995 0.0156713 0.00762406 0.0129343 0.0108742 0.0127172 0.0102928 0.00827948 0.0136815 0.0113224 0.00843217 0.0152795 0.0149782 0.007825 0.0109507 0.0137877 0.0127452 0.0136955 0.00991781 0.00827777 0.0179353 0.00886824 0.0141212 0.0168107 0.0173965 
+0.00900718 0.0217041 0.0139392 0.00853973 0.0096694 0.0162534 0.012773 0.00981645 0.00805063 0.00937836 0.0138217 0.0112746 0.0086497 0.0152205 0.00913559 0.0166785 0.0145056 0.00797746 0.00843824 0.00793002 0.0169129 0.00869163 0.0107405 0.00847737 0.01042 0.00827518 0.0117657 0.00869265 0.0109001 0.0162319 0.0165439 0.0132549 0.0102501 0.00996245 0.0114955 0.0101729 0.0117175 0.0174851 0.0104437 0.00795506 0.0126651 0.0108139 0.0143188 0.00871343 0.012538 0.0223205 0.00845874 0.00775098 0.0148045 
+0.0128452 0.0206778 0.00942699 0.0162227 0.0084621 0.00894997 0.00974841 0.0148166 0.00800937 0.0123893 0.00793666 0.0109363 0.0129667 0.011252 0.0132105 0.018058 0.0100507 0.00875572 0.010091 0.0192821 0.00799599 0.0140755 0.0137605 0.00786973 0.0154267 0.0131201 0.0200065 0.0200133 0.00802706 0.0111778 0.0172287 0.0111324 0.00835332 0.00857922 0.00900446 0.0134356 0.0128486 0.00765603 0.0153792 0.0136076 0.00936792 0.0111302 0.00957391 0.00810223 0.0118117 0.0133464 0.0127174 0.0105692 0.00892939 
+0.0113189 0.025128 0.00901293 0.0131072 0.0129681 0.0157613 0.00831601 0.0111685 0.0130465 0.00889493 0.012909 0.0119596 0.00750153 0.0109797 0.0112245 0.0117992 0.014789 0.0134853 0.0105948 0.00783141 0.0103964 0.0106642 0.00827252 0.0112292 0.00756793 0.0107174 0.0139279 0.0112388 0.00919734 0.00932427 0.00860366 0.0131025 0.0195901 0.00827442 0.0112736 0.00896313 0.0156302 0.0116154 0.00805878 0.014369 0.0132608 0.0105323 0.00876208 0.0118269 0.00961405 0.0105055 0.0126735 0.0082246 0.00777274 
+0.0103316 0.031193 0.0100607 0.0125212 0.0160683 0.00981616 0.00925346 0.0075162 0.00980768 0.00900646 0.00890371 0.0136854 0.0145326 0.00856758 0.0126471 0.0137377 0.00961645 0.0078712 0.0150319 0.0114971 0.0110445 0.0115416 0.0126913 0.0116137 0.00759611 0.00829835 0.00931262 0.0165359 0.00807646 0.0112452 0.0129125 0.0138121 0.0144461 0.009263 0.00773189 0.0118874 0.0141273 0.0141659 0.00968382 0.0129926 0.0109789 0.0101879 0.0102743 0.0165389 0.0144569 0.00819232 0.0153246 0.00796191 0.00828517 
+0.0130705 0.0233932 0.010609 0.00792477 0.0102463 0.0121859 0.0156888 0.0116376 0.0167157 0.0187747 0.00783081 0.0121942 0.00987135 0.0127848 0.00864133 0.00867839 0.0102234 0.0201533 0.00969555 0.00895597 0.00927638 0.0105126 0.0103961 0.0104856 0.0136381 0.00759534 0.0106049 0.0110775 0.00906157 0.0190068 0.00763674 0.00796651 0.00811015 0.00867397 0.00900911 0.00931233 0.00754657 0.00751041 0.00936925 0.0128884 0.0142454 0.0183237 0.00776128 0.00834111 0.0163471 0.0145873 0.0105058 0.0106947 0.0119503 
+0.00775893 0.020603 0.0108698 0.00797924 0.00989985 0.00925665 0.0116527 0.0134322 0.0113285 0.0132066 0.0110814 0.016983 0.0160277 0.0102325 0.0121382 0.010984 0.00756652 0.0125121 0.0107692 0.00966968 0.00933033 0.0133006 0.0103347 0.00842852 0.008004 0.00865679 0.0131454 0.00917677 0.0194663 0.0112063 0.00876808 0.00773606 0.0201326 0.0100063 0.00910977 0.0113968 0.00985285 0.00829201 0.0115724 0.00918228 0.00913257 0.016697 0.0110031 0.011874 0.0143604 0.0129367 0.0122788 0.0141067 0.0165299 
+0.0106273 0.0244861 0.00907672 0.0159438 0.0174978 0.0170136 0.0141612 0.0139259 0.0126844 0.0096883 0.0111207 0.00927742 0.00780215 0.0112174 0.00753353 0.00820472 0.00798994 0.0111971 0.0129177 0.0120068 0.0165062 0.00856146 0.00820934 0.0128443 0.0101435 0.0123611 0.00848645 0.0159466 0.0117611 0.00778523 0.0108223 0.00792226 0.0146178 0.0167422 0.00888029 0.0084917 0.0103163 0.0108963 0.0131455 0.009296 0.00776834 0.0101146 0.0101946 0.0158065 0.0104041 0.0162822 0.0133552 0.01034 0.00806414 
+0.00939357 0.0231166 0.00847506 0.0122524 0.012433 0.0093421 0.0143454 0.0101233 0.0107128 0.0135012 0.00833962 0.0104653 0.00775288 0.0140836 0.0107977 0.00846482 0.0120958 0.0160775 0.0106818 0.00906504 0.00925368 0.0121868 0.0145626 0.0136249 0.00756399 0.0135822 0.0108359 0.00928053 0.0115864 0.0130171 0.00915042 0.00795784 0.00875577 0.00889915 0.00949089 0.0081355 0.00818921 0.0152822 0.0107207 0.0125019 0.00785935 0.00807379 0.0133005 0.0153737 0.00844602 0.008706 0.00832958 0.0176599 0.0113929 
+0.00937036 0.0244766 0.00882136 0.0115904 0.0101322 0.0149058 0.0100778 0.019607 0.0131377 0.0130118 0.0124715 0.0103685 0.00807073 0.00928566 0.0108924 0.0103663 0.0151538 0.0140399 0.0135388 0.0116879 0.00862708 0.0103798 0.0149153 0.0200176 0.00864548 0.0102865 0.0123699 0.0148262 0.0103001 0.0126099 0.0152885 0.0123521 0.0193287 0.00835033 0.00942435 0.0101484 0.014357 0.0137524 0.0175744 0.00881443 0.0139454 0.00923441 0.0154462 0.00888159 0.0111433 0.0163142 0.00983176 0.010001 0.00882299 
+0.00899205 0.0217599 0.00892213 0.0160531 0.0110302 0.0103801 0.0153188 0.012857 0.0122882 0.0084038 0.00827598 0.00821238 0.0100706 0.00916037 0.0145212 0.00997158 0.00996835 0.00804101 0.00832772 0.0085908 0.0136335 0.0131631 0.0118268 0.0083565 0.0106843 0.0181602 0.0116033 0.0126854 0.0160203 0.00970096 0.00926267 0.00816163 0.00921396 0.0150989 0.0111025 0.00957662 0.0173483 0.0118917 0.00918161 0.014099 0.0153688 0.0134076 0.0103182 0.013726 0.0180079 0.00983955 0.0120475 0.00939958 0.0153206 
+0.0105588 0.0234418 0.00939131 0.0113621 0.0130088 0.0153581 0.00841515 0.00865124 0.0135816 0.0162395 0.0203847 0.0127909 0.00829511 0.00881781 0.0163614 0.00820557 0.0124668 0.0123785 0.00910737 0.0111985 0.00886406 0.0112144 0.00894312 0.0152993 0.0124651 0.0104476 0.0100528 0.0118992 0.00857994 0.0135462 0.0162855 0.00787341 0.00978844 0.00891056 0.00849655 0.0124034 0.011003 0.00954397 0.00756585 0.00871334 0.0111837 0.0148349 0.0131524 0.016365 0.00985213 0.0115147 0.00986463 0.00915865 0.00835631 
+0.0089894 0.0268708 0.0120246 0.0114489 0.011564 0.0173305 0.00881312 0.0141614 0.013083 0.00862102 0.00989734 0.0151296 0.0138561 0.00752894 0.0108879 0.0181947 0.00986264 0.0106516 0.00949566 0.0129874 0.00763186 0.0100316 0.0109182 0.0146944 0.0102073 0.00973839 0.0161131 0.0085112 0.015397 0.00965817 0.012005 0.00856396 0.0127106 0.018741 0.0150085 0.0141986 0.0113409 0.00952792 0.00785132 0.00985255 0.00927583 0.0122464 0.00818961 0.00821668 0.0136789 0.0129419 0.0121544 0.0104701 0.0158305 
+0.00777133 0.0338468 0.00751626 0.0160865 0.0101277 0.00850899 0.00752594 0.0095737 0.0106809 0.00867979 0.0113247 0.012077 0.00994838 0.00854107 0.0155612 0.0108833 0.00920965 0.00920464 0.010517 0.00900608 0.0148061 0.00965102 0.0105772 0.008064 0.0109489 0.0120811 0.0133097 0.0132957 0.0141527 0.01249 0.0112634 0.00813683 0.0110562 0.0108294 0.00796001 0.0124461 0.0132342 0.010618 0.0143029 0.00839652 0.0109242 0.00987172 0.00908014 0.0119396 0.01279 0.014592 0.00924242 0.00931531 0.00795973 
+0.00822071 0.026167 0.0104337 0.00948422 0.0117414 0.0210517 0.0137955 0.0129675 0.0138387 0.00893837 0.0151963 0.0171065 0.00775741 0.0101589 0.0141498 0.0187153 0.00947764 0.00774628 0.0154054 0.00989548 0.0152872 0.009876 0.01436 0.0128548 0.0133433 0.0126873 0.0165809 0.00761219 0.00873266 0.0176218 0.00890786 0.007627 0.00928098 0.0118897 0.0171019 0.0098913 0.013492 0.0121169 0.011258 0.0122963 0.0108707 0.011473 0.0107655 0.0148202 0.00982258 0.00957453 0.0111155 0.0100285 0.00813544 
+0.0120856 0.026171 0.00989043 0.00808503 0.00865407 0.0113607 0.00909992 0.013825 0.0130143 0.0134441 0.0126256 0.0119627 0.0108432 0.0130967 0.00952968 0.0164724 0.0111507 0.0160875 0.00754196 0.00870893 0.0187831 0.00819476 0.0120628 0.00911874 0.00942726 0.0179704 0.0110552 0.00994693 0.0129673 0.00918318 0.00912005 0.00789547 0.0103583 0.00988415 0.00999427 0.0121406 0.0157666 0.00870823 0.0119195 0.013 0.00982782 0.008185 0.0125848 0.0125986 0.00854327 0.00809625 0.0127336 0.0165125 0.0135715 
+0.0131534 0.0251287 0.011663 0.0103692 0.0140887 0.0106316 0.0132454 0.0104939 0.00926397 0.0120671 0.0103348 0.00877237 0.0225838 0.0107394 0.0126268 0.00957499 0.00950101 0.0147754 0.0101303 0.00940025 0.01413 0.0137589 0.00832324 0.0161982 0.00979315 0.00869996 0.00832121 0.0110396 0.0102091 0.00861289 0.011158 0.00943551 0.00805955 0.0097125 0.013317 0.0124797 0.0093203 0.0134614 0.00954158 0.0125645 0.0152999 0.0152032 0.0124181 0.0149455 0.00942742 0.0148204 0.0132616 0.0178735 0.0104298 
+0.0128681 0.0239653 0.0105898 0.013532 0.0162459 0.0142268 0.0115755 0.0158183 0.0118954 0.0168663 0.0120186 0.0114937 0.00980749 0.0129433 0.00975411 0.0131276 0.010905 0.012303 0.0130469 0.00818084 0.0160419 0.0112622 0.019154 0.010586 0.0114233 0.017696 0.010955 0.0108811 0.00757016 0.00925705 0.0138948 0.0151963 0.0101173 0.01329 0.00948939 0.0121616 0.0129249 0.013639 0.00887597 0.00781413 0.00790878 0.00990957 0.017771 0.00860841 0.0075033 0.0110234 0.0158632 0.0129937 0.0116417 
+0.0111109 0.0294645 0.0120537 0.0111116 0.0117274 0.00853298 0.00787973 0.00964887 0.0130147 0.00973637 0.0131743 0.00921314 0.0122116 0.0117603 0.0154865 0.010949 0.00978931 0.0098883 0.0172717 0.00869153 0.00793727 0.0112716 0.00787567 0.00865546 0.010282 0.0108673 0.0143578 0.0116831 0.0118659 0.00807395 0.00845995 0.0114815 0.00858862 0.00772967 0.00801831 0.0101241 0.0131679 0.00841491 0.0190119 0.0119131 0.00985021 0.00776973 0.0103715 0.0124589 0.0159059 0.00987744 0.0106877 0.00904324 0.0103904 
+0.0207783 0.0260479 0.00997658 0.013466 0.00918262 0.0102103 0.0102057 0.012457 0.00807424 0.0108472 0.00922633 0.0146097 0.0119745 0.011336 0.00792642 0.012814 0.0102497 0.00972321 0.00987833 0.0110496 0.0144665 0.00823416 0.0110999 0.0122386 0.0127554 0.0113965 0.0152782 0.0100133 0.00860568 0.00944747 0.0137201 0.00764315 0.018137 0.0117797 0.00834321 0.0131745 0.00909519 0.0186882 0.0136019 0.00959667 0.0138963 0.0240943 0.00922153 0.00795525 0.0123477 0.00760298 0.00935222 0.0116062 0.0117592 
+0.00820278 0.00823624 0.0260592 0.00903996 0.00971909 0.00966642 0.00893984 0.0117034 0.0099061 0.0177389 0.0132994 0.012128 0.0102907 0.00821357 0.0130841 0.0103626 0.00939324 0.0128043 0.0114336 0.0183887 0.0122977 0.0136871 0.00766289 0.00772549 0.0091006 0.00988621 0.0140285 0.0132475 0.0149558 0.0140575 0.00941845 0.0135572 0.0127171 0.0101749 0.0133492 0.0123623 0.0157588 0.0132736 0.00873428 0.0232314 0.0144387 0.0107137 0.0114545 0.00904013 0.0133339 0.0114084 0.00760248 0.0116109 0.0140074 
+0.009646 0.0177584 0.0211102 0.00960976 0.0100134 0.0109611 0.0153114 0.010387 0.0165983 0.0175927 0.0101028 0.0138415 0.0113752 0.00790347 0.00757291 0.00788049 0.0113011 0.00869304 0.0144222 0.00752977 0.0112919 0.0142053 0.00801023 0.0135512 0.0189808 0.0138823 0.0103193 0.0148885 0.00916007 0.0111217 0.0113887 0.00874071 0.0126873 0.00977365 0.0158879 0.0135432 0.008609 0.0177525 0.00934098 0.00775321 0.0200725 0.0138581 0.00929476 0.0103565 0.0155505 0.0114794 0.0086308 0.00882947 0.0145632 
+0.0121874 0.0119632 0.0205059 0.00978061 0.0117281 0.0104614 0.0106371 0.0119173 0.00801365 0.00928938 0.00869799 0.0130824 0.00826153 0.0113958 0.00902546 0.0158337 0.0127575 0.0108629 0.0108841 0.0122359 0.011006 0.0104098 0.0118249 0.00972292 0.00757526 0.00852472 0.0108496 0.0122731 0.00866414 0.0102172 0.0118904 0.00838573 0.00840182 0.0145641 0.0107593 0.0102488 0.00931913 0.00907936 0.00976757 0.0106548 0.00773216 0.00831631 0.0087845 0.00974962 0.0102509 0.0077432 0.011432 0.00869996 0.00920229 
+0.0106396 0.010933 0.0255497 0.0130523 0.00784461 0.00802316 0.00839757 0.0145396 0.00936429 0.0104733 0.0104434 0.0117082 0.0108046 0.0143165 0.00999919 0.0131738 0.00875868 0.00904078 0.0159884 0.0159185 0.0100566 0.0169482 0.011197 0.00924774 0.0083769 0.00853495 0.00776002 0.0107559 0.0076253 0.0110167 0.0175894 0.00870061 0.0108202 0.015549 0.00798324 0.018877 0.0218267 0.0131679 0.0118787 0.0100219 0.00780855 0.00804774 0.00867327 0.0110219 0.0084428 0.00776008 0.00766939 0.0120955 0.00801992 
+0.00904685 0.00940322 0.0283131 0.0106521 0.00834238 0.0105714 0.0137723 0.0160339 0.0106749 0.0106381 0.0129054 0.0109451 0.0141554 0.0139853 0.00848258 0.0147618 0.00882912 0.0115394 0.0144487 0.0175726 0.00958771 0.0140424 0.01117 0.00765008 0.0151508 0.0153911 0.00774375 0.0158878 0.00870456 0.00851632 0.00798942 0.0155605 0.00763791 0.0151582 0.00907071 0.00860455 0.00927132 0.00840893 0.00814229 0.0173009 0.00972256 0.0114171 0.00879717 0.00768555 0.0100898 0.0108291 0.0117871 0.00967495 0.00888629 
+0.0128385 0.00900563 0.0300644 0.00903157 0.00825902 0.009528 0.0084389 0.00793054 0.0111465 0.0118638 0.00973942 0.0148717 0.0139167 0.00888057 0.010006 0.00958233 0.0114284 0.01154 0.00976147 0.0105533 0.0123792 0.0127316 0.0113913 0.00938501 0.0108933 0.0132494 0.0100134 0.00793871 0.0119074 0.0138443 0.00850183 0.0106248 0.00916611 0.0142109 0.0174247 0.00920319 0.00830646 0.00963313 0.00905203 0.0171472 0.0109715 0.0116792 0.00925263 0.0113661 0.00752427 0.00986478 0.0174423 0.00845846 0.00833033 
+0.00957112 0.0180033 0.0214033 0.00887345 0.0127341 0.0113676 0.00913553 0.0121502 0.00777487 0.00912217 0.00850362 0.0154226 0.0155775 0.00892688 0.0114641 0.00942511 0.0156517 0.015633 0.0160293 0.00924149 0.0129434 0.0100237 0.011184 0.0153932 0.0100437 0.0124477 0.0126745 0.0119709 0.0140577 0.0135944 0.0188757 0.0106933 0.010631 0.0137078 0.0129499 0.0105144 0.0119992 0.0131591 0.0118469 0.0149973 0.0176692 0.0125257 0.00758253 0.0138059 0.0134798 0.0110038 0.0132354 0.0107177 0.0134322 
+0.00812629 0.0102932 0.0261431 0.00963761 0.00906311 0.00865032 0.0109847 0.00928407 0.0077928 0.0135659 0.0130834 0.00843539 0.0185632 0.0078 0.0152633 0.00880949 0.0132717 0.0168182 0.0103695 0.0089427 0.0106294 0.0107249 0.0081818 0.0196271 0.00827723 0.0136525 0.00876158 0.00939363 0.0133215 0.0150743 0.0109293 0.011322 0.0127186 0.00856855 0.0225302 0.0100105 0.00911908 0.0105362 0.0105037 0.00855259 0.0135456 0.00982338 0.0107018 0.00759774 0.0152149 0.00881609 0.0143926 0.00817332 0.00882965 
+0.00933851 0.00938584 0.0231235 0.00996461 0.0157442 0.00896774 0.0112177 0.0102015 0.011268 0.0148865 0.0130086 0.00867837 0.0146227 0.00816429 0.00796441 0.00974237 0.00811556 0.00877084 0.00792424 0.00825762 0.00984428 0.00905027 0.0113013 0.0116487 0.0117235 0.01317 0.015996 0.0129839 0.0081974 0.00819008 0.00809835 0.00812309 0.0135022 0.0105967 0.0129249 0.00907971 0.011666 0.0102498 0.0102823 0.0127665 0.0177145 0.00864585 0.0109531 0.00915439 0.0163722 0.0169408 0.00839886 0.00808527 0.0132475 
+0.0222342 0.0123037 0.0228699 0.0133582 0.0122959 0.00961472 0.0170842 0.0097461 0.00928027 0.0188664 0.0103514 0.0090499 0.0162294 0.0104919 0.0103414 0.0151058 0.0105229 0.00968669 0.0112248 0.00925596 0.0118245 0.0125294 0.00951293 0.0091115 0.0100605 0.0151374 0.0113915 0.0181758 0.0174017 0.010752 0.013117 0.0109705 0.0178813 0.00835953 0.0152218 0.0105508 0.0112765 0.0148581 0.0117385 0.0142863 0.0142671 0.0102961 0.00755827 0.00936552 0.0149442 0.00892664 0.010704 0.00807923 0.0103055 
+0.0129276 0.0104311 0.0252761 0.00811758 0.0125915 0.0113035 0.0140784 0.00822778 0.011812 0.0077716 0.00817863 0.0158648 0.0134115 0.0114055 0.00879775 0.0143018 0.0127297 0.00820451 0.0140901 0.0148652 0.0105412 0.00750298 0.00978503 0.00777211 0.0106125 0.0137089 0.0100961 0.0106955 0.015458 0.0135725 0.00926069 0.00887185 0.00930858 0.00948317 0.0118273 0.0104596 0.0148373 0.0122002 0.0121313 0.00896792 0.00780052 0.0142384 0.0162418 0.0120706 0.0108821 0.00821499 0.00899263 0.0152771 0.00929117 
+0.00852234 0.00940389 0.0225402 0.0101909 0.0154755 0.0132661 0.00775918 0.00946033 0.00824469 0.0076171 0.0168582 0.0149323 0.012522 0.0119527 0.0135063 0.0140917 0.0138473 0.0136181 0.00928085 0.0120208 0.00863997 0.0101098 0.00867349 0.0127716 0.00905366 0.0104012 0.018715 0.0120801 0.0123961 0.00815842 0.0104416 0.00964878 0.0117088 0.00801172 0.0142401 0.0154078 0.0139241 0.0141611 0.00867801 0.0141621 0.0188441 0.00857864 0.00867845 0.0108114 0.00963661 0.0177781 0.0158615 0.00982639 0.0121654 
+0.0121402 0.0154205 0.0252128 0.012044 0.0151337 0.0107913 0.0132634 0.0144782 0.00976324 0.0160855 0.011114 0.0173437 0.0184064 0.00914569 0.00893225 0.00906138 0.0172909 0.0156876 0.0155012 0.0121186 0.00848431 0.0100921 0.00914746 0.0138674 0.00830907 0.00889417 0.00908697 0.0131617 0.00816596 0.00967272 0.0136794 0.0079061 0.0101782 0.0077368 0.0116617 0.00841962 0.0100697 0.0114808 0.0134463 0.0173815 0.00865972 0.0153556 0.0134127 0.00793805 0.0107085 0.00977577 0.0196041 0.0113622 0.0141046 
+0.0107332 0.0143686 0.0226478 0.0120877 0.0212239 0.0123182 0.0106044 0.0117571 0.00806122 0.0081122 0.0101717 0.0117068 0.00929421 0.012269 0.00839687 0.016317 0.00808212 0.0116174 0.010069 0.00762633 0.0118785 0.0170918 0.00906191 0.0167674 0.00790757 0.0109736 0.0122299 0.00811963 0.00960001 0.00925493 0.0123415 0.01325 0.00753833 0.0123176 0.0145587 0.0118021 0.00766919 0.0124873 0.00973067 0.00847831 0.012676 0.0123287 0.0097535 0.0106664 0.0107547 0.0138246 0.0195624 0.0100525 0.00962325 
+0.0119281 0.0122241 0.0242841 0.0101683 0.0130994 0.00879826 0.00823088 0.0114712 0.0161398 0.0100226 0.0100097 0.0125042 0.0138629 0.0117501 0.00756436 0.0144745 0.0136194 0.0124086 0.0114991 0.0143748 0.0181917 0.0136612 0.00962511 0.0103765 0.0101686 0.0113172 0.00870255 0.019323 0.00995193 0.0118664 0.00851607 0.00946212 0.00907452 0.00818943 0.0131628 0.00996086 0.0141944 0.0150947 0.0111328 0.0130214 0.00983337 0.0136919 0.0113602 0.00871084 0.0157483 0.010904 0.0236183 0.0143695 0.011746 
+0.016832 0.0125288 0.0228028 0.0150509 0.00825244 0.0089719 0.012338 0.0111107 0.00807729 0.0130397 0.0134324 0.0135269 0.0137549 0.00965155 0.0143415 0.0147855 0.00917189 0.00983137 0.0147469 0.00907731 0.0109262 0.0137849 0.00978295 0.0082266 0.00806215 0.00863035 0.00929611 0.0115255 0.0100205 0.0107482 0.0104747 0.0106799 0.0162054 0.00826127 0.0137276 0.00833625 0.0130827 0.00891462 0.010127 0.0143162 0.00873058 0.0115233 0.013493 0.0151634 0.00956458 0.00948767 0.0135435 0.0170953 0.00955095 
+0.008911 0.0106027 0.0220302 0.0087342 0.00759654 0.00953132 0.019344 0.0164507 0.00760068 0.0114743 0.010967 0.0113929 0.00926275 0.00968558 0.0181604 0.0111836 0.010156 0.0166397 0.00962995 0.00851972 0.012249 0.00780138 0.0104243 0.0111163 0.0123718 0.00754142 0.00995157 0.00903349 0.0116911 0.0102157 0.00941913 0.0126701 0.0079727 0.0127494 0.016391 0.00818587 0.0112246 0.0139862 0.0083602 0.00781615 0.0123179 0.0201769 0.0151085 0.0138697 0.0108868 0.0123237 0.00820885 0.00938777 0.00885651 
+0.0149063 0.00834672 0.0258672 0.020298 0.00900085 0.0138103 0.00818875 0.0148981 0.0153341 0.00844291 0.00993773 0.0079899 0.0112637 0.0084996 0.00997534 0.0199401 0.0107206 0.0139009 0.00872373 0.00833983 0.0142801 0.0138285 0.0164602 0.0131497 0.00819378 0.00781263 0.0113703 0.00913357 0.0109295 0.0120916 0.0112749 0.0122972 0.0135997 0.0127951 0.0207119 0.0090782 0.0173306 0.0127178 0.0111936 0.00840975 0.0121481 0.0086159 0.00999495 0.00944532 0.0110877 0.0114007 0.00954282 0.00847484 0.0157283 
+0.0136951 0.00947508 0.0253767 0.00785013 0.00987724 0.0116808 0.00820833 0.00857517 0.0110849 0.0106047 0.00791872 0.0129586 0.0109218 0.0113774 0.0118702 0.0124808 0.0104141 0.0133277 0.0157505 0.0120121 0.00832354 0.0150398 0.0145849 0.0106571 0.00781995 0.00948235 0.00791023 0.0130278 0.0177706 0.00934947 0.00917786 0.00976387 0.011106 0.00872163 0.0125572 0.00872901 0.00833814 0.0119969 0.0114674 0.00899175 0.00761545 0.0152811 0.0113137 0.0112011 0.0155478 0.0090728 0.00907604 0.00936913 0.00997295 
+0.00950312 0.0137425 0.0227265 0.0127662 0.0137664 0.00990442 0.00792798 0.0123088 0.00768171 0.0101829 0.0113718 0.0184991 0.0102974 0.0118027 0.00956921 0.0102662 0.0093895 0.0101815 0.00963158 0.00755334 0.0108035 0.0190889 0.0148255 0.0130755 0.0107963 0.0128566 0.018184 0.00967175 0.0103734 0.0109859 0.0101677 0.0172851 0.0137611 0.00897412 0.0156099 0.00973854 0.00879859 0.0107113 0.0115928 0.0120723 0.00812917 0.0140335 0.0103966 0.00821714 0.00988885 0.00879647 0.00862479 0.0178016 0.0108844 
+
+60 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+60 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 >

+ 154 - 0
tests/toyExampleTest.data

@@ -0,0 +1,154 @@
+150 x 49
+0.0259695 0.0105403 0.0110604 0.0160947 0.00950027 0.0188586 0.0118317 0.0126629 0.00929102 0.0130691 0.00773724 0.0117242 0.0186207 0.00753209 0.0112492 0.00827115 0.00863989 0.014142 0.00777097 0.0128102 0.0114344 0.00784775 0.0160248 0.0107051 0.00827794 0.00769294 0.00802757 0.00909618 0.0119556 0.0140857 0.0100281 0.00829676 0.01237 0.00871181 0.01287 0.0101447 0.00948488 0.00805543 0.00947402 0.00882249 0.00796149 0.0172038 0.0114972 0.0125575 0.00895035 0.0116704 0.0145875 0.00764121 0.0151904 
+0.0271022 0.011318 0.00996974 0.0110196 0.00925289 0.0109691 0.0108103 0.0108613 0.0116696 0.00787904 0.0109383 0.00769676 0.00829481 0.00969134 0.00929602 0.0130977 0.0177721 0.0186529 0.00898793 0.00846386 0.00851917 0.0142691 0.00784852 0.0179733 0.00924451 0.0112334 0.0105803 0.013269 0.01111 0.0107221 0.0134643 0.00755481 0.0162522 0.00919093 0.0112732 0.0108264 0.00968506 0.0159471 0.0106169 0.017662 0.00839274 0.0103854 0.00808056 0.011202 0.0145698 0.0101001 0.0143089 0.0134676 0.0107477 
+0.0230633 0.0114753 0.00784154 0.00929423 0.00889608 0.0116286 0.0150744 0.0123752 0.0122833 0.010329 0.0124179 0.0140873 0.0126277 0.0111987 0.0103069 0.00949489 0.0146096 0.00890901 0.00783397 0.0112901 0.013117 0.00811653 0.00767978 0.0122787 0.00962363 0.0200206 0.011309 0.0105445 0.00752052 0.0122045 0.00849636 0.0113249 0.00950693 0.00775034 0.0129373 0.010784 0.0114393 0.0119846 0.0088356 0.00808703 0.0103027 0.0111316 0.0077251 0.0119095 0.00794289 0.0154611 0.0132691 0.0132304 0.0142578 
+0.0239072 0.00923091 0.0108958 0.0105975 0.0148438 0.0115835 0.0139692 0.00877158 0.00861952 0.016659 0.0123196 0.0199786 0.0134272 0.00917523 0.00795431 0.019383 0.0159615 0.0100525 0.00801319 0.0117417 0.00912759 0.0101336 0.00795618 0.0111197 0.0095205 0.0110377 0.0137116 0.0111116 0.0120717 0.0139439 0.0166161 0.0142446 0.00807274 0.00767242 0.0149445 0.0119278 0.013909 0.00800654 0.0104336 0.0131392 0.00808004 0.013861 0.00981833 0.0118265 0.0126384 0.0096132 0.0134302 0.0162862 0.0119054 
+0.0223962 0.0127152 0.00925128 0.0129104 0.00813518 0.0136416 0.0122769 0.0114984 0.0128947 0.00802646 0.00793451 0.00908589 0.0137168 0.00981374 0.0124787 0.0109228 0.0106603 0.0149199 0.00805851 0.00907345 0.011635 0.0144065 0.0131782 0.00814107 0.00788798 0.0169886 0.0138701 0.0125473 0.0075731 0.0134445 0.0179868 0.0129148 0.0148743 0.00902368 0.0182203 0.00754253 0.0160189 0.0109286 0.00852182 0.0159449 0.00850632 0.00917183 0.0176302 0.0150493 0.0106994 0.0141455 0.011174 0.00956637 0.0159048 
+0.0304368 0.00985075 0.00924785 0.00869394 0.0140252 0.0100286 0.0161972 0.0158769 0.0108712 0.00941128 0.0100974 0.0138416 0.00780139 0.00798227 0.0115011 0.0118319 0.0096153 0.00843833 0.0123238 0.00848155 0.0117371 0.01002 0.00968988 0.00976564 0.0102043 0.00838277 0.0199357 0.0101832 0.00840926 0.00891564 0.00995065 0.0111052 0.0101932 0.0129618 0.0135528 0.0115114 0.00822212 0.0105505 0.0133001 0.0105868 0.0126539 0.0100525 0.00810846 0.00946776 0.0081359 0.008567 0.00906365 0.0202011 0.0115662 
+0.0241134 0.0143089 0.00763809 0.0103048 0.0147389 0.0116759 0.0143365 0.00955372 0.0136892 0.0112737 0.0131253 0.0128577 0.0136494 0.00796086 0.0117908 0.0114703 0.0119028 0.00813583 0.0144375 0.00849329 0.0126432 0.0141733 0.0078145 0.0144172 0.00884723 0.0151062 0.008119 0.00825625 0.0101327 0.00828455 0.00826487 0.0114902 0.0122419 0.00858807 0.0128025 0.0150127 0.01324 0.00885035 0.0110488 0.011903 0.00945571 0.017715 0.00826096 0.00909538 0.0103109 0.0098135 0.00834678 0.016477 0.0128185 
+0.0256172 0.00805043 0.0158787 0.0123214 0.0124122 0.013729 0.0137244 0.00867987 0.0135993 0.00762924 0.00890781 0.0144716 0.00900865 0.00791389 0.0147116 0.0107582 0.0110171 0.012628 0.00907872 0.00827167 0.0120265 0.00826881 0.0151552 0.0113008 0.0116872 0.00987471 0.0148357 0.00969207 0.0142194 0.0111191 0.00852744 0.0129664 0.0123777 0.00889003 0.00941833 0.00866721 0.0112646 0.0138308 0.010994 0.00828151 0.0119669 0.0113105 0.0198909 0.0111647 0.0151214 0.00941323 0.0157938 0.00781977 0.0104112 
+0.0234519 0.0128553 0.00967367 0.00816595 0.0104776 0.00946706 0.00879412 0.0133896 0.0180495 0.0107266 0.0163932 0.00757559 0.0105604 0.0166635 0.0135827 0.0134133 0.0101368 0.00822928 0.00839668 0.0129781 0.0084781 0.0108501 0.00910018 0.0083439 0.00794161 0.00977823 0.0155439 0.0195073 0.0100602 0.0105728 0.0139483 0.0114243 0.0191313 0.00922986 0.00896528 0.0127137 0.0100254 0.00847564 0.00751717 0.0118792 0.0112046 0.0140585 0.0108731 0.0103888 0.00972049 0.0172753 0.00852407 0.00983208 0.0120798 
+0.0244513 0.00916373 0.0129416 0.00941483 0.0161396 0.00909459 0.00879067 0.00823309 0.0186154 0.00810004 0.00980972 0.0139805 0.0110224 0.0152112 0.00918473 0.00759198 0.0121576 0.00774559 0.0131761 0.0162813 0.0100207 0.0136941 0.00990834 0.0128233 0.0139276 0.00916473 0.0158321 0.00826871 0.0150633 0.0148289 0.0160133 0.0119603 0.0115088 0.0119952 0.0185482 0.0159011 0.012829 0.00832603 0.013801 0.00838511 0.0113083 0.0130678 0.00842253 0.00917392 0.00795815 0.0149192 0.0113192 0.00929634 0.0113226 
+0.0270594 0.00807159 0.00991026 0.0105547 0.0148077 0.0128059 0.0122153 0.0143011 0.00932909 0.0151779 0.0127106 0.00992389 0.0183903 0.0156485 0.0157064 0.0164128 0.0116402 0.00943215 0.0105756 0.00813119 0.0112977 0.0131093 0.0140546 0.0183521 0.010916 0.0138169 0.0102092 0.013503 0.00970905 0.00830962 0.0181664 0.00841476 0.0101295 0.0111494 0.00986364 0.0178961 0.0174996 0.0137928 0.0163185 0.0157177 0.017789 0.0110789 0.0117625 0.0113533 0.0087363 0.0103995 0.0153326 0.0196703 0.0113684 
+0.024149 0.0105725 0.00887366 0.00989522 0.0153129 0.0112372 0.0173714 0.0107147 0.0101773 0.0167279 0.0090978 0.0079373 0.0103145 0.0152955 0.012309 0.0210111 0.0080108 0.00998822 0.0157671 0.00933217 0.00820574 0.0187734 0.0177674 0.009834 0.00918752 0.0179541 0.0186268 0.00756802 0.0109617 0.00983035 0.0112832 0.00861512 0.0245526 0.00975396 0.0123681 0.0132452 0.00788526 0.0131673 0.018516 0.0118614 0.0147822 0.0079077 0.0167211 0.0108169 0.00820817 0.0117829 0.00824693 0.0122202 0.0116028 
+0.0207545 0.0125084 0.0108505 0.0104894 0.0128691 0.00994967 0.00832289 0.0130178 0.0150846 0.0160362 0.00750938 0.0108468 0.0160085 0.0127219 0.0144578 0.0124182 0.0105225 0.0101767 0.0107503 0.0144781 0.00883719 0.0102866 0.00984082 0.00817868 0.0163922 0.00778454 0.00821095 0.00961263 0.0127874 0.013317 0.0104098 0.0120479 0.0175316 0.00782725 0.00831699 0.0125904 0.015201 0.0105327 0.00952129 0.0111621 0.00871013 0.0141928 0.00771442 0.00870064 0.00921121 0.00832019 0.0095926 0.0152257 0.00842376 
+0.0313984 0.007982 0.00995331 0.00981045 0.00876915 0.0126173 0.00999689 0.00851341 0.00934417 0.00899455 0.0100468 0.0108085 0.00850033 0.0086989 0.0091643 0.00956867 0.0089853 0.00985235 0.0190657 0.010688 0.00827333 0.0099693 0.0134224 0.0174105 0.0152557 0.00929907 0.0161777 0.0150555 0.00806505 0.00850974 0.0096831 0.0104143 0.0118908 0.00930829 0.0125519 0.011415 0.00957852 0.00792941 0.00892285 0.0101932 0.0121507 0.0139159 0.0122315 0.00979685 0.0107431 0.00871917 0.0141024 0.00864975 0.0106822 
+0.0223997 0.0080833 0.0119063 0.0123641 0.00763316 0.0119364 0.00935 0.0110724 0.0114145 0.0095334 0.0119481 0.009164 0.0193016 0.0196916 0.00848646 0.00863559 0.0168786 0.0128225 0.0113463 0.00856433 0.00803072 0.0111494 0.012586 0.0175893 0.0178392 0.0103538 0.00967134 0.00806565 0.0128146 0.0199558 0.0162451 0.0099261 0.0121075 0.0158896 0.00812237 0.00779004 0.00800657 0.0109748 0.00877823 0.00833482 0.015287 0.00981958 0.012355 0.00902026 0.0118177 0.00912929 0.0143999 0.0107564 0.0103279 
+0.0227656 0.00873123 0.00783844 0.0155652 0.00943663 0.0100323 0.00815993 0.0101672 0.0132211 0.0153565 0.0105628 0.0158833 0.0152073 0.00801318 0.0109822 0.00822322 0.0110543 0.0120444 0.00895155 0.0150476 0.012667 0.00974858 0.0103141 0.00822587 0.00752527 0.0115945 0.00912847 0.0111587 0.00892064 0.0105066 0.0152925 0.00898104 0.0084449 0.0135059 0.0115401 0.00794416 0.00754046 0.00796978 0.0101548 0.00950692 0.0128038 0.0151233 0.00849278 0.00925549 0.00973895 0.0134272 0.0143091 0.00965296 0.00872116 
+0.0220867 0.00952122 0.0142567 0.00857123 0.0112302 0.00998641 0.0104499 0.0134628 0.00992557 0.0138055 0.0172484 0.00923038 0.00773323 0.0141641 0.0107527 0.0104726 0.0129019 0.00930548 0.00855496 0.0115993 0.0166085 0.0102857 0.00924597 0.0110356 0.0116808 0.00765342 0.00769771 0.00903775 0.0141177 0.00807407 0.0120771 0.00976994 0.0188901 0.0127345 0.00857711 0.0182242 0.0117 0.0091719 0.0172657 0.00978154 0.00936656 0.0093512 0.0121914 0.0165407 0.00926855 0.0129943 0.00847156 0.00906975 0.0107144 
+0.0219103 0.017343 0.0196978 0.0104879 0.0105515 0.00999576 0.00928981 0.0149162 0.0090834 0.0113828 0.0109014 0.0153987 0.00770247 0.00823608 0.0128081 0.00914814 0.00751693 0.0107139 0.0125536 0.0102118 0.0116546 0.0123145 0.00896101 0.0118348 0.0104389 0.0133669 0.0111383 0.0153073 0.00830141 0.00823326 0.0108762 0.00976553 0.00819337 0.00757586 0.0104232 0.0150566 0.0112633 0.00918437 0.0088266 0.0131901 0.0148984 0.0114526 0.0173006 0.0126395 0.0134126 0.00977013 0.00799374 0.0209565 0.0158383 
+0.024387 0.0100473 0.0085098 0.0110007 0.0102743 0.0145895 0.0142096 0.00932722 0.0115369 0.00849483 0.00933373 0.00822497 0.0101023 0.0129909 0.0124775 0.0171622 0.00770938 0.0136457 0.00845559 0.00977008 0.00957626 0.00809545 0.0189998 0.00803223 0.00904886 0.00753141 0.00798637 0.0117644 0.00897732 0.00867011 0.0091308 0.0118075 0.00975002 0.0132517 0.0159158 0.0167903 0.0115717 0.00884695 0.016522 0.00820418 0.0075003 0.0130556 0.00946251 0.00784477 0.00826967 0.013128 0.0123578 0.0170744 0.0188824 
+0.0288534 0.0134006 0.0110453 0.0140292 0.015017 0.0128366 0.00788706 0.0178822 0.0127398 0.00967744 0.00767562 0.00849709 0.0133257 0.00838418 0.0171635 0.0132189 0.0105086 0.0139051 0.00804162 0.0101824 0.0154833 0.0156085 0.00862076 0.0130732 0.014416 0.00788632 0.0104821 0.0128714 0.00923762 0.010608 0.0136589 0.0134713 0.0138101 0.00986637 0.00790978 0.0112465 0.0159947 0.00901536 0.0101493 0.0134912 0.00978309 0.015527 0.00969498 0.0126513 0.0111776 0.00755319 0.0184152 0.00789488 0.00930304 
+0.0217148 0.0087382 0.0210872 0.0105126 0.0127658 0.0112209 0.0115129 0.0113154 0.011329 0.0134951 0.00901304 0.00848824 0.0177882 0.0190767 0.0136747 0.010352 0.0152489 0.0109627 0.010929 0.0127208 0.0107025 0.0114791 0.00890777 0.0104932 0.0129995 0.0156713 0.00762406 0.0129343 0.0108742 0.0127172 0.0102928 0.00827948 0.0136815 0.0113224 0.00843217 0.0152795 0.0149782 0.007825 0.0109507 0.0137877 0.0127452 0.0136955 0.00991781 0.00827777 0.0179353 0.00886824 0.0141212 0.0168107 0.0173965 
+0.0219153 0.00879596 0.0139392 0.00853973 0.0096694 0.0162534 0.012773 0.00981645 0.00805063 0.00937836 0.0138217 0.0112746 0.0086497 0.0152205 0.00913559 0.0166785 0.0145056 0.00797746 0.00843824 0.00793002 0.0169129 0.00869163 0.0107405 0.00847737 0.01042 0.00827518 0.0117657 0.00869265 0.0109001 0.0162319 0.0165439 0.0132549 0.0102501 0.00996245 0.0114955 0.0101729 0.0117175 0.0174851 0.0104437 0.00795506 0.0126651 0.0108139 0.0143188 0.00871343 0.012538 0.0223205 0.00845874 0.00775098 0.0148045 
+0.0257533 0.00776964 0.00942699 0.0162227 0.0084621 0.00894997 0.00974841 0.0148166 0.00800937 0.0123893 0.00793666 0.0109363 0.0129667 0.011252 0.0132105 0.018058 0.0100507 0.00875572 0.010091 0.0192821 0.00799599 0.0140755 0.0137605 0.00786973 0.0154267 0.0131201 0.0200065 0.0200133 0.00802706 0.0111778 0.0172287 0.0111324 0.00835332 0.00857922 0.00900446 0.0134356 0.0128486 0.00765603 0.0153792 0.0136076 0.00936792 0.0111302 0.00957391 0.00810223 0.0118117 0.0133464 0.0127174 0.0105692 0.00892939 
+0.024227 0.0122199 0.00901293 0.0131072 0.0129681 0.0157613 0.00831601 0.0111685 0.0130465 0.00889493 0.012909 0.0119596 0.00750153 0.0109797 0.0112245 0.0117992 0.014789 0.0134853 0.0105948 0.00783141 0.0103964 0.0106642 0.00827252 0.0112292 0.00756793 0.0107174 0.0139279 0.0112388 0.00919734 0.00932427 0.00860366 0.0131025 0.0195901 0.00827442 0.0112736 0.00896313 0.0156302 0.0116154 0.00805878 0.014369 0.0132608 0.0105323 0.00876208 0.0118269 0.00961405 0.0105055 0.0126735 0.0082246 0.00777274 
+0.0232398 0.0182848 0.0100607 0.0125212 0.0160683 0.00981616 0.00925346 0.0075162 0.00980768 0.00900646 0.00890371 0.0136854 0.0145326 0.00856758 0.0126471 0.0137377 0.00961645 0.0078712 0.0150319 0.0114971 0.0110445 0.0115416 0.0126913 0.0116137 0.00759611 0.00829835 0.00931262 0.0165359 0.00807646 0.0112452 0.0129125 0.0138121 0.0144461 0.009263 0.00773189 0.0118874 0.0141273 0.0141659 0.00968382 0.0129926 0.0109789 0.0101879 0.0102743 0.0165389 0.0144569 0.00819232 0.0153246 0.00796191 0.00828517 
+0.0259787 0.010485 0.010609 0.00792477 0.0102463 0.0121859 0.0156888 0.0116376 0.0167157 0.0187747 0.00783081 0.0121942 0.00987135 0.0127848 0.00864133 0.00867839 0.0102234 0.0201533 0.00969555 0.00895597 0.00927638 0.0105126 0.0103961 0.0104856 0.0136381 0.00759534 0.0106049 0.0110775 0.00906157 0.0190068 0.00763674 0.00796651 0.00811015 0.00867397 0.00900911 0.00931233 0.00754657 0.00751041 0.00936925 0.0128884 0.0142454 0.0183237 0.00776128 0.00834111 0.0163471 0.0145873 0.0105058 0.0106947 0.0119503 
+0.0206671 0.00769482 0.0108698 0.00797924 0.00989985 0.00925665 0.0116527 0.0134322 0.0113285 0.0132066 0.0110814 0.016983 0.0160277 0.0102325 0.0121382 0.010984 0.00756652 0.0125121 0.0107692 0.00966968 0.00933033 0.0133006 0.0103347 0.00842852 0.008004 0.00865679 0.0131454 0.00917677 0.0194663 0.0112063 0.00876808 0.00773606 0.0201326 0.0100063 0.00910977 0.0113968 0.00985285 0.00829201 0.0115724 0.00918228 0.00913257 0.016697 0.0110031 0.011874 0.0143604 0.0129367 0.0122788 0.0141067 0.0165299 
+0.0235355 0.0115779 0.00907672 0.0159438 0.0174978 0.0170136 0.0141612 0.0139259 0.0126844 0.0096883 0.0111207 0.00927742 0.00780215 0.0112174 0.00753353 0.00820472 0.00798994 0.0111971 0.0129177 0.0120068 0.0165062 0.00856146 0.00820934 0.0128443 0.0101435 0.0123611 0.00848645 0.0159466 0.0117611 0.00778523 0.0108223 0.00792226 0.0146178 0.0167422 0.00888029 0.0084917 0.0103163 0.0108963 0.0131455 0.009296 0.00776834 0.0101146 0.0101946 0.0158065 0.0104041 0.0162822 0.0133552 0.01034 0.00806414 
+0.0223017 0.0102084 0.00847506 0.0122524 0.012433 0.0093421 0.0143454 0.0101233 0.0107128 0.0135012 0.00833962 0.0104653 0.00775288 0.0140836 0.0107977 0.00846482 0.0120958 0.0160775 0.0106818 0.00906504 0.00925368 0.0121868 0.0145626 0.0136249 0.00756399 0.0135822 0.0108359 0.00928053 0.0115864 0.0130171 0.00915042 0.00795784 0.00875577 0.00889915 0.00949089 0.0081355 0.00818921 0.0152822 0.0107207 0.0125019 0.00785935 0.00807379 0.0133005 0.0153737 0.00844602 0.008706 0.00832958 0.0176599 0.0113929 
+0.0222785 0.0115685 0.00882136 0.0115904 0.0101322 0.0149058 0.0100778 0.019607 0.0131377 0.0130118 0.0124715 0.0103685 0.00807073 0.00928566 0.0108924 0.0103663 0.0151538 0.0140399 0.0135388 0.0116879 0.00862708 0.0103798 0.0149153 0.0200176 0.00864548 0.0102865 0.0123699 0.0148262 0.0103001 0.0126099 0.0152885 0.0123521 0.0193287 0.00835033 0.00942435 0.0101484 0.014357 0.0137524 0.0175744 0.00881443 0.0139454 0.00923441 0.0154462 0.00888159 0.0111433 0.0163142 0.00983176 0.010001 0.00882299 
+0.0219002 0.00885172 0.00892213 0.0160531 0.0110302 0.0103801 0.0153188 0.012857 0.0122882 0.0084038 0.00827598 0.00821238 0.0100706 0.00916037 0.0145212 0.00997158 0.00996835 0.00804101 0.00832772 0.0085908 0.0136335 0.0131631 0.0118268 0.0083565 0.0106843 0.0181602 0.0116033 0.0126854 0.0160203 0.00970096 0.00926267 0.00816163 0.00921396 0.0150989 0.0111025 0.00957662 0.0173483 0.0118917 0.00918161 0.014099 0.0153688 0.0134076 0.0103182 0.013726 0.0180079 0.00983955 0.0120475 0.00939958 0.0153206 
+0.023467 0.0105336 0.00939131 0.0113621 0.0130088 0.0153581 0.00841515 0.00865124 0.0135816 0.0162395 0.0203847 0.0127909 0.00829511 0.00881781 0.0163614 0.00820557 0.0124668 0.0123785 0.00910737 0.0111985 0.00886406 0.0112144 0.00894312 0.0152993 0.0124651 0.0104476 0.0100528 0.0118992 0.00857994 0.0135462 0.0162855 0.00787341 0.00978844 0.00891056 0.00849655 0.0124034 0.011003 0.00954397 0.00756585 0.00871334 0.0111837 0.0148349 0.0131524 0.016365 0.00985213 0.0115147 0.00986463 0.00915865 0.00835631 
+0.0218976 0.0139627 0.0120246 0.0114489 0.011564 0.0173305 0.00881312 0.0141614 0.013083 0.00862102 0.00989734 0.0151296 0.0138561 0.00752894 0.0108879 0.0181947 0.00986264 0.0106516 0.00949566 0.0129874 0.00763186 0.0100316 0.0109182 0.0146944 0.0102073 0.00973839 0.0161131 0.0085112 0.015397 0.00965817 0.012005 0.00856396 0.0127106 0.018741 0.0150085 0.0141986 0.0113409 0.00952792 0.00785132 0.00985255 0.00927583 0.0122464 0.00818961 0.00821668 0.0136789 0.0129419 0.0121544 0.0104701 0.0158305 
+0.0206795 0.0209386 0.00751626 0.0160865 0.0101277 0.00850899 0.00752594 0.0095737 0.0106809 0.00867979 0.0113247 0.012077 0.00994838 0.00854107 0.0155612 0.0108833 0.00920965 0.00920464 0.010517 0.00900608 0.0148061 0.00965102 0.0105772 0.008064 0.0109489 0.0120811 0.0133097 0.0132957 0.0141527 0.01249 0.0112634 0.00813683 0.0110562 0.0108294 0.00796001 0.0124461 0.0132342 0.010618 0.0143029 0.00839652 0.0109242 0.00987172 0.00908014 0.0119396 0.01279 0.014592 0.00924242 0.00931531 0.00795973 
+0.0211289 0.0132589 0.0104337 0.00948422 0.0117414 0.0210517 0.0137955 0.0129675 0.0138387 0.00893837 0.0151963 0.0171065 0.00775741 0.0101589 0.0141498 0.0187153 0.00947764 0.00774628 0.0154054 0.00989548 0.0152872 0.009876 0.01436 0.0128548 0.0133433 0.0126873 0.0165809 0.00761219 0.00873266 0.0176218 0.00890786 0.007627 0.00928098 0.0118897 0.0171019 0.0098913 0.013492 0.0121169 0.011258 0.0122963 0.0108707 0.011473 0.0107655 0.0148202 0.00982258 0.00957453 0.0111155 0.0100285 0.00813544 
+0.0249937 0.0132628 0.00989043 0.00808503 0.00865407 0.0113607 0.00909992 0.013825 0.0130143 0.0134441 0.0126256 0.0119627 0.0108432 0.0130967 0.00952968 0.0164724 0.0111507 0.0160875 0.00754196 0.00870893 0.0187831 0.00819476 0.0120628 0.00911874 0.00942726 0.0179704 0.0110552 0.00994693 0.0129673 0.00918318 0.00912005 0.00789547 0.0103583 0.00988415 0.00999427 0.0121406 0.0157666 0.00870823 0.0119195 0.013 0.00982782 0.008185 0.0125848 0.0125986 0.00854327 0.00809625 0.0127336 0.0165125 0.0135715 
+0.0260616 0.0122205 0.011663 0.0103692 0.0140887 0.0106316 0.0132454 0.0104939 0.00926397 0.0120671 0.0103348 0.00877237 0.0225838 0.0107394 0.0126268 0.00957499 0.00950101 0.0147754 0.0101303 0.00940025 0.01413 0.0137589 0.00832324 0.0161982 0.00979315 0.00869996 0.00832121 0.0110396 0.0102091 0.00861289 0.011158 0.00943551 0.00805955 0.0097125 0.013317 0.0124797 0.0093203 0.0134614 0.00954158 0.0125645 0.0152999 0.0152032 0.0124181 0.0149455 0.00942742 0.0148204 0.0132616 0.0178735 0.0104298 
+0.0257762 0.0110571 0.0105898 0.013532 0.0162459 0.0142268 0.0115755 0.0158183 0.0118954 0.0168663 0.0120186 0.0114937 0.00980749 0.0129433 0.00975411 0.0131276 0.010905 0.012303 0.0130469 0.00818084 0.0160419 0.0112622 0.019154 0.010586 0.0114233 0.017696 0.010955 0.0108811 0.00757016 0.00925705 0.0138948 0.0151963 0.0101173 0.01329 0.00948939 0.0121616 0.0129249 0.013639 0.00887597 0.00781413 0.00790878 0.00990957 0.017771 0.00860841 0.0075033 0.0110234 0.0158632 0.0129937 0.0116417 
+0.0240191 0.0165563 0.0120537 0.0111116 0.0117274 0.00853298 0.00787973 0.00964887 0.0130147 0.00973637 0.0131743 0.00921314 0.0122116 0.0117603 0.0154865 0.010949 0.00978931 0.0098883 0.0172717 0.00869153 0.00793727 0.0112716 0.00787567 0.00865546 0.010282 0.0108673 0.0143578 0.0116831 0.0118659 0.00807395 0.00845995 0.0114815 0.00858862 0.00772967 0.00801831 0.0101241 0.0131679 0.00841491 0.0190119 0.0119131 0.00985021 0.00776973 0.0103715 0.0124589 0.0159059 0.00987744 0.0106877 0.00904324 0.0103904 
+0.0336864 0.0131397 0.00997658 0.013466 0.00918262 0.0102103 0.0102057 0.012457 0.00807424 0.0108472 0.00922633 0.0146097 0.0119745 0.011336 0.00792642 0.012814 0.0102497 0.00972321 0.00987833 0.0110496 0.0144665 0.00823416 0.0110999 0.0122386 0.0127554 0.0113965 0.0152782 0.0100133 0.00860568 0.00944747 0.0137201 0.00764315 0.018137 0.0117797 0.00834321 0.0131745 0.00909519 0.0186882 0.0136019 0.00959667 0.0138963 0.0240943 0.00922153 0.00795525 0.0123477 0.00760298 0.00935222 0.0116062 0.0117592 
+0.0211109 0.00823624 0.013151 0.00903996 0.00971909 0.00966642 0.00893984 0.0117034 0.0099061 0.0177389 0.0132994 0.012128 0.0102907 0.00821357 0.0130841 0.0103626 0.00939324 0.0128043 0.0114336 0.0183887 0.0122977 0.0136871 0.00766289 0.00772549 0.0091006 0.00988621 0.0140285 0.0132475 0.0149558 0.0140575 0.00941845 0.0135572 0.0127171 0.0101749 0.0133492 0.0123623 0.0157588 0.0132736 0.00873428 0.0232314 0.0144387 0.0107137 0.0114545 0.00904013 0.0133339 0.0114084 0.00760248 0.0116109 0.0140074 
+0.0225542 0.0177584 0.00820206 0.00960976 0.0100134 0.0109611 0.0153114 0.010387 0.0165983 0.0175927 0.0101028 0.0138415 0.0113752 0.00790347 0.00757291 0.00788049 0.0113011 0.00869304 0.0144222 0.00752977 0.0112919 0.0142053 0.00801023 0.0135512 0.0189808 0.0138823 0.0103193 0.0148885 0.00916007 0.0111217 0.0113887 0.00874071 0.0126873 0.00977365 0.0158879 0.0135432 0.008609 0.0177525 0.00934098 0.00775321 0.0200725 0.0138581 0.00929476 0.0103565 0.0155505 0.0114794 0.0086308 0.00882947 0.0145632 
+0.0250956 0.0119632 0.00759771 0.00978061 0.0117281 0.0104614 0.0106371 0.0119173 0.00801365 0.00928938 0.00869799 0.0130824 0.00826153 0.0113958 0.00902546 0.0158337 0.0127575 0.0108629 0.0108841 0.0122359 0.011006 0.0104098 0.0118249 0.00972292 0.00757526 0.00852472 0.0108496 0.0122731 0.00866414 0.0102172 0.0118904 0.00838573 0.00840182 0.0145641 0.0107593 0.0102488 0.00931913 0.00907936 0.00976757 0.0106548 0.00773216 0.00831631 0.0087845 0.00974962 0.0102509 0.0077432 0.011432 0.00869996 0.00920229 
+0.0235478 0.010933 0.0126415 0.0130523 0.00784461 0.00802316 0.00839757 0.0145396 0.00936429 0.0104733 0.0104434 0.0117082 0.0108046 0.0143165 0.00999919 0.0131738 0.00875868 0.00904078 0.0159884 0.0159185 0.0100566 0.0169482 0.011197 0.00924774 0.0083769 0.00853495 0.00776002 0.0107559 0.0076253 0.0110167 0.0175894 0.00870061 0.0108202 0.015549 0.00798324 0.018877 0.0218267 0.0131679 0.0118787 0.0100219 0.00780855 0.00804774 0.00867327 0.0110219 0.0084428 0.00776008 0.00766939 0.0120955 0.00801992 
+0.021955 0.00940322 0.0154049 0.0106521 0.00834238 0.0105714 0.0137723 0.0160339 0.0106749 0.0106381 0.0129054 0.0109451 0.0141554 0.0139853 0.00848258 0.0147618 0.00882912 0.0115394 0.0144487 0.0175726 0.00958771 0.0140424 0.01117 0.00765008 0.0151508 0.0153911 0.00774375 0.0158878 0.00870456 0.00851632 0.00798942 0.0155605 0.00763791 0.0151582 0.00907071 0.00860455 0.00927132 0.00840893 0.00814229 0.0173009 0.00972256 0.0114171 0.00879717 0.00768555 0.0100898 0.0108291 0.0117871 0.00967495 0.00888629 
+0.0257467 0.00900563 0.0171562 0.00903157 0.00825902 0.009528 0.0084389 0.00793054 0.0111465 0.0118638 0.00973942 0.0148717 0.0139167 0.00888057 0.010006 0.00958233 0.0114284 0.01154 0.00976147 0.0105533 0.0123792 0.0127316 0.0113913 0.00938501 0.0108933 0.0132494 0.0100134 0.00793871 0.0119074 0.0138443 0.00850183 0.0106248 0.00916611 0.0142109 0.0174247 0.00920319 0.00830646 0.00963313 0.00905203 0.0171472 0.0109715 0.0116792 0.00925263 0.0113661 0.00752427 0.00986478 0.0174423 0.00845846 0.00833033 
+0.0224793 0.0180033 0.00849516 0.00887345 0.0127341 0.0113676 0.00913553 0.0121502 0.00777487 0.00912217 0.00850362 0.0154226 0.0155775 0.00892688 0.0114641 0.00942511 0.0156517 0.015633 0.0160293 0.00924149 0.0129434 0.0100237 0.011184 0.0153932 0.0100437 0.0124477 0.0126745 0.0119709 0.0140577 0.0135944 0.0188757 0.0106933 0.010631 0.0137078 0.0129499 0.0105144 0.0119992 0.0131591 0.0118469 0.0149973 0.0176692 0.0125257 0.00758253 0.0138059 0.0134798 0.0110038 0.0132354 0.0107177 0.0134322 
+0.0210345 0.0102932 0.0132349 0.00963761 0.00906311 0.00865032 0.0109847 0.00928407 0.0077928 0.0135659 0.0130834 0.00843539 0.0185632 0.0078 0.0152633 0.00880949 0.0132717 0.0168182 0.0103695 0.0089427 0.0106294 0.0107249 0.0081818 0.0196271 0.00827723 0.0136525 0.00876158 0.00939363 0.0133215 0.0150743 0.0109293 0.011322 0.0127186 0.00856855 0.0225302 0.0100105 0.00911908 0.0105362 0.0105037 0.00855259 0.0135456 0.00982338 0.0107018 0.00759774 0.0152149 0.00881609 0.0143926 0.00817332 0.00882965 
+0.0222467 0.00938584 0.0102153 0.00996461 0.0157442 0.00896774 0.0112177 0.0102015 0.011268 0.0148865 0.0130086 0.00867837 0.0146227 0.00816429 0.00796441 0.00974237 0.00811556 0.00877084 0.00792424 0.00825762 0.00984428 0.00905027 0.0113013 0.0116487 0.0117235 0.01317 0.015996 0.0129839 0.0081974 0.00819008 0.00809835 0.00812309 0.0135022 0.0105967 0.0129249 0.00907971 0.011666 0.0102498 0.0102823 0.0127665 0.0177145 0.00864585 0.0109531 0.00915439 0.0163722 0.0169408 0.00839886 0.00808527 0.0132475 
+0.0351424 0.0123037 0.00996178 0.0133582 0.0122959 0.00961472 0.0170842 0.0097461 0.00928027 0.0188664 0.0103514 0.0090499 0.0162294 0.0104919 0.0103414 0.0151058 0.0105229 0.00968669 0.0112248 0.00925596 0.0118245 0.0125294 0.00951293 0.0091115 0.0100605 0.0151374 0.0113915 0.0181758 0.0174017 0.010752 0.013117 0.0109705 0.0178813 0.00835953 0.0152218 0.0105508 0.0112765 0.0148581 0.0117385 0.0142863 0.0142671 0.0102961 0.00755827 0.00936552 0.0149442 0.00892664 0.010704 0.00807923 0.0103055 
+0.0129276 0.0233393 0.0123679 0.00811758 0.0125915 0.0113035 0.0140784 0.00822778 0.011812 0.0077716 0.00817863 0.0158648 0.0134115 0.0114055 0.00879775 0.0143018 0.0127297 0.00820451 0.0140901 0.0148652 0.0105412 0.00750298 0.00978503 0.00777211 0.0106125 0.0137089 0.0100961 0.0106955 0.015458 0.0135725 0.00926069 0.00887185 0.00930858 0.00948317 0.0118273 0.0104596 0.0148373 0.0122002 0.0121313 0.00896792 0.00780052 0.0142384 0.0162418 0.0120706 0.0108821 0.00821499 0.00899263 0.0152771 0.00929117 
+0.00852234 0.0223121 0.00963208 0.0101909 0.0154755 0.0132661 0.00775918 0.00946033 0.00824469 0.0076171 0.0168582 0.0149323 0.012522 0.0119527 0.0135063 0.0140917 0.0138473 0.0136181 0.00928085 0.0120208 0.00863997 0.0101098 0.00867349 0.0127716 0.00905366 0.0104012 0.018715 0.0120801 0.0123961 0.00815842 0.0104416 0.00964878 0.0117088 0.00801172 0.0142401 0.0154078 0.0139241 0.0141611 0.00867801 0.0141621 0.0188441 0.00857864 0.00867845 0.0108114 0.00963661 0.0177781 0.0158615 0.00982639 0.0121654 
+0.0121402 0.0283287 0.0123046 0.012044 0.0151337 0.0107913 0.0132634 0.0144782 0.00976324 0.0160855 0.011114 0.0173437 0.0184064 0.00914569 0.00893225 0.00906138 0.0172909 0.0156876 0.0155012 0.0121186 0.00848431 0.0100921 0.00914746 0.0138674 0.00830907 0.00889417 0.00908697 0.0131617 0.00816596 0.00967272 0.0136794 0.0079061 0.0101782 0.0077368 0.0116617 0.00841962 0.0100697 0.0114808 0.0134463 0.0173815 0.00865972 0.0153556 0.0134127 0.00793805 0.0107085 0.00977577 0.0196041 0.0113622 0.0141046 
+0.0107332 0.0272767 0.00973959 0.0120877 0.0212239 0.0123182 0.0106044 0.0117571 0.00806122 0.0081122 0.0101717 0.0117068 0.00929421 0.012269 0.00839687 0.016317 0.00808212 0.0116174 0.010069 0.00762633 0.0118785 0.0170918 0.00906191 0.0167674 0.00790757 0.0109736 0.0122299 0.00811963 0.00960001 0.00925493 0.0123415 0.01325 0.00753833 0.0123176 0.0145587 0.0118021 0.00766919 0.0124873 0.00973067 0.00847831 0.012676 0.0123287 0.0097535 0.0106664 0.0107547 0.0138246 0.0195624 0.0100525 0.00962325 
+0.0119281 0.0251323 0.0113759 0.0101683 0.0130994 0.00879826 0.00823088 0.0114712 0.0161398 0.0100226 0.0100097 0.0125042 0.0138629 0.0117501 0.00756436 0.0144745 0.0136194 0.0124086 0.0114991 0.0143748 0.0181917 0.0136612 0.00962511 0.0103765 0.0101686 0.0113172 0.00870255 0.019323 0.00995193 0.0118664 0.00851607 0.00946212 0.00907452 0.00818943 0.0131628 0.00996086 0.0141944 0.0150947 0.0111328 0.0130214 0.00983337 0.0136919 0.0113602 0.00871084 0.0157483 0.010904 0.0236183 0.0143695 0.011746 
+0.016832 0.025437 0.0098946 0.0150509 0.00825244 0.0089719 0.012338 0.0111107 0.00807729 0.0130397 0.0134324 0.0135269 0.0137549 0.00965155 0.0143415 0.0147855 0.00917189 0.00983137 0.0147469 0.00907731 0.0109262 0.0137849 0.00978295 0.0082266 0.00806215 0.00863035 0.00929611 0.0115255 0.0100205 0.0107482 0.0104747 0.0106799 0.0162054 0.00826127 0.0137276 0.00833625 0.0130827 0.00891462 0.010127 0.0143162 0.00873058 0.0115233 0.013493 0.0151634 0.00956458 0.00948767 0.0135435 0.0170953 0.00955095 
+0.008911 0.0235109 0.00912204 0.0087342 0.00759654 0.00953132 0.019344 0.0164507 0.00760068 0.0114743 0.010967 0.0113929 0.00926275 0.00968558 0.0181604 0.0111836 0.010156 0.0166397 0.00962995 0.00851972 0.012249 0.00780138 0.0104243 0.0111163 0.0123718 0.00754142 0.00995157 0.00903349 0.0116911 0.0102157 0.00941913 0.0126701 0.0079727 0.0127494 0.016391 0.00818587 0.0112246 0.0139862 0.0083602 0.00781615 0.0123179 0.0201769 0.0151085 0.0138697 0.0108868 0.0123237 0.00820885 0.00938777 0.00885651 
+0.0149063 0.0212549 0.012959 0.020298 0.00900085 0.0138103 0.00818875 0.0148981 0.0153341 0.00844291 0.00993773 0.0079899 0.0112637 0.0084996 0.00997534 0.0199401 0.0107206 0.0139009 0.00872373 0.00833983 0.0142801 0.0138285 0.0164602 0.0131497 0.00819378 0.00781263 0.0113703 0.00913357 0.0109295 0.0120916 0.0112749 0.0122972 0.0135997 0.0127951 0.0207119 0.0090782 0.0173306 0.0127178 0.0111936 0.00840975 0.0121481 0.0086159 0.00999495 0.00944532 0.0110877 0.0114007 0.00954282 0.00847484 0.0157283 
+0.0136951 0.0223832 0.0124686 0.00785013 0.00987724 0.0116808 0.00820833 0.00857517 0.0110849 0.0106047 0.00791872 0.0129586 0.0109218 0.0113774 0.0118702 0.0124808 0.0104141 0.0133277 0.0157505 0.0120121 0.00832354 0.0150398 0.0145849 0.0106571 0.00781995 0.00948235 0.00791023 0.0130278 0.0177706 0.00934947 0.00917786 0.00976387 0.011106 0.00872163 0.0125572 0.00872901 0.00833814 0.0119969 0.0114674 0.00899175 0.00761545 0.0152811 0.0113137 0.0112011 0.0155478 0.0090728 0.00907604 0.00936913 0.00997295 
+0.00950312 0.0266506 0.00981835 0.0127662 0.0137664 0.00990442 0.00792798 0.0123088 0.00768171 0.0101829 0.0113718 0.0184991 0.0102974 0.0118027 0.00956921 0.0102662 0.0093895 0.0101815 0.00963158 0.00755334 0.0108035 0.0190889 0.0148255 0.0130755 0.0107963 0.0128566 0.018184 0.00967175 0.0103734 0.0109859 0.0101677 0.0172851 0.0137611 0.00897412 0.0156099 0.00973854 0.00879859 0.0107113 0.0115928 0.0120723 0.00812917 0.0140335 0.0103966 0.00821714 0.00988885 0.00879647 0.00862479 0.0178016 0.0108844 
+0.00931047 0.0212717 0.0108262 0.0127634 0.0145661 0.00913274 0.00856461 0.00937716 0.0198238 0.00896952 0.0135313 0.00771199 0.0120329 0.0100075 0.0106133 0.00874778 0.0167496 0.00823572 0.00935717 0.00824935 0.0168475 0.00921845 0.00827007 0.0123167 0.00973159 0.00828868 0.00947891 0.0157224 0.0131671 0.00849454 0.0186665 0.00985705 0.00901789 0.0103838 0.00960163 0.014531 0.00818125 0.0126618 0.0158882 0.0115781 0.00928472 0.010543 0.0105983 0.0163608 0.00812475 0.00812854 0.0150405 0.00956537 0.0112847 
+0.0133507 0.0222575 0.0161251 0.0119533 0.0144081 0.0108558 0.0114905 0.0142285 0.0144361 0.00921056 0.00770848 0.00862711 0.0103536 0.00787416 0.00814622 0.00792632 0.0127709 0.0129858 0.0107349 0.0148318 0.00797624 0.00755296 0.00969787 0.0101482 0.00783036 0.017277 0.0134317 0.00821771 0.00886024 0.0104904 0.0176322 0.0121712 0.020734 0.0100178 0.00845033 0.0106503 0.00899612 0.0113405 0.0108297 0.0138385 0.0157567 0.0112884 0.0111557 0.0132281 0.00975073 0.014763 0.0136806 0.0146974 0.0131973 
+0.00757317 0.0316546 0.00916651 0.0139182 0.0100605 0.0148793 0.0132188 0.0123282 0.013338 0.0145837 0.00844133 0.0127421 0.013383 0.00855457 0.0122084 0.0159186 0.00929636 0.00840363 0.011132 0.00982903 0.0123923 0.0103834 0.00992641 0.0109025 0.0101771 0.0114342 0.0235065 0.00930357 0.0146577 0.0136719 0.0112625 0.0153417 0.0109942 0.0109348 0.0136389 0.0123666 0.00907794 0.00952423 0.0104219 0.0224332 0.00993761 0.0173772 0.012886 0.0150848 0.011544 0.00961976 0.00860173 0.0131099 0.0118314 
+0.0120723 0.0255761 0.0112617 0.01382 0.0132065 0.0130526 0.0215032 0.010341 0.00897479 0.0114267 0.0101744 0.0139777 0.0102806 0.010422 0.00785908 0.0123871 0.00796796 0.0106122 0.00882741 0.0149923 0.0127838 0.00863863 0.0122057 0.0079143 0.0122751 0.0116304 0.00776789 0.010707 0.0120565 0.00975561 0.00916567 0.0170236 0.00930545 0.00808708 0.0121516 0.00843588 0.0096937 0.00829772 0.0113528 0.0201463 0.0133134 0.0139057 0.00758521 0.00771479 0.0170862 0.012938 0.0088074 0.0134293 0.0132683 
+0.00803278 0.0209172 0.00757131 0.0113841 0.01333 0.010207 0.00833083 0.0115314 0.00901439 0.0172958 0.00783639 0.0117991 0.00905356 0.0187385 0.0125364 0.0124897 0.00777334 0.00762111 0.00764487 0.0100852 0.0112004 0.00766297 0.011141 0.011336 0.0116206 0.0153244 0.0110559 0.0111093 0.0191496 0.00825756 0.00835087 0.00920095 0.0086951 0.00975748 0.00775385 0.00784449 0.0105598 0.0146987 0.0149052 0.0099039 0.00847096 0.00765091 0.00821896 0.0133852 0.0151837 0.00769759 0.0084672 0.00772901 0.0108049 
+0.0109955 0.0249024 0.0101091 0.00983563 0.00974612 0.0156559 0.0156009 0.0117273 0.00984906 0.00939121 0.0091798 0.0079146 0.00850079 0.0120497 0.0192706 0.00796791 0.0100349 0.00803927 0.00944602 0.0123078 0.0188128 0.0121223 0.0138691 0.0171691 0.0190786 0.00755588 0.0131079 0.0122201 0.0131311 0.00985387 0.0129397 0.00832782 0.00770978 0.0146897 0.0085377 0.0093607 0.0136283 0.00861774 0.0119357 0.00754826 0.00933493 0.0165091 0.0150146 0.0129198 0.00897361 0.0110699 0.010965 0.00913158 0.0094458 
+0.00816776 0.0255658 0.0108657 0.016728 0.0186649 0.00843576 0.00754841 0.0118891 0.0167045 0.00990853 0.00915239 0.0145487 0.0115878 0.0171883 0.0120623 0.0104243 0.0156599 0.0133134 0.00761177 0.0082783 0.00844763 0.0192286 0.0158587 0.0111308 0.0165118 0.008163 0.0112963 0.0105155 0.00872855 0.0145013 0.00883721 0.00903406 0.0113706 0.013245 0.0101755 0.0113627 0.00977604 0.00805712 0.0135069 0.0075737 0.0138943 0.0169186 0.0117394 0.0116851 0.0101898 0.00902569 0.00750745 0.0104441 0.0180939 
+0.00852116 0.0205726 0.0121222 0.0149015 0.0154244 0.0121064 0.0109069 0.0118412 0.0111598 0.00890083 0.00910357 0.00800454 0.00882927 0.0133648 0.0176747 0.00950788 0.00813835 0.00806194 0.0166886 0.0144441 0.0104103 0.0146281 0.0100274 0.0149547 0.00857742 0.009651 0.00830572 0.0122816 0.0108633 0.0082782 0.0141194 0.0094251 0.0115744 0.00767372 0.0124238 0.00836088 0.0103497 0.0103135 0.00950646 0.0139942 0.00843365 0.00868824 0.00761651 0.013486 0.0103854 0.01307 0.0100172 0.00912684 0.00886648 
+0.0129786 0.0289352 0.00844202 0.0107492 0.011887 0.00897751 0.0107938 0.00837794 0.00976195 0.0118045 0.0125038 0.00778407 0.0152679 0.00939623 0.0104466 0.0149055 0.0114223 0.0165696 0.0162068 0.0150471 0.00937458 0.0120342 0.00796215 0.00998259 0.0107024 0.0128497 0.00851376 0.0148639 0.00797989 0.0115649 0.0105127 0.0177058 0.00805555 0.0109158 0.0128019 0.0157836 0.0116487 0.0133724 0.0135865 0.0131214 0.0112112 0.0115187 0.0121206 0.0104741 0.015845 0.00882153 0.0111644 0.0139008 0.0116919 
+0.00971783 0.0229114 0.0128805 0.0158851 0.0152331 0.0124343 0.00779956 0.0124177 0.0105913 0.0126525 0.0141405 0.0128456 0.0110557 0.0107332 0.00822904 0.0100937 0.00954246 0.0124383 0.0128021 0.0089498 0.0117146 0.0163783 0.0152088 0.0155538 0.0101817 0.0108035 0.00916592 0.00766908 0.0132058 0.0115911 0.00851837 0.0150466 0.0152156 0.0208577 0.00839355 0.0097649 0.00816572 0.0139933 0.0124919 0.0141569 0.0139173 0.00758375 0.0133833 0.0100039 0.0092417 0.0150639 0.011603 0.016477 0.00799832 
+0.00790319 0.0235324 0.0111015 0.00967112 0.0174059 0.0109606 0.00799718 0.0125871 0.00852947 0.00817606 0.00753688 0.00934729 0.00901239 0.0083186 0.0115995 0.0111184 0.0128168 0.0170916 0.0140882 0.00984907 0.0203378 0.00933781 0.00873858 0.0169729 0.0124777 0.0121524 0.0134012 0.00895315 0.0130562 0.0123688 0.020535 0.007508 0.0156554 0.0117507 0.0156376 0.0104359 0.00988385 0.0104138 0.0108033 0.0123854 0.0102539 0.00805637 0.00824154 0.0169302 0.00815921 0.0112258 0.0111159 0.00820837 0.0173984 
+0.00838806 0.0253994 0.0164964 0.0153526 0.00792645 0.0142281 0.00823136 0.00793983 0.00896964 0.0125574 0.0084571 0.0114111 0.0103704 0.0115691 0.0117722 0.0105804 0.0136371 0.0147888 0.00789175 0.0116784 0.0118744 0.0103136 0.00823018 0.0119564 0.00910947 0.00955901 0.0107229 0.00882304 0.00809067 0.00863467 0.0158661 0.0128813 0.011772 0.00854266 0.013148 0.00895431 0.0164438 0.0112154 0.0157801 0.00851084 0.0146646 0.0104425 0.0123866 0.0101705 0.00863098 0.0105372 0.0190253 0.0105951 0.0120388 
+0.0126034 0.0248546 0.0105986 0.015903 0.0083998 0.0117779 0.0090602 0.0127347 0.01536 0.0162639 0.0141152 0.0124823 0.0123341 0.0163151 0.0103473 0.00849348 0.00778947 0.0170963 0.00797977 0.0108984 0.0117723 0.0113179 0.00856268 0.00920236 0.0170649 0.00845961 0.0111051 0.0125911 0.0125433 0.0120349 0.0164834 0.0151292 0.0137128 0.0111898 0.00755945 0.0119485 0.0173044 0.0139533 0.00882071 0.00964128 0.00760202 0.011044 0.00769635 0.0111013 0.00780582 0.0110244 0.010414 0.00832692 0.0165455 
+0.00973368 0.0213454 0.00933092 0.00870622 0.00874845 0.0126756 0.00966286 0.0126159 0.0104429 0.0100714 0.00799636 0.0120667 0.0149694 0.00789823 0.00942509 0.0126152 0.0108931 0.0138853 0.0129878 0.0120057 0.014164 0.00845237 0.0188999 0.0101422 0.00995276 0.00941929 0.010031 0.00774711 0.0128427 0.0128063 0.0120045 0.0134737 0.00866746 0.00988377 0.01029 0.0146154 0.0109321 0.0106991 0.0123186 0.00965139 0.0144665 0.0137031 0.0128053 0.0171564 0.0106439 0.00871359 0.00933303 0.00854897 0.00801469 
+0.0207639 0.0302596 0.00921102 0.0112939 0.00895949 0.0195714 0.0082511 0.00851709 0.0201907 0.0104338 0.00789831 0.00779601 0.0116592 0.0115129 0.00933967 0.0106008 0.00993621 0.00977606 0.00969845 0.00859994 0.010113 0.0101853 0.00768342 0.00829759 0.00896101 0.00914328 0.0121011 0.00789938 0.0114143 0.00778448 0.00930274 0.0110046 0.0151578 0.0115392 0.0119695 0.00916508 0.0113961 0.00925488 0.0119853 0.0115414 0.00800942 0.0085351 0.013696 0.00978607 0.0117781 0.00836317 0.0115831 0.00784782 0.00988281 
+0.0125919 0.0225245 0.00872785 0.0079511 0.010217 0.0138643 0.0128534 0.0106925 0.0134129 0.0123817 0.0148298 0.0159778 0.0112608 0.0126049 0.00781257 0.00897772 0.00770906 0.0101846 0.00861081 0.0173156 0.0113365 0.0206655 0.00952747 0.00824274 0.0130284 0.00934343 0.0162803 0.0106074 0.0105148 0.0114925 0.00898429 0.0081491 0.00924381 0.00865159 0.0085172 0.0127236 0.0155168 0.0151714 0.0129874 0.00857607 0.00787606 0.0125513 0.0118531 0.0168491 0.00761251 0.00816883 0.0097344 0.0145175 0.00858486 
+0.0116113 0.0273119 0.0139355 0.0114967 0.0115408 0.0161081 0.0152266 0.00898215 0.0111644 0.00940009 0.00859884 0.0155261 0.0139528 0.0109696 0.0111752 0.00762303 0.0168868 0.00788242 0.0123671 0.0113137 0.0150454 0.00905556 0.012208 0.0104372 0.0109824 0.00886656 0.00773069 0.0139625 0.0166492 0.0121905 0.0106001 0.00935319 0.0106352 0.0126785 0.0141775 0.00966866 0.00980012 0.0101705 0.0105319 0.0129313 0.0111178 0.0124385 0.0111511 0.010093 0.00932455 0.0104925 0.00981385 0.0129528 0.00940077 
+0.0102708 0.025453 0.0161008 0.0115701 0.00894551 0.0206021 0.0126973 0.0127642 0.0127999 0.0137975 0.00949311 0.00851046 0.00922536 0.0171908 0.0110097 0.0131312 0.0114842 0.00888155 0.00923136 0.0114026 0.0123358 0.0091245 0.00871509 0.0131133 0.00930696 0.01481 0.00886211 0.0118571 0.0133691 0.00892296 0.0153625 0.0128242 0.0164881 0.0186806 0.00840839 0.0115504 0.0157651 0.0100688 0.0193315 0.0142515 0.0177245 0.00852747 0.0148372 0.00774316 0.0124167 0.0227457 0.00919254 0.0167126 0.0101306 
+0.00949793 0.022419 0.0114635 0.00787325 0.0151513 0.00994148 0.011955 0.0144922 0.0139474 0.00849281 0.0123349 0.0154528 0.0175107 0.0146296 0.00787882 0.01191 0.0135602 0.0111431 0.00991613 0.00928491 0.00862271 0.014956 0.0114031 0.0141196 0.00886118 0.00936798 0.0124562 0.00866346 0.0109636 0.00942369 0.0158231 0.0107395 0.0148939 0.00867654 0.00843151 0.00843801 0.00987098 0.0136324 0.0125352 0.0113096 0.0100173 0.00963262 0.0106216 0.0133212 0.00942482 0.008157 0.0149036 0.00775486 0.0110296 
+0.0112069 0.0272446 0.00854308 0.0178902 0.011465 0.00939404 0.017011 0.00996359 0.0145181 0.0150909 0.0097186 0.00829955 0.0109255 0.0141977 0.0144254 0.00969187 0.0159607 0.0150539 0.0202983 0.00827695 0.00834562 0.0174092 0.0174481 0.00861657 0.0168125 0.0195186 0.00948454 0.0140216 0.0160348 0.014776 0.00958188 0.0156618 0.0095608 0.0120682 0.00889043 0.0132259 0.0133092 0.00934697 0.0138392 0.0139268 0.0097421 0.017502 0.0113001 0.0105501 0.0128437 0.0114893 0.0129684 0.0103808 0.010232 
+0.0111391 0.0237827 0.0145166 0.0132462 0.0184881 0.0136112 0.00767246 0.01169 0.0131049 0.0162347 0.0135606 0.0165905 0.0164936 0.014142 0.014366 0.00985739 0.0147803 0.00989201 0.00982342 0.0089444 0.0119379 0.00867752 0.00979992 0.0116547 0.0134995 0.010695 0.0128166 0.013538 0.0174419 0.0100733 0.0092548 0.012256 0.0110311 0.00905062 0.00870131 0.0110167 0.00758026 0.00787282 0.00750765 0.0129866 0.00770725 0.0115635 0.0110494 0.0100412 0.00816833 0.0167224 0.00758952 0.0105225 0.0102737 
+0.00911587 0.0233013 0.0083151 0.0173036 0.00955425 0.015144 0.0181004 0.00954625 0.0116367 0.00798605 0.00866689 0.00782728 0.0114024 0.00900233 0.0160443 0.0112031 0.0139449 0.0156776 0.00907191 0.00975704 0.0111857 0.0083113 0.0136895 0.00835854 0.0130534 0.0109855 0.00986861 0.0101119 0.0130915 0.019609 0.00949442 0.0103352 0.0137966 0.0144428 0.00892645 0.013021 0.00753629 0.00814756 0.0114642 0.0114017 0.0151005 0.0080772 0.010644 0.0096878 0.00945567 0.00820002 0.0107892 0.0122035 0.0146939 
+0.00948907 0.021767 0.00802098 0.00817442 0.0149274 0.0139743 0.00988414 0.0159532 0.0154766 0.0123433 0.013743 0.0141259 0.0112577 0.0153047 0.00914512 0.0102263 0.00841865 0.010485 0.0125646 0.0149482 0.0106739 0.0116981 0.0103237 0.0148731 0.00752372 0.0131573 0.00956262 0.0100887 0.00842314 0.0131717 0.0100196 0.00845348 0.0108719 0.0134481 0.00998926 0.0116886 0.00990498 0.0151376 0.0132805 0.0138879 0.0133914 0.00917293 0.00870911 0.0135626 0.00899951 0.00796089 0.00878003 0.0101218 0.0130878 
+0.00879304 0.026332 0.0115077 0.0102817 0.0102767 0.0194288 0.00758405 0.0123007 0.00910844 0.0144404 0.0103388 0.00984202 0.0104896 0.00955573 0.0140721 0.00806365 0.00959985 0.0147091 0.00897474 0.00800116 0.0110669 0.0132495 0.0110214 0.0207907 0.00835903 0.00918193 0.0146073 0.00812745 0.0118054 0.0112085 0.00760006 0.00766575 0.00827451 0.0131362 0.0170556 0.00837341 0.0179868 0.0092481 0.0102636 0.00962309 0.0152318 0.0102423 0.0152355 0.00750615 0.00829509 0.00889198 0.0081398 0.00808986 0.014526 
+0.0153858 0.0243443 0.0108733 0.00761468 0.00765843 0.0135658 0.0102784 0.00755595 0.00932887 0.0112079 0.0136589 0.02018 0.00867432 0.0102834 0.00880161 0.0146448 0.00882262 0.00901113 0.00898133 0.0114991 0.0116231 0.008646 0.0125625 0.0115526 0.00796062 0.012466 0.00944426 0.0104905 0.0106008 0.00849779 0.0144311 0.0121217 0.00969907 0.0114571 0.0108629 0.0142184 0.0102308 0.00876892 0.0154134 0.0109281 0.00946842 0.0131872 0.017346 0.0128602 0.0123659 0.00997315 0.00909212 0.0164329 0.0179937 
+0.0097369 0.0206338 0.0117702 0.0130346 0.0137561 0.0110761 0.0138221 0.0140112 0.00990652 0.00863303 0.0125001 0.00816226 0.00878419 0.00831256 0.0143745 0.007553 0.00898112 0.00903825 0.00786233 0.00799705 0.0111157 0.00762704 0.0161722 0.0115543 0.00996799 0.0121211 0.0143124 0.0133965 0.00932923 0.0100121 0.0162585 0.00809587 0.0110415 0.00755567 0.0113767 0.0149726 0.0165917 0.00937406 0.0232884 0.0101733 0.00872945 0.0133672 0.0103846 0.0186145 0.0145743 0.00837608 0.00988489 0.0123534 0.0146489 
+0.0101712 0.0204331 0.00790912 0.00809119 0.00777641 0.0122152 0.00949551 0.0132367 0.00839406 0.00817484 0.0133188 0.00973259 0.00891589 0.00838892 0.00983956 0.0112519 0.0101722 0.0121647 0.0125072 0.0110025 0.0104913 0.0109601 0.0106955 0.00833219 0.0148282 0.0119938 0.0132905 0.00949414 0.0133034 0.00764992 0.0133496 0.0129774 0.012058 0.0133022 0.011975 0.0130977 0.0095401 0.010568 0.0106268 0.00852929 0.00910748 0.00767768 0.0137776 0.00760561 0.0190109 0.0110245 0.0128955 0.00939386 0.0113458 
+0.00827374 0.0251757 0.0121353 0.00850211 0.00797146 0.0169248 0.0157808 0.0108443 0.00988492 0.0153356 0.0110898 0.010639 0.0187146 0.0110922 0.00757705 0.0155794 0.00939305 0.00778932 0.015371 0.0122381 0.0101932 0.0103607 0.0136196 0.0128625 0.0137172 0.010893 0.0114684 0.0117487 0.0138237 0.00908936 0.0124191 0.00794224 0.012688 0.0197028 0.0127473 0.00949431 0.00853454 0.0115539 0.0169602 0.0121847 0.00826065 0.0144138 0.00930039 0.00873084 0.0136588 0.00828384 0.0114327 0.00953625 0.0113293 
+0.0113127 0.0210537 0.0143738 0.0137528 0.00951468 0.0102551 0.00902772 0.0128791 0.0110488 0.0191901 0.0145764 0.013239 0.00838651 0.0144335 0.0084625 0.0125919 0.00979329 0.00937839 0.0105994 0.0124972 0.00838924 0.0126485 0.00853518 0.00761188 0.0188739 0.0107682 0.00819324 0.0117259 0.0158024 0.0118522 0.0108466 0.00811003 0.0106455 0.0116571 0.00797254 0.0123537 0.0168134 0.0115286 0.0127207 0.0167774 0.00837524 0.00882726 0.00842221 0.00756125 0.0122617 0.0088207 0.0097199 0.00908593 0.0083942 
+0.011705 0.0207155 0.0152487 0.0075282 0.0152447 0.0101465 0.00843718 0.0118198 0.014138 0.0113742 0.00841356 0.00994094 0.00884302 0.0094945 0.00967041 0.0155148 0.00886344 0.00783294 0.0133801 0.00961572 0.00752072 0.0113831 0.00910355 0.0195317 0.00937186 0.0121532 0.0140828 0.0118588 0.00977985 0.00851993 0.0161894 0.012683 0.0130459 0.0100294 0.00818087 0.00970356 0.0171704 0.0123165 0.00926904 0.00861139 0.00852465 0.0163877 0.0111038 0.0140121 0.0102067 0.0101279 0.0084616 0.00768695 0.0082938 
+0.0109417 0.0278678 0.0115426 0.00929491 0.00946448 0.00760588 0.00768034 0.0141711 0.00756956 0.0106113 0.0120755 0.0129529 0.00958833 0.0121574 0.00889081 0.0133243 0.00849482 0.0103025 0.00886195 0.0115098 0.0123605 0.009946 0.00855143 0.00836292 0.0109544 0.00941135 0.0132882 0.00839464 0.00937392 0.0105541 0.0130762 0.0129366 0.0127021 0.0143427 0.00868624 0.0251505 0.0195877 0.00831681 0.0180369 0.0100545 0.00755814 0.0123776 0.0141274 0.00763343 0.00753381 0.0226265 0.0129672 0.00837964 0.0146988 
+0.00983075 0.022107 0.0173331 0.0129429 0.0176057 0.0134848 0.010576 0.0139869 0.0105101 0.0130117 0.0140576 0.0105017 0.0102882 0.0105781 0.00897215 0.00867277 0.0110143 0.016271 0.0121182 0.0116707 0.00795979 0.00865818 0.0144063 0.0127252 0.0144501 0.0101421 0.013125 0.0196909 0.00898426 0.010581 0.0100945 0.00801164 0.0135221 0.0111198 0.00806585 0.0213253 0.0100274 0.0144109 0.0153699 0.0122143 0.00918134 0.00916568 0.011621 0.0150439 0.00872375 0.0176738 0.00782867 0.0151941 0.0106163 
+0.0126987 0.0229007 0.0075607 0.0143304 0.0114757 0.0109198 0.0078244 0.0127706 0.0100428 0.0101192 0.0165348 0.0103811 0.00811427 0.0101145 0.00897922 0.0139112 0.0123679 0.00917989 0.0123572 0.0149642 0.00855637 0.00881887 0.00820185 0.00787485 0.012718 0.0205514 0.0110306 0.0117908 0.00860124 0.0112494 0.0149107 0.0148037 0.0150269 0.00883738 0.0127948 0.0110679 0.01341 0.00750559 0.0121756 0.0131417 0.01218 0.012626 0.0100698 0.00782545 0.00828973 0.00868378 0.00840721 0.00818535 0.0133931 
+0.0102257 0.027918 0.00787297 0.0133354 0.0147737 0.0109556 0.0152169 0.0132703 0.010289 0.0115668 0.0091825 0.0107671 0.0133121 0.0122393 0.010704 0.0120498 0.00803717 0.0106021 0.00829115 0.0113567 0.0107112 0.0117996 0.00896112 0.0118919 0.0123738 0.0126902 0.0093068 0.00859058 0.0105693 0.0122587 0.00888327 0.00759207 0.0191256 0.0107541 0.00983975 0.0117202 0.0105958 0.0128959 0.00800243 0.0115068 0.0107724 0.00840248 0.00908611 0.0129777 0.0126822 0.0127465 0.00822219 0.00994533 0.0139303 
+0.01393 0.0233185 0.00984235 0.00967116 0.015753 0.00814746 0.0085956 0.0130108 0.0110582 0.0096444 0.0114632 0.0217317 0.0161044 0.015933 0.014491 0.0115375 0.00760199 0.016022 0.00850128 0.0107254 0.0109347 0.0101816 0.00847551 0.0157225 0.010204 0.00792328 0.0103398 0.0119777 0.00946288 0.00792099 0.011687 0.0117966 0.00933882 0.00921819 0.011287 0.0107074 0.0210967 0.013812 0.0102911 0.0125356 0.00977311 0.0148956 0.00945254 0.0188149 0.0116953 0.0106693 0.0184994 0.013314 0.0148077 
+0.00987667 0.02296 0.00831947 0.00776761 0.0117147 0.00911469 0.010755 0.0119091 0.00809856 0.00948884 0.0117268 0.00885928 0.0107636 0.0100157 0.0103407 0.0112012 0.0132345 0.0083606 0.0108678 0.0137489 0.00812977 0.012016 0.0123967 0.00838596 0.0104066 0.00985035 0.0173604 0.00935084 0.0215164 0.00841426 0.0129268 0.0104227 0.0119512 0.00862789 0.0146201 0.00877835 0.013587 0.0130225 0.0119491 0.0172831 0.0176339 0.00777948 0.0109782 0.0140325 0.0168951 0.00762716 0.00774235 0.0196693 0.00880236 
+0.00765074 0.0232714 0.0105446 0.0136031 0.014729 0.0135182 0.00854858 0.011058 0.0125898 0.0130737 0.0145321 0.00869487 0.00764535 0.012576 0.00932989 0.00794551 0.0164383 0.0158679 0.00991578 0.00959083 0.0120535 0.0176219 0.00925489 0.010526 0.0155791 0.00919009 0.0123899 0.0125916 0.00847203 0.00970496 0.00848572 0.0134509 0.00966093 0.0163208 0.0110939 0.0159553 0.014289 0.00771749 0.0110259 0.013375 0.00922792 0.00821641 0.0116486 0.0143069 0.0136127 0.0157618 0.0105326 0.00899571 0.00774907 
+0.00909697 0.0214162 0.0158611 0.00768949 0.0110213 0.00873957 0.0127007 0.00765071 0.00816289 0.0125967 0.0118592 0.0102903 0.0130878 0.0187603 0.0180501 0.0105242 0.0154561 0.014303 0.0133177 0.0110271 0.0116074 0.00769644 0.0191824 0.00866946 0.0114176 0.0111474 0.00947195 0.00801283 0.00759541 0.0083278 0.00937849 0.0155311 0.0119323 0.0175447 0.00873595 0.0125612 0.0163533 0.0132767 0.0126399 0.0145919 0.0134597 0.0115377 0.0172491 0.00863526 0.0101643 0.0145975 0.00913221 0.0131374 0.0113838 
+0.00788248 0.021498 0.00827927 0.0127234 0.0229448 0.0107585 0.0140549 0.00968618 0.0123351 0.0117977 0.00892402 0.00999836 0.00759201 0.0140604 0.0131339 0.00844792 0.00935001 0.0101972 0.0108008 0.0139108 0.00766936 0.00847795 0.00800967 0.0162745 0.0144014 0.0116628 0.00989151 0.0123791 0.00944051 0.0137685 0.0101494 0.0100635 0.00902156 0.0123614 0.0182608 0.0138456 0.00829875 0.0103482 0.0197994 0.00903373 0.0132794 0.0091919 0.00880481 0.009586 0.0114356 0.0113806 0.0130965 0.00800096 0.00866452 
+0.0201254 0.0222382 0.00944811 0.0101604 0.0100038 0.0131513 0.0132618 0.00803858 0.013152 0.0101965 0.0122165 0.0141532 0.0130336 0.0131043 0.00830514 0.0137026 0.0117982 0.00825661 0.0102438 0.00785197 0.00820101 0.00848317 0.0111416 0.0164459 0.019478 0.0193448 0.0159232 0.0113944 0.0221068 0.0110601 0.0129015 0.0126173 0.0122514 0.0162261 0.00813594 0.00801 0.0104284 0.0145931 0.0117998 0.0103684 0.0153803 0.0123371 0.0163547 0.0152386 0.00832999 0.00846248 0.0156608 0.0112197 0.00823777 
+0.00764168 0.0126484 0.0230105 0.0104165 0.0156115 0.00841817 0.0135565 0.00871995 0.0153296 0.00771436 0.00880684 0.00967623 0.00797091 0.00959075 0.0177778 0.0089844 0.00782083 0.00774189 0.0148468 0.0102188 0.0189107 0.0108572 0.0129114 0.0108314 0.010969 0.0120859 0.0168999 0.00769284 0.00897377 0.00794035 0.0173879 0.0141403 0.00866515 0.0150242 0.0122666 0.0181444 0.0114562 0.0113557 0.0135812 0.00901969 0.0098019 0.013859 0.0129119 0.00848159 0.013666 0.00950731 0.00813539 0.012093 0.00940506 
+0.0075106 0.00905345 0.0224812 0.0102122 0.00890096 0.0151734 0.0144503 0.00806235 0.0148231 0.0182744 0.00867049 0.0123656 0.0116066 0.00767773 0.011868 0.00997198 0.0131304 0.0131469 0.0107145 0.00806584 0.0140984 0.00771124 0.0121408 0.0131947 0.00906139 0.0159156 0.00861005 0.0107816 0.0213066 0.00855229 0.0127981 0.0166143 0.0108062 0.0158642 0.00831508 0.00823525 0.0101702 0.0122099 0.0146826 0.0130393 0.0132136 0.0112152 0.00995753 0.0114338 0.00855652 0.0124759 0.00911219 0.00860892 0.00787939 
+0.0113172 0.0112069 0.0239106 0.011519 0.0100094 0.00769998 0.00884556 0.0108098 0.0115115 0.0100932 0.0105567 0.00840721 0.00959704 0.0115982 0.01481 0.011289 0.0140722 0.0108015 0.00941295 0.015969 0.0177054 0.0120477 0.00875758 0.0119941 0.0115399 0.0147543 0.0133939 0.0218081 0.00814408 0.00804521 0.0165441 0.0107537 0.0145468 0.00955961 0.011432 0.0103134 0.0137152 0.0098306 0.0114712 0.00875457 0.020783 0.0124064 0.010763 0.00851077 0.00974855 0.01161 0.00758752 0.0099065 0.0129115 
+0.0104302 0.020681 0.0341576 0.0119758 0.00779795 0.010878 0.0129573 0.0148032 0.0101535 0.00782686 0.00949278 0.00986503 0.017033 0.014135 0.0122921 0.010136 0.00760893 0.0134817 0.0148405 0.0151213 0.00820814 0.00898511 0.0106513 0.00863311 0.0108657 0.0119623 0.0120308 0.0108416 0.0114063 0.0145675 0.017643 0.00770984 0.0166035 0.00972981 0.0113184 0.00930196 0.00752699 0.0161066 0.0183984 0.0130279 0.0109317 0.00750941 0.0101594 0.0130217 0.00968345 0.0199435 0.016425 0.00926529 0.0108241 
+0.00842985 0.0116583 0.0239661 0.0193098 0.0160614 0.0106777 0.0136834 0.0128487 0.008635 0.0117674 0.00988806 0.00797833 0.0101427 0.0126712 0.00935477 0.0111085 0.00847387 0.00904561 0.0145591 0.0093536 0.00875963 0.00769103 0.0104163 0.00839756 0.0148411 0.00887104 0.0141076 0.0136605 0.0133822 0.0177023 0.0131952 0.0105551 0.0177548 0.0117743 0.015945 0.00839919 0.0173193 0.00985752 0.00984488 0.00793236 0.00821181 0.0155575 0.0108483 0.0137763 0.0144704 0.0184506 0.0136829 0.0121279 0.0122903 
+0.0150556 0.0144245 0.0320712 0.0079419 0.0102347 0.00828416 0.00997947 0.0148921 0.0178198 0.0125915 0.0106039 0.00976146 0.00790479 0.0143079 0.0079744 0.0083561 0.0191093 0.00961766 0.00754107 0.0153945 0.01789 0.0083027 0.00840771 0.0110594 0.0182871 0.0135208 0.0104117 0.013121 0.0132309 0.0118168 0.0103677 0.014463 0.00996247 0.0114514 0.0132421 0.0112798 0.0127602 0.00985035 0.0124105 0.0145872 0.0171717 0.0164293 0.00813592 0.0104492 0.0138917 0.0113236 0.00914717 0.00947034 0.0142414 
+0.0123704 0.00783529 0.0210885 0.0127015 0.0127479 0.0137197 0.0174801 0.0104512 0.014899 0.0119558 0.0078553 0.0115179 0.00816551 0.0077331 0.00888904 0.00995901 0.00851453 0.0177223 0.00861482 0.0130459 0.0133419 0.0161999 0.0075903 0.0105314 0.00866041 0.00995943 0.00821797 0.0100352 0.014138 0.0133265 0.0141092 0.00950137 0.00856584 0.00778119 0.016309 0.00924851 0.010069 0.0115427 0.0207616 0.00829647 0.0128691 0.0167283 0.0112638 0.0157415 0.00794426 0.0089895 0.0159371 0.0093016 0.00909894 
+0.0100089 0.0139208 0.0235168 0.00771069 0.0105458 0.00879662 0.0134831 0.0126753 0.0103809 0.0113404 0.00974845 0.0130035 0.0133396 0.00870825 0.0127044 0.00881419 0.00813668 0.013053 0.00899747 0.00909621 0.0126025 0.010059 0.0110158 0.00885818 0.0127099 0.0125338 0.0109845 0.00756816 0.0104291 0.0126954 0.0121969 0.00804388 0.0143856 0.00854188 0.0115734 0.0102478 0.0187246 0.0112893 0.0150119 0.00977175 0.0118713 0.0122003 0.0084612 0.0090514 0.0101114 0.0104241 0.00799327 0.0185359 0.00973914 
+0.00962295 0.0113295 0.0257345 0.00893206 0.00997418 0.0101334 0.0115945 0.0126395 0.00904426 0.0103381 0.00910468 0.00981648 0.0116458 0.0100866 0.0145031 0.00926139 0.0134746 0.0117218 0.0116152 0.00871872 0.00847218 0.010985 0.00958883 0.0136977 0.0112829 0.0113791 0.0092345 0.00845356 0.0137225 0.0120203 0.0141264 0.020026 0.0096278 0.0122282 0.0180575 0.01084 0.0091869 0.0108086 0.00966318 0.00913813 0.0091568 0.0112805 0.00764796 0.00802639 0.00837611 0.00758026 0.0131511 0.0101646 0.0123427 
+0.0161961 0.0101601 0.0233183 0.00860849 0.0109066 0.0144746 0.0130843 0.0127659 0.0135531 0.0186367 0.0186101 0.00853118 0.00771378 0.00818142 0.00835735 0.0140488 0.0144469 0.010657 0.010951 0.00945288 0.0115504 0.0123779 0.0110594 0.0164135 0.0137209 0.00929199 0.00914049 0.0105935 0.0170752 0.0194339 0.0110833 0.0104639 0.00934008 0.0108329 0.0083529 0.00968863 0.0112075 0.00896931 0.0148413 0.0103983 0.0134676 0.0121498 0.0119313 0.00776565 0.009707 0.0103674 0.0084636 0.0128797 0.00836471 
+0.00868783 0.0125536 0.0253106 0.0087953 0.00996643 0.0109312 0.00907963 0.00978649 0.0107347 0.0122464 0.0127795 0.00836102 0.01342 0.0100521 0.015077 0.0120222 0.015058 0.0089105 0.0147931 0.0138512 0.0121619 0.0116615 0.0187896 0.0107166 0.00908538 0.008893 0.00943697 0.0108028 0.0124738 0.0094311 0.00947726 0.0128943 0.0113159 0.0127814 0.00846501 0.0126336 0.00906144 0.0104129 0.00956887 0.0125608 0.00919229 0.0128424 0.0104112 0.012688 0.00808584 0.0105043 0.00911026 0.0123063 0.0139241 
+0.0139146 0.0125389 0.0252688 0.00861564 0.0183847 0.011136 0.010072 0.00902131 0.0154392 0.00852487 0.00973004 0.0104679 0.00860781 0.0130281 0.0140839 0.0120112 0.0101229 0.0079446 0.0081942 0.00843187 0.00837764 0.00755949 0.0125053 0.0123848 0.00982596 0.0100942 0.00754747 0.00863919 0.0095872 0.0103136 0.0124733 0.00985111 0.00996041 0.00828818 0.015331 0.00817586 0.00797321 0.0104069 0.011341 0.014998 0.0117018 0.00924404 0.00776046 0.0121482 0.0115773 0.0100378 0.0105344 0.0118343 0.0105633 
+0.0134106 0.00963797 0.0217813 0.0105701 0.00875567 0.0124175 0.00759184 0.0148966 0.00857576 0.014321 0.00973865 0.0107557 0.0154607 0.00855962 0.00964446 0.00904494 0.0109691 0.00863798 0.00752947 0.0120623 0.0100218 0.0106181 0.0202118 0.0140897 0.00968971 0.0131774 0.00981573 0.00935469 0.0141551 0.0144094 0.013019 0.00857478 0.0107662 0.0125932 0.00935797 0.00845116 0.00768182 0.00882847 0.015329 0.0155858 0.00807462 0.0193427 0.00973681 0.00832105 0.00871719 0.00931971 0.0139212 0.0120613 0.00987758 
+0.0109859 0.00885845 0.0241467 0.0115446 0.0140071 0.00815091 0.0129175 0.0118188 0.0122007 0.0138906 0.0100633 0.00814199 0.0101043 0.00977005 0.0145775 0.0115953 0.0130692 0.0132509 0.0145579 0.0102481 0.0188523 0.00931528 0.0131969 0.00813627 0.0139732 0.00753529 0.0141665 0.00811139 0.00968957 0.013399 0.0116497 0.0108029 0.0137134 0.0152664 0.0107242 0.00888746 0.00988936 0.00925132 0.0174508 0.016145 0.0134893 0.0113448 0.0144625 0.0148933 0.00918787 0.0121898 0.0138205 0.0134483 0.00825392 
+0.00995247 0.0131395 0.0247869 0.0125854 0.0138368 0.0080786 0.00801479 0.0159455 0.0109062 0.00832218 0.0141729 0.0127139 0.0134351 0.0105158 0.00852197 0.00791615 0.0107584 0.0117814 0.0238455 0.0163327 0.0106855 0.0138213 0.0148461 0.0112349 0.017634 0.0111021 0.0126928 0.0110513 0.00941875 0.0152726 0.0108583 0.0144212 0.0112602 0.011277 0.011598 0.0178644 0.0115558 0.00764972 0.0111486 0.0111895 0.0154236 0.0100611 0.0103689 0.021965 0.0142007 0.0101295 0.011833 0.00890362 0.0116748 
+0.00859156 0.00775239 0.0218845 0.0151381 0.020182 0.00952944 0.00790989 0.0193358 0.0106388 0.0106541 0.0099987 0.0134071 0.0182958 0.00776494 0.0126733 0.0101838 0.0130709 0.0192457 0.00885684 0.00915037 0.0123865 0.0157005 0.00884128 0.00951834 0.011077 0.0119219 0.0151222 0.00876509 0.0119032 0.0126904 0.0115483 0.0111714 0.0142407 0.00898703 0.0116697 0.00798791 0.010128 0.0080347 0.00839506 0.0119162 0.00930847 0.00884288 0.00898779 0.00923705 0.0101195 0.00768234 0.0099016 0.00921312 0.0094782 
+0.00774841 0.0115025 0.0258726 0.010439 0.01933 0.00831796 0.0112306 0.00901766 0.00800044 0.0111945 0.00978058 0.00863968 0.014603 0.0103301 0.00989767 0.0180338 0.0147345 0.00890185 0.0143002 0.0172769 0.0202698 0.0150716 0.0124557 0.00874757 0.00932352 0.00782629 0.0123121 0.00815988 0.0150227 0.0103242 0.00980298 0.013517 0.0106365 0.0187941 0.00968282 0.0125772 0.00918889 0.0122207 0.013369 0.0107439 0.0129822 0.011957 0.0152148 0.0105788 0.0138885 0.0153187 0.009106 0.0110161 0.00874364 
+0.0184227 0.00779894 0.0210283 0.008521 0.0124139 0.0118359 0.0199954 0.00838303 0.00966765 0.010522 0.0105595 0.01525 0.0142252 0.00978268 0.0111702 0.0132608 0.0160711 0.0124382 0.00783503 0.00763183 0.00876614 0.0177611 0.0116925 0.0116517 0.0108655 0.0139154 0.00868276 0.00936977 0.00950444 0.0121467 0.0161873 0.00990076 0.0137855 0.00769336 0.00868235 0.00773555 0.0135486 0.0130167 0.0151815 0.0185735 0.0147726 0.0216887 0.0117114 0.0174715 0.0191175 0.0163739 0.00888401 0.0171241 0.0171718 
+0.0198671 0.0158433 0.0252777 0.00773802 0.00793059 0.00895468 0.00808429 0.00895008 0.0117688 0.0134706 0.0099741 0.0121687 0.0143376 0.0110646 0.0125927 0.00936633 0.00802971 0.012236 0.0162339 0.011717 0.00910268 0.0140897 0.0135515 0.0124077 0.0116335 0.0176466 0.00777531 0.0202474 0.0102814 0.00853543 0.0212056 0.018903 0.0106519 0.00843905 0.00808486 0.0107486 0.014425 0.0138187 0.0111296 0.0109754 0.0159908 0.0129835 0.0150093 0.0104203 0.0134097 0.00855073 0.0108149 0.0149157 0.0116743 
+0.0152409 0.0192845 0.0292109 0.0156155 0.00851637 0.00999159 0.0146146 0.0115099 0.00829983 0.0107979 0.0180484 0.0108741 0.0104735 0.0136641 0.010342 0.0110425 0.00824294 0.0123612 0.00802886 0.0130029 0.0117876 0.0143988 0.00954937 0.00901185 0.00829761 0.0154293 0.0103108 0.00870391 0.0106134 0.0102049 0.00817554 0.0076947 0.0136909 0.0162045 0.00910305 0.0155838 0.00782449 0.00964188 0.0110266 0.00981736 0.0144848 0.0111642 0.0132136 0.00959523 0.0110957 0.0114765 0.00884466 0.013655 0.0194158 
+0.00768915 0.0174124 0.0279806 0.0123129 0.00901758 0.0104237 0.0090769 0.0120688 0.0169897 0.0116229 0.0161726 0.0157694 0.0110289 0.00946359 0.0133498 0.00877964 0.00957379 0.0104815 0.0174654 0.011165 0.01008 0.0108337 0.0158219 0.00819278 0.0150143 0.0129431 0.00960865 0.0124196 0.00751531 0.00911116 0.0104065 0.0160058 0.0114029 0.0127703 0.00868456 0.00913931 0.0152828 0.0116009 0.00787301 0.0141963 0.0170866 0.0101728 0.00788144 0.00858623 0.0141309 0.0115588 0.00991466 0.0134934 0.0077779 
+0.00932098 0.0159831 0.0211103 0.0117281 0.0154403 0.0125459 0.00794171 0.0124239 0.00978349 0.0157523 0.0114212 0.0130223 0.00839278 0.0116368 0.0124874 0.0127075 0.014662 0.00858233 0.0123881 0.0127916 0.00988765 0.00851219 0.00906771 0.0120899 0.0136004 0.0206259 0.0110238 0.00966049 0.0166852 0.0104116 0.0125121 0.00754788 0.0116692 0.00973615 0.00799964 0.00978443 0.00887666 0.0128453 0.023308 0.0100821 0.00863286 0.00795619 0.0116094 0.00924092 0.0146596 0.0101232 0.00992286 0.00820092 0.00951462 
+0.00751008 0.0140706 0.023813 0.0110305 0.015864 0.012396 0.00961872 0.013776 0.0112496 0.0108408 0.0125254 0.0122267 0.00820981 0.00784571 0.0103205 0.00879172 0.00751458 0.0101426 0.00816506 0.0141612 0.0111599 0.00803333 0.00923109 0.0075066 0.0102739 0.0100369 0.0102014 0.0116726 0.0114812 0.0135939 0.0148114 0.00960598 0.00977583 0.00821958 0.00872658 0.0119304 0.0193965 0.0127645 0.0166958 0.0168682 0.019195 0.014796 0.00983455 0.01345 0.00890569 0.00973891 0.00932483 0.0109065 0.0150562 
+0.0142091 0.0171275 0.0212959 0.0226795 0.00965348 0.00789792 0.0115653 0.00837956 0.0116296 0.0125248 0.00758683 0.0104345 0.0112091 0.013267 0.00861568 0.012174 0.00921962 0.00848854 0.0110919 0.0120665 0.011341 0.0106532 0.00987843 0.01072 0.0104443 0.0116164 0.00813353 0.0088635 0.00799066 0.0109428 0.0143597 0.0138117 0.0151204 0.0125059 0.012088 0.00908966 0.0111811 0.00750218 0.00891379 0.0107601 0.0124985 0.00946911 0.0106922 0.0160245 0.0103273 0.0111631 0.00846295 0.00792895 0.0130929 
+0.0160969 0.0165426 0.0215565 0.00830087 0.0119963 0.0145873 0.01132 0.0136657 0.0130255 0.010279 0.0117847 0.0146936 0.0164979 0.0161178 0.00904972 0.0149438 0.00922885 0.00960637 0.0154394 0.00855918 0.0126156 0.0164066 0.0123765 0.00758348 0.0129609 0.0150053 0.00921955 0.0106423 0.00992697 0.0129815 0.0190519 0.0108316 0.00852557 0.0124664 0.01052 0.0198776 0.0105744 0.0134703 0.0156568 0.0162452 0.013177 0.012689 0.0147488 0.0119753 0.0111859 0.00760898 0.0117605 0.0165987 0.0101982 
+0.00787813 0.0152792 0.0231161 0.00860357 0.0124436 0.0170042 0.0103345 0.0134063 0.0107158 0.0145966 0.0150371 0.0166037 0.00984823 0.0112391 0.0112079 0.0103534 0.00880878 0.014011 0.0109664 0.00887019 0.00869275 0.0169769 0.0101929 0.0135257 0.0117968 0.0152627 0.0117932 0.00992476 0.0149444 0.00956194 0.00768462 0.00801937 0.0136012 0.00815406 0.014295 0.00809303 0.0103223 0.00929317 0.0127022 0.0125694 0.00971197 0.0100839 0.0120315 0.0102075 0.014631 0.00764415 0.00873789 0.00987447 0.00987622 
+0.0117808 0.0131686 0.0260394 0.0124847 0.00917253 0.00935599 0.0121716 0.0102702 0.0119934 0.00960652 0.00970628 0.0140549 0.0122548 0.00875833 0.00919377 0.00800878 0.00767322 0.0143421 0.011313 0.00766405 0.00963594 0.00954734 0.0189531 0.00766549 0.0109007 0.00812593 0.00751261 0.0105147 0.00966133 0.0129068 0.0116813 0.0112717 0.00768834 0.014912 0.0111737 0.00960563 0.010495 0.00892403 0.0094268 0.0131169 0.00957561 0.0098624 0.00875655 0.00809833 0.00980761 0.0129471 0.0114696 0.0128509 0.0132198 
+0.0128728 0.00754542 0.0272263 0.00841808 0.00961898 0.0134181 0.0119189 0.00969064 0.0141764 0.00847745 0.0087991 0.00801522 0.00934744 0.013008 0.00884788 0.0114456 0.00988856 0.00826338 0.00976729 0.00765628 0.00786383 0.0110866 0.0136011 0.0142686 0.00886824 0.00878301 0.00908172 0.017851 0.0110583 0.0167745 0.00937047 0.0122023 0.0119426 0.00983617 0.00910604 0.0103776 0.0152968 0.0110378 0.0143341 0.0128733 0.0125787 0.0134641 0.00964661 0.0115224 0.0103523 0.00804197 0.0094482 0.0086975 0.00818573 
+0.00834996 0.00860843 0.0220945 0.0156014 0.00955845 0.0126231 0.00927052 0.013229 0.00944917 0.0111339 0.0163859 0.0091304 0.0135686 0.00793309 0.0126384 0.00932867 0.0128817 0.0151253 0.0150509 0.00851554 0.0149856 0.015832 0.0118321 0.0101956 0.00880305 0.0126798 0.0100365 0.00992585 0.0125127 0.00956911 0.0162798 0.0156639 0.0107167 0.00869821 0.012255 0.00816853 0.0192703 0.0143898 0.00874272 0.00905154 0.0142665 0.00990381 0.0113684 0.00759074 0.00783852 0.00756475 0.0101522 0.0110912 0.00986131 
+0.00788678 0.00754396 0.0213054 0.00952812 0.00790573 0.00924967 0.0146543 0.0125513 0.0186887 0.0107692 0.00781497 0.0113856 0.00978296 0.0107904 0.0209126 0.0183813 0.00983343 0.0173344 0.0116196 0.0104162 0.0118127 0.00889438 0.00896459 0.00983978 0.00912694 0.0106256 0.0132608 0.0125923 0.00989816 0.00935849 0.00809012 0.00796844 0.00824349 0.00800019 0.00794825 0.0145521 0.00910509 0.0121976 0.0126351 0.014056 0.0189283 0.0132789 0.00778931 0.0111159 0.0167268 0.0077924 0.0100442 0.00754377 0.00846491 
+0.0151892 0.0147033 0.0214282 0.0158833 0.0140911 0.0170617 0.0173505 0.016094 0.0125712 0.00973822 0.0156904 0.00969671 0.0131546 0.00913591 0.0117313 0.0076096 0.0102728 0.0100895 0.00836626 0.00794622 0.00752702 0.00830944 0.0134547 0.0131475 0.0127175 0.0145938 0.011852 0.0137899 0.00786304 0.0097474 0.0132174 0.0132138 0.00766026 0.0107259 0.0141722 0.0114705 0.0125668 0.0137401 0.0117221 0.0100631 0.00867171 0.0124746 0.00915375 0.0159038 0.0098235 0.0116108 0.0136184 0.0100869 0.0132627 
+0.00850717 0.0125984 0.0317246 0.0128163 0.00898107 0.0112215 0.0159226 0.0104672 0.00830461 0.0105821 0.0136896 0.00808372 0.00906894 0.010944 0.00916257 0.00798513 0.0139255 0.0113831 0.0111963 0.0293453 0.0171321 0.0123739 0.0138761 0.0109913 0.0114357 0.0109147 0.0130646 0.00904413 0.0142583 0.0125932 0.0134521 0.00784065 0.00888373 0.0121247 0.00871721 0.00841431 0.012501 0.0165497 0.0090014 0.0128262 0.00779781 0.00886985 0.0153363 0.0121441 0.0149739 0.0157173 0.00927695 0.0173405 0.00943229 
+0.0108811 0.0100375 0.0211207 0.00758668 0.0167146 0.009191 0.01282 0.00817154 0.0143318 0.0163734 0.0103566 0.00970306 0.00882377 0.00941159 0.0111139 0.00913247 0.0086223 0.0190639 0.0121515 0.00903945 0.00845229 0.0175803 0.0138564 0.00849259 0.0137876 0.0119419 0.012902 0.0132621 0.00769724 0.00858538 0.012828 0.0156855 0.00763261 0.00928921 0.00959716 0.00909133 0.00911388 0.00811706 0.0141702 0.00982251 0.0146678 0.0157889 0.00855938 0.0124337 0.0136499 0.00760498 0.00857573 0.011739 0.00828991 
+0.0127836 0.0158323 0.0229342 0.013479 0.01336 0.0100643 0.0115649 0.0101129 0.0115251 0.00990176 0.0102713 0.0143071 0.01646 0.0206183 0.0145796 0.00980051 0.0105814 0.00800557 0.0115127 0.0118343 0.00764139 0.011606 0.00808887 0.0156069 0.0142965 0.00920225 0.00901175 0.0179186 0.010476 0.0126085 0.0113398 0.0165795 0.0102332 0.0130093 0.0108143 0.0107896 0.0136776 0.00758194 0.00898096 0.0112934 0.0088035 0.00899996 0.00900983 0.0181271 0.0104256 0.0135279 0.0207845 0.00974133 0.0132526 
+0.00827218 0.0112403 0.0234895 0.0136407 0.0127061 0.0100592 0.0211344 0.00832512 0.0102033 0.0101187 0.00950312 0.00794106 0.00881176 0.00794202 0.0126941 0.00758617 0.00982952 0.0118735 0.00805153 0.0121912 0.0120513 0.00924288 0.0123826 0.0130124 0.012946 0.00779521 0.00853779 0.00885318 0.0112932 0.0114906 0.0140359 0.00815076 0.00869559 0.00935492 0.0162081 0.0145135 0.0104643 0.00800823 0.0121961 0.0111726 0.00941698 0.0156518 0.00814818 0.008761 0.0108818 0.00998925 0.0260524 0.0119787 0.0188741 
+0.0136581 0.00830176 0.0237861 0.00768103 0.0108723 0.0136541 0.0129849 0.0119139 0.012533 0.00948092 0.00884711 0.00838237 0.0148984 0.0151726 0.0137164 0.010204 0.00813674 0.0151501 0.00755114 0.0109602 0.00879855 0.0085259 0.012194 0.00771172 0.0199299 0.0143162 0.00899754 0.00780947 0.0110992 0.0142764 0.0117287 0.00809462 0.00883272 0.010033 0.0139142 0.0142002 0.00956673 0.0078916 0.0131095 0.00961118 0.00801448 0.0101322 0.00757168 0.0102547 0.0105793 0.00825964 0.0127969 0.00964856 0.0119805 
+0.0117749 0.0168925 0.0295973 0.0126909 0.0136641 0.00774103 0.00904598 0.0144074 0.0134041 0.0162224 0.00875431 0.0176688 0.0166057 0.011937 0.00911534 0.0128318 0.0115654 0.0125339 0.00820005 0.0099213 0.015953 0.0143193 0.0127352 0.00866696 0.00986538 0.00955488 0.00925682 0.0115961 0.00904329 0.015993 0.010365 0.0112922 0.00769039 0.00946341 0.0151136 0.00863344 0.00960083 0.0107055 0.0166727 0.0115462 0.0102101 0.0148867 0.0127146 0.00799505 0.00755202 0.00821962 0.0173684 0.0114934 0.0100888 
+0.0115904 0.0161935 0.0210645 0.0128695 0.0102525 0.0130026 0.0110466 0.013117 0.0153459 0.0111719 0.0105414 0.0168115 0.0153026 0.0117334 0.00847964 0.013288 0.00948692 0.00893241 0.0157404 0.00783662 0.0155561 0.0126165 0.0137172 0.00754896 0.00946696 0.00857036 0.00921242 0.00842347 0.0113927 0.00835872 0.011205 0.0112152 0.0139044 0.00771779 0.00880467 0.00810825 0.00943809 0.0114274 0.00999356 0.00844256 0.0116472 0.0112303 0.0126282 0.00864799 0.0141594 0.0133817 0.0139686 0.013671 0.00757762 
+0.00847853 0.0101838 0.0207027 0.00823753 0.0193805 0.0173396 0.011372 0.0100498 0.0124164 0.0102363 0.0122627 0.0119874 0.00884046 0.0120114 0.0126816 0.00805045 0.011121 0.00980431 0.00838928 0.012769 0.00859078 0.0120737 0.00768856 0.0123908 0.0133008 0.0107473 0.0111521 0.00875186 0.0079327 0.0092638 0.00846935 0.0115291 0.0127159 0.0149261 0.021916 0.0132002 0.00803489 0.0114366 0.0125612 0.011724 0.0152145 0.0104299 0.00761108 0.0108463 0.011324 0.00971698 0.00889901 0.0170197 0.0101838 
+0.0087501 0.0129461 0.0210226 0.00982926 0.00857503 0.0105955 0.00903077 0.00786965 0.0171157 0.0142813 0.0120519 0.00772997 0.0173771 0.0159128 0.0117969 0.00982372 0.00779776 0.00996083 0.0109033 0.0136487 0.00972399 0.014807 0.0109314 0.0132037 0.0137179 0.0086105 0.0113871 0.0091689 0.0111315 0.012598 0.0212768 0.0174293 0.01008 0.0129319 0.0103472 0.007781 0.0148411 0.00853454 0.0171013 0.0123093 0.0127419 0.010979 0.0163594 0.00755121 0.00967992 0.00778865 0.00889473 0.016252 0.00858042 
+0.00816276 0.00893204 0.0241534 0.0078781 0.0111201 0.0107338 0.0131887 0.0127229 0.0182888 0.0102698 0.00759193 0.00924786 0.0126784 0.0105905 0.00906858 0.00832025 0.0110609 0.0181397 0.00853273 0.0139305 0.00870982 0.0149465 0.00776266 0.0083783 0.00855052 0.0114378 0.0144946 0.0105753 0.0211949 0.0108381 0.00935234 0.0164875 0.00878224 0.00805143 0.0123776 0.0180753 0.017165 0.00885738 0.0124324 0.0208931 0.0118545 0.0086379 0.0102169 0.0163785 0.012675 0.0116462 0.0117442 0.0119094 0.0110756 
+0.0148348 0.00961162 0.0226236 0.0148194 0.0153942 0.00757926 0.00982272 0.0109519 0.0139475 0.0106584 0.0130521 0.0169529 0.00899622 0.0108567 0.0140035 0.0114788 0.00942968 0.00925525 0.0114391 0.00774959 0.0143579 0.0204858 0.0155225 0.018395 0.0138634 0.0162887 0.0111597 0.0176738 0.0111368 0.0117033 0.0101117 0.00934881 0.0152992 0.00881209 0.0107505 0.00776317 0.00879125 0.00964793 0.0103612 0.00949678 0.00965235 0.0102129 0.0132147 0.00974566 0.0113424 0.0079461 0.0125196 0.0129287 0.00783567 
+0.0120911 0.00817481 0.0237549 0.0147351 0.0117855 0.00928076 0.0107701 0.0121053 0.0115229 0.00856005 0.0138143 0.00909436 0.0102478 0.0157573 0.0143705 0.00929775 0.010034 0.00962566 0.00813062 0.00835003 0.0145278 0.0198229 0.0127871 0.0108061 0.0144633 0.00859746 0.0113239 0.0103966 0.0133368 0.0100164 0.0157657 0.0106799 0.0126575 0.0145848 0.0153154 0.0133791 0.0119951 0.00827273 0.0125771 0.021774 0.00865036 0.0144126 0.016041 0.00866422 0.00829539 0.0158461 0.0178761 0.0151354 0.0131831 
+0.00805311 0.0141055 0.0209319 0.0113789 0.00773719 0.0129798 0.00781845 0.00849937 0.0120866 0.0115675 0.0113208 0.0106093 0.00851479 0.0129702 0.0162205 0.0109245 0.00811679 0.00988323 0.0130492 0.0113375 0.0101404 0.00963577 0.0104726 0.00817791 0.00894249 0.00827902 0.0192529 0.00930379 0.0126582 0.0101482 0.0110285 0.00751677 0.0123917 0.00844846 0.0154179 0.00987267 0.011543 0.0107637 0.0128567 0.0098703 0.0121757 0.0136664 0.00861062 0.00847489 0.0113065 0.0101664 0.00782308 0.00762203 0.0126791 
+0.0123994 0.00981428 0.022354 0.0118202 0.010715 0.0132672 0.00989459 0.0100312 0.010663 0.0084528 0.01021 0.0130803 0.00750065 0.00968146 0.0104169 0.0118141 0.0112502 0.0169563 0.0119578 0.00754256 0.00986909 0.00764487 0.0183683 0.00903233 0.0113879 0.00957045 0.00900771 0.0102431 0.0083436 0.012388 0.0106586 0.00759898 0.00982518 0.00961964 0.00757725 0.00913905 0.0112435 0.00938767 0.00760967 0.0200915 0.00897976 0.0126304 0.00991396 0.0144085 0.00900377 0.0104148 0.0218281 0.00844023 0.009501 
+0.0111347 0.0120638 0.0236069 0.0121996 0.00845197 0.0221136 0.0116805 0.0171462 0.00976275 0.0104405 0.00774986 0.0158893 0.00811754 0.00844794 0.0108582 0.016115 0.0104086 0.00884339 0.00827747 0.0149474 0.00861338 0.0123303 0.00795884 0.0139507 0.0110319 0.015533 0.0149989 0.0168995 0.0091204 0.0130215 0.0143505 0.00871318 0.0148872 0.0190812 0.0117922 0.00763548 0.00917549 0.0109953 0.0135283 0.0120184 0.00878941 0.00815475 0.00850422 0.0103672 0.00964898 0.0097262 0.0141046 0.0140309 0.00879619 
+0.0105143 0.013703 0.0232045 0.0113357 0.00968691 0.00904893 0.00968514 0.012664 0.0127936 0.0077164 0.0152491 0.0159674 0.011685 0.00993868 0.00918175 0.00873503 0.00957997 0.00874447 0.0171354 0.00938301 0.0143542 0.00762579 0.0110455 0.012735 0.0125789 0.00779042 0.0104997 0.0157228 0.0182966 0.0109702 0.0127351 0.00883668 0.0119592 0.0081488 0.012322 0.0156327 0.0110513 0.00956541 0.00819633 0.0140311 0.0175009 0.00928975 0.00885438 0.0097019 0.00800561 0.00777336 0.0088903 0.00898481 0.0107819 
+0.0098046 0.0117494 0.0263231 0.015436 0.013662 0.00908872 0.00862825 0.0217945 0.00828076 0.00992346 0.00937373 0.00984248 0.0101574 0.0139904 0.0106609 0.00980107 0.0193559 0.0143811 0.00938213 0.00838145 0.01026 0.00908042 0.00957255 0.0142897 0.0115678 0.00837216 0.00847856 0.00750615 0.0170843 0.00771556 0.0136041 0.0085186 0.0117335 0.0104907 0.00772486 0.0120301 0.0104384 0.011429 0.00898434 0.00895333 0.00933175 0.0129645 0.012382 0.00803812 0.010185 0.0108926 0.0098767 0.00845662 0.0111648 
+0.00819075 0.00884651 0.0287952 0.00805318 0.0131068 0.0101037 0.011449 0.00829198 0.0132162 0.013026 0.0146038 0.0155428 0.00806449 0.00793688 0.00933993 0.0117829 0.0150817 0.0120513 0.0139549 0.0161088 0.0124043 0.00832278 0.0112157 0.00761119 0.0130748 0.00753709 0.00835639 0.0128375 0.0147134 0.00809342 0.0106003 0.0101066 0.0137563 0.00912825 0.0087958 0.0099307 0.010259 0.0100663 0.00872608 0.0132512 0.00883836 0.012254 0.013586 0.00992891 0.00758181 0.0143006 0.0089286 0.0088272 0.0107939 
+0.012967 0.00825558 0.0222214 0.0128302 0.0080524 0.0110999 0.015294 0.0135671 0.0122866 0.00918322 0.0107904 0.00987674 0.012935 0.0125545 0.0142801 0.0157007 0.0106536 0.0195497 0.0138837 0.015188 0.0127691 0.0171765 0.011912 0.0112012 0.0135172 0.011662 0.0192824 0.0142208 0.0133748 0.0117129 0.0131057 0.0140483 0.010904 0.00974626 0.0186114 0.00855291 0.0161563 0.0130663 0.0232055 0.0115444 0.0111065 0.0126702 0.00987692 0.011846 0.0147607 0.00895658 0.010586 0.0142193 0.00830078 
+
+150 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+150 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 >

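The two trailing rows of the data file above are label vectors in the plain-text serialization the toy example files use: a length N, then N whitespace-separated values enclosed in "< ... >" (here, a binary and a three-class labeling of the 150 test examples). As a minimal sketch, assuming only this "N < v1 ... vN >" layout, such a row can be parsed with standard C++ streams as shown below; the helper name parseLabelRow and the standalone main are illustrative assumptions, not part of gp-hik-core.

#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical helper (not part of gp-hik-core): parse one label row of the
// form "N < v1 v2 ... vN >" as written in the toy example data files above.
std::vector<int> parseLabelRow ( const std::string & line )
{
  std::istringstream iss ( line );
  std::size_t n;
  std::string bracket;

  // read the declared length and the opening "<"
  if ( !(iss >> n >> bracket) || bracket != "<" )
    throw std::runtime_error ( "expected \"N <\" at the start of the row" );

  // read exactly N integer labels
  std::vector<int> labels ( n );
  for ( std::size_t i = 0; i < n; ++i )
  {
    if ( !(iss >> labels[i]) )
      throw std::runtime_error ( "row ended before all N values were read" );
  }

  // read the closing ">"
  if ( !(iss >> bracket) || bracket != ">" )
    throw std::runtime_error ( "expected closing \">\"" );

  return labels;
}

int main ()
{
  // toy usage with a shortened row in the same format as above
  std::vector<int> y = parseLabelRow ( "6 < 0 0 1 1 2 2 >" );
  std::cout << "parsed " << y.size() << " labels" << std::endl;
  return 0;
}

Note that the feature rows preceding the label vectors are plain whitespace-separated floats (one example per row) and could be read with the same stream-based approach, one std::vector<double> per line.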
Some files were not shown because too many files changed in this diff