Erik Rodner 12 years ago
commit
72d211d8f0
100 changed files with 17731 additions and 0 deletions
  1. FMKGPHyperparameterOptimization.cpp (+1827 / -0)
  2. FMKGPHyperparameterOptimization.h (+270 / -0)
  3. FastMinKernel.cpp (+1488 / -0)
  4. FastMinKernel.h (+437 / -0)
  5. FeatureMatrixT.h (+417 / -0)
  6. FeatureMatrixT.tcc (+949 / -0)
  7. GMHIKernel.cpp (+209 / -0)
  8. GMHIKernel.h (+88 / -0)
  9. GPHIKClassifier.cpp (+353 / -0)
  10. GPHIKClassifier.h (+133 / -0)
  11. GPLikelihoodApprox.cpp (+367 / -0)
  12. GPLikelihoodApprox.h (+133 / -0)
  13. IKMLinearCombination.cpp (+230 / -0)
  14. IKMLinearCombination.h (+81 / -0)
  15. IKMNoise.cpp (+246 / -0)
  16. IKMNoise.h (+84 / -0)
  17. ImplicitKernelMatrix.cpp (+21 / -0)
  18. ImplicitKernelMatrix.h (+66 / -0)
  19. License (+165 / -0)
  20. Makefile (+8 / -0)
  21. Makefile.inc (+103 / -0)
  22. Quantization.cpp (+39 / -0)
  23. Quantization.h (+69 / -0)
  24. README (+18 / -0)
  25. SortedVectorSparse.h (+666 / -0)
  26. VectorSorter.cpp (+50 / -0)
  27. VectorSorter.h (+101 / -0)
  28. algebra/LogDetApprox.h (+33 / -0)
  29. algebra/LogDetApproxBaiAndGolub.cpp (+168 / -0)
  30. algebra/LogDetApproxBaiAndGolub.h (+116 / -0)
  31. algebra/Makefile (+8 / -0)
  32. algebra/Makefile.inc (+103 / -0)
  33. configs/AL_predVar_fine.conf (+148 / -0)
  34. configs/AwA.conf (+48 / -0)
  35. configs/GP_IL_New_Examples.conf (+52 / -0)
  36. configs/ImagenetBinaryGP.conf (+41 / -0)
  37. configs/computeNormHistFeat.conf (+6 / -0)
  38. configs/createSIFTFeatures.conf (+19 / -0)
  39. configs/createSIFTFeaturesHSG.conf (+18 / -0)
  40. configs/scenes.reclassification.conf (+9 / -0)
  41. configs/scenes.smalltest.conf (+9 / -0)
  42. configs/scenes.std.conf (+9 / -0)
  43. doxy/CODING (+1 / -0)
  44. doxy/coding.doxy (+10 / -0)
  45. doxy/logoV1.png (binary)
  46. doxy/logoV1.svg (+101 / -0)
  47. doxy/mainpage.doxy (+35 / -0)
  48. doxy/readme.doxy (+7 / -0)
  49. doxyfile_gp_hik.txt (+1685 / -0)
  50. kernels/GeneralizedIntersectionKernelFunction.h (+122 / -0)
  51. kernels/GeneralizedIntersectionKernelFunction.tcc (+147 / -0)
  52. kernels/GenericKernelFunction.h (+45 / -0)
  53. kernels/IntersectionKernelFunction.cpp (+79 / -0)
  54. kernels/IntersectionKernelFunction.h (+113 / -0)
  55. kernels/IntersectionKernelFunction.tcc (+198 / -0)
  56. kernels/Makefile (+8 / -0)
  57. kernels/Makefile.inc (+103 / -0)
  58. libdepend.inc (+1 / -0)
  59. parameterizedFunctions/Makefile (+8 / -0)
  60. parameterizedFunctions/Makefile.inc (+103 / -0)
  61. parameterizedFunctions/PFAbsExp.h (+93 / -0)
  62. parameterizedFunctions/PFExp.h (+85 / -0)
  63. parameterizedFunctions/PFMKL.h (+99 / -0)
  64. parameterizedFunctions/PFWeightedDim.h (+95 / -0)
  65. parameterizedFunctions/ParameterizedFunction.cpp (+54 / -0)
  66. parameterizedFunctions/ParameterizedFunction.h (+131 / -0)
  67. progs/Makefile.inc (+88 / -0)
  68. progs/completeEvaluationFastMinkernel.cpp (+274 / -0)
  69. progs/toyExample.cpp (+122 / -0)
  70. progs/toyExampleSmallScaleTrain.data (+64 / -0)
  71. progs/toyExampleTest.data (+154 / -0)
  72. tests/Makefile.inc (+89 / -0)
  73. tests/TestFastHIK.cpp (+1242 / -0)
  74. tests/TestFastHIK.h (+48 / -0)
  75. tests/TestFeatureMatrixT.cpp (+185 / -0)
  76. tests/TestFeatureMatrixT.h (+32 / -0)
  77. tests/TestVectorSorter.cpp (+190 / -0)
  78. tests/TestVectorSorter.h (+36 / -0)
  79. tests/sparse20x30matrixM.mat (binary)
  80. tests/sparse3x3matrixA.mat (binary)
  81. tests/toyExample1.data (+42 / -0)
  82. tests/toyExample2.data (+9 / -0)
  83. todo (+5 / -0)
  84. tools.h (+431 / -0)
  85. tutorial/Makefile (+40 / -0)
  86. tutorial/al-base.sty (+4 / -0)
  87. tutorial/beamercolorthemeal.sty (+44 / -0)
  88. tutorial/beamercolorthemefsu-blue.sty (+53 / -0)
  89. tutorial/beamerfontthemeal.sty (+21 / -0)
  90. tutorial/beamerouterthemeal.sty (+95 / -0)
  91. tutorial/beamerthemeJena.sty (+22 / -0)
  92. tutorial/img/fsuText-en.pdf (binary)
  93. tutorial/img/hanfried-en-blue.pdf (binary)
  94. tutorial/img/logoV1.pdf (binary)
  95. tutorial/img/logoV1blue.pdf (binary)
  96. tutorial/img/logoV2.pdf (binary)
  97. tutorial/img/logoV2blue.pdf (binary)
  98. tutorial/latex12.bst (+1124 / -0)
  99. tutorial/latex12.sty (+180 / -0)
  100. tutorial/notations.tex (+409 / -0)

+ 1827 - 0
FMKGPHyperparameterOptimization.cpp

@@ -0,0 +1,1827 @@
+/**
+* @file FMKGPHyperparameterOptimization.cpp
+* @brief Heart of the framework to set up everything, perform optimization, incremental updates, classification, variance prediction (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 01/02/2012
+
+*/
+#include <iostream>
+#include <map>
+
+#include <core/algebra/ILSConjugateGradients.h>
+#include <core/algebra/ILSConjugateGradientsLanczos.h>
+#include <core/algebra/ILSSymmLqLanczos.h>
+#include <core/algebra/ILSMinResLanczos.h>
+#include <core/algebra/ILSPlainGradient.h>
+#include <core/algebra/EigValuesTRLAN.h>
+#include <core/algebra/CholeskyRobust.h>
+#include <core/vector/Algorithms.h>
+#include <core/vector/Eigen.h>
+#include <core/basics/Timer.h>
+#include <core/basics/ResourceStatistics.h>
+
+#include "core/optimization/blackbox/DownhillSimplexOptimizer.h"
+
+#include "FMKGPHyperparameterOptimization.h"
+#include "FastMinKernel.h"
+#include "GMHIKernel.h"
+#include "IKMNoise.h"
+
+
+using namespace NICE;
+using namespace std;
+
+FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization()
+{
+  pf = NULL;
+  eig = NULL;
+  linsolver = NULL;
+  fmk = NULL;
+  q = NULL;
+  precomputedTForVarEst = NULL;
+  verbose = false;
+  verboseTime = false;
+  debug = false;
+}
+
+FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization ( const Config *_conf, ParameterizedFunction *_pf, FastMinKernel *_fmk, const string & _confSection )
+{
+  //default settings, may be overwritten later on
+  pf = NULL;
+  eig = NULL;
+  linsolver = NULL;
+  fmk = NULL;
+  q = NULL;
+  precomputedTForVarEst = NULL;
+
+  if ( _fmk == NULL )
+    this->initialize ( _conf, _pf ); //then the confSection is also the default value
+  //TODO not needed anymore, kept only for backward compatibility
+//   else if ( _confSection.compare ( "HIKGP" ) == 0 )
+//     this->initialize ( _conf, _pf, _fmk );
+  else
+    this->initialize ( _conf, _pf, _fmk, _confSection );
+}
+
+FMKGPHyperparameterOptimization::~FMKGPHyperparameterOptimization()
+{
+  //pf will be deleted by the calling program
+  if ( this->eig != NULL )
+    delete this->eig;
+  if ( this->linsolver != NULL )
+    delete this->linsolver;
+  if ( this->fmk != NULL )
+    delete this->fmk;
+  if ( this->q != NULL )
+    delete this->q;
+
+  for ( uint i = 0 ; i < precomputedT.size(); i++ )
+    delete [] ( precomputedT[i] );
+
+  if ( precomputedTForVarEst != NULL )
+    delete precomputedTForVarEst;
+
+  for ( std::map<int, IKMLinearCombination * >::iterator it =  ikmsums.begin(); it != ikmsums.end(); it++ )
+    delete it->second;
+}
+
+void FMKGPHyperparameterOptimization::initialize ( const Config *_conf, ParameterizedFunction *_pf, FastMinKernel *_fmk, const std::string & _confSection )
+{
+  if ( this->fmk != NULL )
+    delete this->fmk;
+  if ( _fmk != NULL )
+    this->fmk = _fmk;
+  this->pf = _pf;
+  
+  
+  std::cerr << "------------" << std::endl;
+  std::cerr << "|  set-up  |" << std::endl;
+  std::cerr << "------------" << std::endl;
+
+
+  this->eig = new EVArnoldi ( _conf->gB ( _confSection, "eig_verbose", false ) /* verbose flag */, 10 );
+  // this->eig = new EigValuesTRLAN();
+  // My time measurements show that both methods take roughly the same time; a comparison
+  // of their numerical performance has not been done yet
+
+
+  this->parameterUpperBound = _conf->gD ( _confSection, "parameter_upper_bound", 2.5 );
+  this->parameterLowerBound = _conf->gD ( _confSection, "parameter_lower_bound", 1.0 );
+  this->parameterStepSize = _conf->gD ( _confSection, "parameter_step_size", 0.1 );
+
+  this->verifyApproximation = _conf->gB ( _confSection, "verify_approximation", false );
+  this->nrOfEigenvaluesToConsider = _conf->gI ( _confSection, "nrOfEigenvaluesToConsider", 1 );
+  this->nrOfEigenvaluesToConsiderForVarApprox = _conf->gI ( _confSection, "nrOfEigenvaluesToConsiderForVarApprox", 2 );
+
+  this->verbose = _conf->gB ( _confSection, "verbose", false );
+  this->verboseTime = _conf->gB ( _confSection, "verboseTime", false );
+  this->debug = _conf->gB ( _confSection, "debug", false );
+
+  bool useQuantization = _conf->gB ( _confSection, "use_quantization", false );
+  std::cerr << "_confSection: " << _confSection << std::endl;
+  std::cerr << "use_quantization: " << useQuantization << std::endl;
+  if ( useQuantization ) {
+    int numBins = _conf->gI ( _confSection, "num_bins", 100 );
+    if ( verbose )
+      cerr << "FMKGPHyperparameterOptimization: quantization initialized with " << numBins << " bins." << endl;
+    this->q = new Quantization ( numBins );
+  } else {
+    this->q = NULL;
+  }
+
+  bool ils_verbose = _conf->gB ( _confSection, "ils_verbose", false );
+  ils_max_iterations = _conf->gI ( _confSection, "ils_max_iterations", 1000 );
+  if ( verbose )
+    cerr << "FMKGPHyperparameterOptimization: maximum number of iterations is " << ils_max_iterations << endl;
+
+  double ils_min_delta = _conf->gD ( _confSection, "ils_min_delta", 1e-7 );
+  double ils_min_residual = _conf->gD ( _confSection, "ils_min_residual", 1e-7/*1e-2 */ );
+
+  string ils_method = _conf->gS ( _confSection, "ils_method", "CG" );
+  if ( ils_method.compare ( "CG" ) == 0 )
+  {
+    if ( verbose )
+      std::cerr << "We use CG with " << ils_max_iterations << " iterations, " << ils_min_delta << " as min delta, and " << ils_min_residual << " as min res " << std::endl;
+    this->linsolver = new ILSConjugateGradients ( ils_verbose , ils_max_iterations, ils_min_delta, ils_min_residual );
+    if ( verbose )
+      cerr << "FMKGPHyperparameterOptimization: using ILS ConjugateGradients" << endl;
+  }
+  else if ( ils_method.compare ( "CGL" ) == 0 )
+  {
+    this->linsolver = new ILSConjugateGradientsLanczos ( ils_verbose , ils_max_iterations );
+    if ( verbose )
+      cerr << "FMKGPHyperparameterOptimization: using ILS ConjugateGradients (Lanczos)" << endl;
+  }
+  else if ( ils_method.compare ( "SYMMLQ" ) == 0 )
+  {
+    this->linsolver = new ILSSymmLqLanczos ( ils_verbose , ils_max_iterations );
+    if ( verbose )
+      cerr << "FMKGPHyperparameterOptimization: using ILS SYMMLQ" << endl;
+  }
+  else if ( ils_method.compare ( "MINRES" ) == 0 )
+  {
+    this->linsolver = new ILSMinResLanczos ( ils_verbose , ils_max_iterations );
+    if ( verbose )
+      cerr << "FMKGPHyperparameterOptimization: using ILS MINRES" << endl;
+  }
+  else
+  {
+    cerr << "FMKGPHyperparameterOptimization: " << _confSection << ":ils_method (" << ils_method << ") does not match any type (CG,CGL,SYMMLQ,MINRES), I will use CG" << endl;
+    this->linsolver = new ILSConjugateGradients ( ils_verbose , ils_max_iterations, ils_min_delta, ils_min_residual );
+  }
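+  // For illustration, an INI-style config snippet (like the files in configs/) could select
+  // and tune the solver chosen above; the section name and the values below are only an
+  // example, not taken from one of the shipped configs:
+  //
+  //   [GPHIKClassifier]
+  //   ils_method         = MINRES
+  //   ils_max_iterations = 250
+  //   ils_min_delta      = 1e-7
+  //   ils_min_residual   = 1e-7
+  //   ils_verbose        = false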
+  
+  this->usePreviousAlphas = _conf->gB (_confSection, "usePreviousAlphas", true );
+
+  string optimizationMethod_s = _conf->gS ( _confSection, "optimization_method", "greedy" );
+  if ( optimizationMethod_s == "greedy" )
+    optimizationMethod = OPT_GREEDY;
+  else if ( optimizationMethod_s == "downhillsimplex" )
+    optimizationMethod = OPT_DOWNHILLSIMPLEX;
+  else if ( optimizationMethod_s == "none" )
+    optimizationMethod = OPT_NONE;
+  else
+    fthrow ( Exception, "Optimization method " << optimizationMethod_s << " is not known." );
+
+  if ( verbose )
+    cerr << "Using optimization method: " << optimizationMethod_s << endl;
+
+  downhillSimplexMaxIterations = _conf->gI ( _confSection, "downhillsimplex_max_iterations", 20 );
+  // do not run longer than a day :)
+  downhillSimplexTimeLimit = _conf->gD ( _confSection, "downhillsimplex_time_limit", 24 * 60 * 60 );
+  downhillSimplexParamTol = _conf->gD ( _confSection, "downhillsimplex_delta", 0.01 );
+
+  learnBalanced = _conf->gB ( _confSection, "learn_balanced", false );
+  std::cerr << "balanced learning: " << learnBalanced << std::endl;
+
+  optimizeNoise = _conf->gB ( _confSection, "optimize_noise", false );
+  if ( verbose )
+    cerr << "Optimize noise: " << ( optimizeNoise ? "on" : "off" ) << endl;
+  
+  std::cerr << "------------" << std::endl;
+  std::cerr << "|   start   |" << std::endl;
+  std::cerr << "------------" << std::endl;  
+}
+
+void FMKGPHyperparameterOptimization::setParameterUpperBound ( const double & _parameterUpperBound )
+{
+  parameterUpperBound = _parameterUpperBound;
+}
+void FMKGPHyperparameterOptimization::setParameterLowerBound ( const double & _parameterLowerBound )
+{
+  parameterLowerBound = _parameterLowerBound;
+}
+
+void FMKGPHyperparameterOptimization::setupGPLikelihoodApprox ( std::map<int, GPLikelihoodApprox * > & gplikes, const std::map<int, NICE::Vector> & binaryLabels, std::map<int, uint> & parameterVectorSizes )
+{
+  if ( learnBalanced )
+  {
+    if ( verbose )
+    {
+      std::cerr << "FMKGPHyperparameterOptimization::setupGPLikelihoodApprox -- balanced setting" << std::endl;
+      std::cerr << "number of ikmsum-objects: " << ikmsums.size() << std::endl;
+    }
+    
+    for ( std::map<int, IKMLinearCombination*>::const_iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+    {
+      map<int, NICE::Vector> binaryLabelsSingle;
+      binaryLabelsSingle.insert ( *binaryLabels.find ( it->first ) );
+      GPLikelihoodApprox *gplike = new GPLikelihoodApprox ( binaryLabelsSingle, it->second, linsolver, eig, verifyApproximation, nrOfEigenvaluesToConsider );
+      gplike->setUsePreviousAlphas( usePreviousAlphas );
+      gplike->setDebug( debug );
+      gplike->setVerbose( verbose );
+      gplikes.insert ( std::pair<int, GPLikelihoodApprox * > ( it->first, gplike ) );
+      parameterVectorSizes.insert ( std::pair<int, uint> ( it->first, it->second->getNumParameters() ) );
+    }
+    if ( verbose )
+      std::cerr << "resulting number of gplike-objects: " << gplikes.size() << std::endl;
+  }
+  else
+  {
+    GPLikelihoodApprox *gplike = new GPLikelihoodApprox ( binaryLabels, ikmsums.begin()->second, linsolver, eig, verifyApproximation, nrOfEigenvaluesToConsider );
+    gplike->setUsePreviousAlphas( usePreviousAlphas );
+    gplike->setDebug( debug );
+    gplike->setVerbose( verbose );
+    gplikes.insert ( std::pair<int, GPLikelihoodApprox * > ( 0, gplike ) );
+    parameterVectorSizes.insert ( std::pair<int, uint> ( 0, ikmsums.begin()->second->getNumParameters() ) );
+  }
+}
+
+void FMKGPHyperparameterOptimization::updateEigenVectors()
+{
+  if ( verbose )
+  {
+    std::cerr << "FMKGPHyperparameterOptimization::updateEigenVectors -- size of ikmsums: " << ikmsums.size() << std::endl;
+    std::cerr << "class of first object: " << ikmsums.begin()->first << std::endl;
+  }
+  
+  if ( learnBalanced )
+  {
+    //compute eigenvalues and eigenvectors of every class-specific kernel matrix for the fine approximation of predictive uncertainties
+    std::map<int, IKMLinearCombination * >::iterator ikmsumsIt;
+    eigenMax.resize(ikmsums.size());
+    eigenMaxVectors.resize(ikmsums.size());
+    
+    int classCnt(0);
+    for ( ikmsumsIt = ikmsums.begin(); ikmsumsIt != ikmsums.end(); ikmsumsIt++, classCnt++ )
+    {
+      
+      eig->getEigenvalues ( * ikmsumsIt->second, eigenMax[classCnt], eigenMaxVectors[classCnt], nrOfEigenvaluesToConsiderForVarApprox );
+    }
+  }
+  else
+  {
+    std::cerr << "not balanced, considere for VarApprox: " << nrOfEigenvaluesToConsiderForVarApprox << " eigenvalues" << std::endl;
+    std::cerr << "and for simple: " << nrOfEigenvaluesToConsider << std::endl;
+    if (nrOfEigenvaluesToConsiderForVarApprox > 1)
+      nrOfEigenvaluesToConsiderForVarApprox = 1;
+    //compute the largest eigenvalue of K + noise
+    eigenMax.resize(1);
+    eigenMaxVectors.resize(1);    
+    
+    eig->getEigenvalues ( * ( ikmsums.begin()->second ),  eigenMax[0], eigenMaxVectors[0], nrOfEigenvaluesToConsiderForVarApprox );
+  }
+}
+
+void FMKGPHyperparameterOptimization::performOptimization ( std::map<int, GPLikelihoodApprox * > & gplikes, const std::map<int, uint> & parameterVectorSizes, const bool & roughOptimization )
+{
+  if (verbose)
+    std::cerr << "perform optimization" << std::endl;
+  
+  if ( optimizationMethod == OPT_GREEDY )
+  {
+    if ( verbose )    
+      std::cerr << "OPT_GREEDY!!! " << std::endl;
+    
+    // simple greedy strategy
+    if ( ikmsums.begin()->second->getNumParameters() != 1 )
+      fthrow ( Exception, "Reduce size of the parameter vector or use downhill simplex!" );
+
+    Vector lB = ikmsums.begin()->second->getParameterLowerBounds();
+    Vector uB = ikmsums.begin()->second->getParameterUpperBounds();
+    
+    if ( verbose )
+      cerr << "lower bound " << lB << " upper bound " << uB << endl;
+
+    if ( learnBalanced )
+    {
+      if ( lB[0] == uB[0] ) //do we already know a specific parameter?
+      {
+        for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+        {
+          if ( verbose )
+            std::cerr << "Optimizing class " << gpLikeIt->first << std::endl;
+
+          OPTIMIZATION::matrix_type hyperp ( 1, 1, lB[0] );
+          gpLikeIt->second->evaluate ( hyperp );
+        }
+      }
+      else
+      {
+        fthrow ( Exception, "HYPERPARAMETER OPTIMZIATION SHOULD NOT BE USED TOGETHER WITH BALANCED LEARNING IN THIS FRAMEWORK!!!" );
+      }
+    }
+    else
+    {
+      for ( double mypara = lB[0]; mypara <= uB[0]; mypara += this->parameterStepSize )
+      {
+        OPTIMIZATION::matrix_type hyperp ( 1, 1, mypara );
+        gplikes.begin()->second->evaluate ( hyperp );
+      }
+    }
+  }
+  else if ( optimizationMethod == OPT_DOWNHILLSIMPLEX )
+  {
+
+    if ( learnBalanced )
+    {
+      if ( verbose )
+        std::cerr << "DOWNHILLSIMPLEX WITH BALANCED LEARNING!!! " << std::endl;
+      fthrow ( Exception, "HYPERPARAMETER OPTIMZIATION SHOULD NOT BE USED TOGETHER WITH BALANCED LEARNING IN THIS FRAMEWORK!!!" );
+
+      //unfortunately, we only have a single fmk-object;
+      //therefore, we would either have to copy the fmk-object once per class or perform some kind of averaging
+    }
+    else
+    { //standard as before, normal optimization
+      if ( verbose )    
+        std::cerr << "DOWNHILLSIMPLEX WITHOUT BALANCED LEARNING!!! " << std::endl;
+
+      // downhill simplex strategy
+      OPTIMIZATION::DownhillSimplexOptimizer optimizer;
+
+      OPTIMIZATION::matrix_type initialParams ( parameterVectorSizes.begin()->second, 1 );
+
+      Vector currentParameters;
+      ikmsums.begin()->second->getParameters ( currentParameters );
+
+      for ( uint i = 0 ; i < parameterVectorSizes.begin()->second; i++ )
+        initialParams(i,0) = currentParameters[ i ];
+
+      if ( verbose )      
+        std::cerr << "Initial parameters: " << initialParams << std::endl;
+
+//       OPTIMIZATION::matrix_type scales ( parameterVectorSizes.begin()->second, 1);
+
+      if ( roughOptimization ) //should be used when we perform the optimization for the first time
+      {
+//         scales.Set(1.0);
+      }
+      else  //should be used when we perform the optimization in an incremental learning scenario, so that we already have a good initial guess
+      {
+//         scales.Set(1.0);
+//         for ( uint i = 0 ; i < parameterVectorSizes.begin()->second; i++ )
+//           scales[i][0] = currentParameters[ i ];
+        optimizer.setDownhillParams ( 0.2 /* default: 1.0 */, 0.1 /* default: 0.5 */, 0.2 /* default: 1.0 */ );
+      }
+
+      //the scales object does not really matter in the actual implementation of Downhill Simplex
+      OPTIMIZATION::SimpleOptProblem optProblem ( gplikes.begin()->second, initialParams, initialParams /* scales*/ );
+
+      //     cerr << "OPT: " << mypara << " " << nlikelihood << " " << logdet << " " << dataterm << endl;
+      optimizer.setMaxNumIter ( true, downhillSimplexMaxIterations );
+      optimizer.setTimeLimit ( true, downhillSimplexTimeLimit );
+      optimizer.setParamTol ( true, downhillSimplexParamTol );
+      optimizer.optimizeProb ( optProblem );
+
+    }
+  }
+  else if ( optimizationMethod == OPT_NONE )
+  {
+    if ( verbose )
+      std::cerr << "NO OPTIMIZATION!!! " << std::endl;
+
+    // without optimization
+    if ( optimizeNoise )
+      fthrow ( Exception, "Deactivate optimize_noise!" );
+    
+    if ( verbose )
+      std::cerr << "Optimization is deactivated!" << std::endl;
+    
+    double value (1.0);
+    if ( this->parameterLowerBound == this->parameterUpperBound)
+      value = this->parameterLowerBound;
+
+    pf->setParameterLowerBounds ( NICE::Vector ( 1, value ) );
+    pf->setParameterUpperBounds ( NICE::Vector ( 1, value ) );
+
+    // we use the standard value
+    if ( learnBalanced )
+    {
+      for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+      {
+        OPTIMIZATION::matrix_type hyperp ( 1, 1, value);    
+        gpLikeIt->second->setParameterLowerBound ( value );
+        gpLikeIt->second->setParameterUpperBound ( value );   
+        gpLikeIt->second->evaluate ( hyperp );
+      }
+    }
+    else
+    {
+      OPTIMIZATION::matrix_type hyperp ( 1, 1, value );
+      gplikes.begin()->second->setParameterLowerBound ( value );
+      gplikes.begin()->second->setParameterUpperBound ( value );
+      gplikes.begin()->second->evaluate ( hyperp );
+    }
+  }
+
+  if ( learnBalanced )
+  {
+    lastAlphas.clear();
+    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      if (verbose)
+        std::cerr << "Optimal hyperparameter for class " << gpLikeIt->first << " was: " << gpLikeIt->second->getBestParameters() << std::endl;
+      
+      //collect the optimal alphas of every class-specific model
+      std::map<int, Vector> bestAlphas ( gpLikeIt->second->getBestAlphas() );
+      lastAlphas.insert ( bestAlphas.begin(), bestAlphas.end() );
+    }
+  }
+  else
+  {
+    if ( verbose )
+      std::cerr << "Optimal hyperparameter was: " << gplikes.begin()->second->getBestParameters() << std::endl;
+    lastAlphas.clear();
+    lastAlphas = gplikes.begin()->second->getBestAlphas();
+  }
+}
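+
+// A short note on the greedy branch above: it is a plain grid search over a single
+// hyperparameter; e.g., with the default bounds [1.0, 2.5] and a step size of 0.1 it
+// evaluates the approximate negative log-likelihood at 16 grid points, while
+// GPLikelihoodApprox keeps track of the best parameter and the corresponding alpha
+// vectors internally.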
+
+void FMKGPHyperparameterOptimization::transformFeaturesWithOptimalParameters ( const std::map<int, GPLikelihoodApprox * > & gplikes, const std::map<int, uint> & parameterVectorSizes )
+{
+  if ( verbose )
+    std::cerr << "FMKGPHyperparameterOptimization::transformFeaturesWithOptimalParameters" << std::endl;
+  
+  // transform all features with the "optimal" parameter
+  if ( learnBalanced )
+  {
+    if ( verbose )
+      std::cerr << "learn Balanced" << std::endl;
+    
+    double meanValue ( 0.0 );
+    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      meanValue += gpLikeIt->second->getBestParameters() [0];
+    }
+    meanValue /= gplikes.size();
+    NICE::Vector averagedParams ( parameterVectorSizes.begin()->second, meanValue );
+    
+    if ( verbose)
+      std::cerr << "averaged Params: " << averagedParams << std::endl;
+
+    //since we only have a single fmk-object, we only have to modify our data for a single time
+    ikmsums.begin()->second->setParameters ( averagedParams );
+  }
+  else
+  {
+    if ( verbose )
+    {
+      std::cerr << "learn not Balanced" << std::endl;
+      std::cerr << "previous best parameters. " << gplikes.begin()->second->getBestParameters() << std::endl;
+//     std::cerr << "previous best alphas: " << gplikes.begin()->second->getBestAlphas() << std::endl;
+    }
+    
+    ikmsums.begin()->second->setParameters ( gplikes.begin()->second->getBestParameters() );
+  }
+}
+
+void FMKGPHyperparameterOptimization::computeMatricesAndLUTs ( const std::map<int, GPLikelihoodApprox * > & gplikes )
+{
+  precomputedA.clear();
+  precomputedB.clear();
+
+  if ( learnBalanced )
+  {
+    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      map<int, Vector>::const_iterator i = gpLikeIt->second->getBestAlphas().begin();
+
+      PrecomputedType A;
+      PrecomputedType B;
+
+//       std::cerr << "computeMatricesAndLUTs -- alpha: " << i->second << std::endl;
+
+      fmk->hik_prepare_alpha_multiplications ( i->second, A, B );
+      A.setIoUntilEndOfFile ( false );
+      B.setIoUntilEndOfFile ( false );
+      precomputedA[ gpLikeIt->first ] = A;
+      precomputedB[ gpLikeIt->first ] = B;
+
+      if ( q != NULL )
+      {
+        double *T = fmk->hik_prepare_alpha_multiplications_fast ( A, B, *q, pf );
+        //just to be sure that we do not waste space here
+        if ( precomputedT[ gpLikeIt->first ] != NULL )
+          delete precomputedT[ gpLikeIt->first ];
+        
+        precomputedT[ gpLikeIt->first ] = T;
+      }
+    }
+  }
+  else
+  { //no GP rebalancing
+    for ( map<int, Vector>::const_iterator i = gplikes.begin()->second->getBestAlphas().begin(); i != gplikes.begin()->second->getBestAlphas().end(); i++ )
+    {
+      PrecomputedType A;
+      PrecomputedType B;
+
+//       std::cerr << "computeMatricesAndLUTs -- alpha: " << i->second << std::endl;
+
+      fmk->hik_prepare_alpha_multiplications ( i->second, A, B );
+      A.setIoUntilEndOfFile ( false );
+      B.setIoUntilEndOfFile ( false );
+      precomputedA[ i->first ] = A;
+      precomputedB[ i->first ] = B;
+
+      if ( q != NULL )
+      {
+        double *T = fmk->hik_prepare_alpha_multiplications_fast ( A, B, *q, pf );
+        //just to be sure that we do not waste space here
+        if ( precomputedT[ i->first ] != NULL )
+          delete precomputedT[ i->first ];
+        
+        precomputedT[ i->first ] = T;
+      }
+    }
+  }
+}
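+
+// A rough sketch of why A, B, and T are precomputed here: for the (generalized) histogram
+// intersection kernel, sum_j alpha_j * min(x_j, x*) splits into a cumulative alpha-weighted
+// feature sum (A) plus x* times a cumulative alpha sum (B), both taken over the training
+// features sorted per dimension, so a kernel sum costs O(d log n) per test example. The
+// optional table T additionally quantizes each dimension into a fixed number of bins,
+// reducing this to O(d) lookups at the price of a quantization error.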
+
+#ifdef NICE_USELIB_MATIO
+void FMKGPHyperparameterOptimization::optimizeBinary ( const sparse_t & data, const NICE::Vector & yl, const std::set<int> & positives, const std::set<int> & negatives, double noise )
+{
+  map<int, int> examples;
+  Vector y ( yl.size() );
+  int ind = 0;
+  for ( uint i = 0 ; i < yl.size(); i++ )
+  {
+    if ( positives.find ( i ) != positives.end() ) {
+      y[ examples.size() ] = 1.0;
+      examples.insert ( pair<int, int> ( i, ind ) );
+      ind++;
+    } else if ( negatives.find ( i ) != negatives.end() ) {
+      y[ examples.size() ] = -1.0;
+      examples.insert ( pair<int, int> ( i, ind ) );
+      ind++;
+    }
+  }
+  y.resize ( examples.size() );
+  cerr << "Examples: " << examples.size() << endl;
+
+  optimize ( data, y, examples, noise );
+}
+
+
+void FMKGPHyperparameterOptimization::optimize ( const sparse_t & data, const NICE::Vector & y, const std::map<int, int> & examples, double noise )
+{
+  Timer t;
+  t.start();
+  cerr << "Initializing data structure ..." << std::endl;
+  if ( fmk != NULL ) delete fmk;
+  fmk = new FastMinKernel ( data, noise, examples );
+  t.stop();
+  if (verboseTime)
+    std::cerr << "Time used for initializing the FastMinKernel structure: " << t.getLast() << std::endl;
+  
+  optimize ( y );
+}
+#endif
+
+int FMKGPHyperparameterOptimization::prepareBinaryLabels ( map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses )
+{
+  myClasses.clear();
+  for ( NICE::Vector::const_iterator it = y.begin(); it != y.end(); it++ )
+    if ( myClasses.find ( *it ) == myClasses.end() )
+      myClasses.insert ( *it );
+
+  //count how many different classes appear in our data
+  int nrOfClasses = myClasses.size();
+
+  binaryLabels.clear();
+  //compute the corresponding binary label vectors
+  if ( nrOfClasses > 2 )
+  {
+    //resize every labelVector and set all entries to -1.0
+    for ( set<int>::const_iterator k = myClasses.begin(); k != myClasses.end(); k++ )
+    {
+      binaryLabels[ *k ].resize ( y.size() );
+      binaryLabels[ *k ].set ( -1.0 );
+    }
+
+    // now look on every example and set the entry of its corresponding label vector to 1.0
+    // proper existence should not be a problem
+    for ( int i = 0 ; i < ( int ) y.size(); i++ )
+      binaryLabels[ y[i] ][i] = 1.0;
+  }
+  else if ( nrOfClasses == 2 )
+  {
+    std::cerr << "binary setting -- prepare two binary label vectors with opposite signs" << std::endl;
+    Vector yb ( y );
+
+    int negativeClass = *(myClasses.begin());
+    std::set<int>::const_iterator classIt = myClasses.begin(); classIt++;
+    int positiveClass = *classIt;
+    
+    std::cerr << "positiveClass : " << positiveClass << " negativeClass: " << negativeClass << std::endl;
+    for ( uint i = 0 ; i < yb.size() ; i++ )
+      yb[i] = ( y[i] == negativeClass ) ? -1.0 : 1.0;
+    
+    binaryLabels[ positiveClass ] = yb;
+	  //binaryLabels[ 1 ] = yb;
+    
+    //uncomment the following, if you want to perform real binary computations with 2 classes
+// 	  //we only need one vector, which already contains +1 and -1, so we need only one computation too
+//     binaryLabels[ negativeClass ] = yb;
+//     binaryLabels[ negativeClass ] *= -1.0;  
+    
+    std::cerr << "binaryLabels.size(): " << binaryLabels.size() << std::endl;
+    
+//     binaryLabels[ 0 ] = yb;
+//     binaryLabels[ 0 ] *= -1.0;
+    
+    
+    //comment out the following if you want to perform a real binary computation; it should be unnecessary, but let's see...
+    
+    //we do not perform a real binary computation, but an implicit one with only a single object
+    nrOfClasses--;
+    std::set<int>::iterator it = myClasses.begin(); it++;
+    myClasses.erase(it);    
+  }
+  else //OCC setting
+  {
+    //we set the labels to 1, independent of the previously given class number
+    Vector yNew ( y.size(), 1 );
+    myClasses.clear();
+    myClasses.insert ( 1 );
+    //we have to indicate, that we are in an OCC setting
+    nrOfClasses--;
+  }
+
+  return nrOfClasses;
+}
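+
+// Worked example for prepareBinaryLabels (label values made up for illustration):
+// for y = ( 1, 3, 3, 7 ) the multi-class branch yields three one-vs-all vectors,
+//   binaryLabels[1] = ( +1, -1, -1, -1 )
+//   binaryLabels[3] = ( -1, +1, +1, -1 )
+//   binaryLabels[7] = ( -1, -1, -1, +1 )
+// and returns nrOfClasses = 3, whereas for y = ( 1, 2, 2 ) the binary branch yields the single
+// vector binaryLabels[2] = ( -1, +1, +1 ) and returns nrOfClasses = 1.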
+
+
+
+void FMKGPHyperparameterOptimization::optimize ( const NICE::Vector & y )
+{
+  if ( fmk == NULL )
+    fthrow ( Exception, "FastMinKernel object was not initialized!" );
+
+  this->labels  = y;
+  
+  std::map<int, NICE::Vector> binaryLabels;
+  std::set<int> classesToUse;
+  prepareBinaryLabels ( binaryLabels, y , classesToUse );
+  
+  //now call the main function :)
+  this->optimize(binaryLabels);
+}
+  
+void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & binaryLabels )
+{
+  Timer t;
+  t.start();
+  //how many different classes do we have right now?
+  int nrOfClasses = binaryLabels.size();
+  std::set<int> classesToUse;
+  classesToUse.clear();
+  for (std::map<int, NICE::Vector>::const_iterator clIt = binaryLabels.begin(); clIt != binaryLabels.end(); clIt++)
+  {
+    classesToUse.insert(clIt->first);
+  }
+  
+  if (verbose)
+  {
+    std::cerr << "Initial noise level: " << fmk->getNoise() << endl;
+
+    std::cerr << "Number of classes (=1 means we have a binary setting):" << nrOfClasses << std::endl;
+    std::cerr << "Effective number of classes (neglecting classes without positive examples): " << classesToUse.size() << std::endl;
+  }
+
+  // combine standard model and noise model
+  ikmsums.clear();
+
+  Timer t1;
+
+  t1.start();
+  //setup the kernel combination
+  if ( learnBalanced )
+  {
+    for ( std::set<int>::const_iterator clIt = classesToUse.begin(); clIt != classesToUse.end(); clIt++ )
+    {
+      IKMLinearCombination *ikmsum = new IKMLinearCombination ();
+      ikmsums.insert ( std::pair<int, IKMLinearCombination*> ( *clIt, ikmsum ) );
+    }
+  }
+  else
+  {
+    IKMLinearCombination *ikmsum = new IKMLinearCombination ();
+    ikmsums.insert ( std::pair<int, IKMLinearCombination*> ( 0, ikmsum ) );
+  }
+
+  if ( verbose )
+  {
+    std::cerr << "ikmsums.size(): " << ikmsums.size() << std::endl;
+    std::cerr << "binaryLabels.size(): " << binaryLabels.size() << std::endl;
+  }
+
+//   First model: noise
+  if ( learnBalanced )
+  {
+    int cnt ( 0 );
+    for ( std::set<int>::const_iterator clIt = classesToUse.begin(); clIt != classesToUse.end(); clIt++, cnt++ )
+    {
+      ikmsums.find ( *clIt )->second->addModel ( new IKMNoise ( binaryLabels[*clIt], fmk->getNoise(), optimizeNoise ) );
+    }
+  }
+  else
+  {
+    ikmsums.find ( 0 )->second->addModel ( new IKMNoise ( fmk->get_n(), fmk->getNoise(), optimizeNoise ) );
+  }
+  
+  // set a pretty low built-in noise, because we explicitly add the noise via the IKMNoise model
+  fmk->setNoise ( 0.0 );
+
+  //NOTE The GMHIKernel is always the last model which is added (this is necessary for easy store and restore functionality)
+  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+  {
+    it->second->addModel ( new GMHIKernel ( fmk, pf, NULL /* no quantization */ ) );
+  }
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;
+
+  std::map<int, GPLikelihoodApprox * > gplikes;
+  std::map<int, uint> parameterVectorSizes;
+
+  t1.start();
+  this->setupGPLikelihoodApprox ( gplikes, binaryLabels, parameterVectorSizes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;
+
+  if (verbose)
+  {
+    std::cerr << "parameterVectorSizes: " << std::endl;
+    for ( std::map<int, uint>::const_iterator pvsIt = parameterVectorSizes.begin(); pvsIt != parameterVectorSizes.end(); pvsIt++ )
+    {
+      std::cerr << pvsIt->first << " " << pvsIt->second << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  t1.start();
+  this->updateEigenVectors();
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;
+
+  if ( verbose )
+    std::cerr << "resulting eigenvalues for first class: " << eigenMax[0] << std::endl;
+
+  t1.start();
+  this->performOptimization ( gplikes, parameterVectorSizes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
+
+  if ( verbose )
+    cerr << "Preparing classification ..." << endl;
+
+  t1.start();
+  this->transformFeaturesWithOptimalParameters ( gplikes, parameterVectorSizes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
+
+  t1.start();
+  this->computeMatricesAndLUTs ( gplikes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the A'nB -objects: " << t1.getLast() << std::endl;
+
+  t.stop();
+
+  ResourceStatistics rs;
+  std::cerr << "Time used for learning: " << t.getLast() << std::endl;
+  long maxMemory;
+  rs.getMaximumMemory ( maxMemory );
+  std::cerr << "Maximum memory used: " << maxMemory << " KB" << std::endl;
+
+  //don't waste memory
+  if ( learnBalanced )
+  {
+    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      delete gpLikeIt->second;
+    }
+  }
+  else
+  {
+    delete gplikes.begin()->second;
+  }
+}
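+
+// Minimal training sketch; the FastMinKernel construction from raw training features and the
+// variable names are assumptions for illustration, not a fixed API:
+//   FastMinKernel *fmk = new FastMinKernel ( trainingFeatures, noise );
+//   FMKGPHyperparameterOptimization hypopt ( conf, pf, fmk, "GPHIKClassifier" );
+//   hypopt.optimize ( y ); // y holds one integer class label per training example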
+
+void FMKGPHyperparameterOptimization::optimizeAfterSingleIncrement ( const NICE::SparseVector & x, const bool & performOptimizationAfterIncrement )
+{
+  Timer t;
+  t.start();
+  if ( fmk == NULL )
+    fthrow ( Exception, "FastMinKernel object was not initialized!" );
+
+  map<int, NICE::Vector> binaryLabels;
+  set<int> classesToUse;
+  prepareBinaryLabels ( binaryLabels, labels , classesToUse );
+  if ( verbose )
+    std::cerr << "labels.size() after increment: " << labels.size() << std::endl;
+
+  Timer t1;
+  t1.start();
+  //update the kernel combinations
+  std::map<int, NICE::Vector>::const_iterator labelIt = binaryLabels.begin();
+  // note that if we only have a single ikmsum-object, the label vector will not be used at all in the internal objects (it is only relevant for IKMNoise)
+
+  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+  {
+    it->second->addExample ( x, labelIt->second );
+    labelIt++;
+  }
+
+  //we have to reset the fmk explicitly
+  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+  {
+    ( ( GMHIKernel* ) it->second->getModel ( it->second->getNumberOfModels() - 1 ) )->setFastMinKernel ( this->fmk );
+  }
+
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;
+
+  std::map<int, GPLikelihoodApprox * > gplikes;
+  std::map<int, uint> parameterVectorSizes;
+
+  t1.start();
+  this->setupGPLikelihoodApprox ( gplikes, binaryLabels, parameterVectorSizes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;
+
+  if ( verbose )
+  {
+    std::cerr << "parameterVectorSizes: " << std::endl;
+    for ( std::map<int, uint>::const_iterator pvsIt = parameterVectorSizes.begin(); pvsIt != parameterVectorSizes.end(); pvsIt++ )
+    {
+      std::cerr << pvsIt->first << " " << pvsIt->second << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  t1.start();
+  if ( usePreviousAlphas )
+  {
+    std::map<int, NICE::Vector>::const_iterator binaryLabelsIt = binaryLabels.begin();
+    std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin(); 
+    for ( std::map<int, NICE::Vector>::iterator lastAlphaIt = lastAlphas.begin() ;lastAlphaIt != lastAlphas.end(); lastAlphaIt++ )
+    {
+      int oldSize ( lastAlphaIt->second.size() );
+      lastAlphaIt->second.resize ( oldSize + 1 );
+
+      //We initialize it with the same values as we use in GPLikelihoodApprox in batch training
+      //default in GPLikelihoodApprox for the first time:
+      // alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );
+
+      double maxEigenValue ( 1.0 );
+      if ( (*eigenMaxIt).size() > 0 )
+        maxEigenValue = (*eigenMaxIt)[0];
+      double factor ( 1.0 / maxEigenValue );    
+
+      if ( binaryLabelsIt->second[oldSize] > 0 ) //we only have +1 and -1, so this might be beneficial in terms of speed
+        lastAlphaIt->second[oldSize] = factor;
+      else
+        lastAlphaIt->second[oldSize] = -factor; //we follow the initialization as done in previous steps
+        //lastAlphaIt->second[oldSize] = 0.0; // following the suggestion of Yeh and Darrell
+
+      binaryLabelsIt++;
+      
+      if (learnBalanced)
+      {
+        eigenMaxIt++;
+      }
+    }
+
+    for ( std::map<int, GPLikelihoodApprox * >::iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      gpLikeIt->second->setLastAlphas ( &lastAlphas );
+    }
+  }
+  //if we do not use previous alphas, we do not have to set up anything here  
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the alpha-objects: " << t1.getLast() << std::endl;
+
+  t1.start();
+  this->updateEigenVectors();
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;
+
+  if ( verbose )
+    std::cerr << "resulting eigenvalues for first class: " << eigenMax[0] << std::endl;
+
+  // we can reuse the already given performOptimization-method:
+  // OPT_GREEDY
+  // for this strategy we can't reuse any of the previously computed scores
+  // so come on, let's do the whole thing again...
+  // OPT_DOWNHILLSIMPLEX
+  // Here we can benefit from previous results, when we use them as initialization for our optimizer
+  // ikmsums.begin()->second->getParameters ( currentParameters ); uses the previously computed optimal parameters
+  // as initialization
+  // OPT_NONE
+  // nothing to do, obviously
+  //NOTE we could skip this, if we do not want to change our parameters given new examples
+  if ( performOptimizationAfterIncrement )
+  {
+    t1.start();
+    this->performOptimization ( gplikes, parameterVectorSizes, false /* initialize not with default values but using the last solution */ );
+    t1.stop();
+    if (verboseTime)
+      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
+
+    if ( verbose )
+      cerr << "Preparing after retraining for classification ..." << endl;
+
+    t1.start();
+    this->transformFeaturesWithOptimalParameters ( gplikes, parameterVectorSizes );
+    t1.stop();
+    if (verboseTime)
+      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
+  }
+  else
+  {
+    t1.start();
+    t1.stop();
+    std::cerr << "skip optimization" << std::endl;
+    if (verboseTime)
+      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
+
+    std::cerr << "skip feature transformation" << std::endl;
+    if (verboseTime)
+      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
+  }
+  
+  //NOTE unfortunately, the whole vector alpha differs, and not only its last entry.
+  // If we knew any method, which could update this efficiently, we could also compute A and B more efficiently by updating them.
+  // Since we are not aware of any such method, we have to compute them completely new
+  // :/
+  t1.start();
+  this->computeMatricesAndLUTs ( gplikes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the A'nB -objects: " << t1.getLast() << std::endl;
+
+  t.stop();  
+
+  ResourceStatistics rs;
+  std::cerr << "Time used for re-learning: " << t.getLast() << std::endl;
+  long maxMemory;
+  rs.getMaximumMemory ( maxMemory );
+  std::cerr << "Maximum memory used: " << maxMemory << " KB" << std::endl;
+
+  //don't waste memory
+  if ( learnBalanced )
+  {
+    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      delete gpLikeIt->second;
+    }
+  }
+  else
+  {
+    delete gplikes.begin()->second;
+  }
+}
+
+void FMKGPHyperparameterOptimization::optimizeAfterMultipleIncrements ( const std::vector<const NICE::SparseVector*> & x, const bool & performOptimizationAfterIncrement )
+{
+  Timer t;
+  t.start();
+  if ( fmk == NULL )
+    fthrow ( Exception, "FastMinKernel object was not initialized!" );
+
+  map<int, NICE::Vector> binaryLabels;
+  set<int> classesToUse;
+  prepareBinaryLabels ( binaryLabels, labels , classesToUse );
+  if ( verbose )
+    std::cerr << "labels.size() after increment: " << labels.size() << std::endl;
+
+  Timer t1;
+  t1.start();
+  //update the kernel combinations
+  std::map<int, NICE::Vector>::const_iterator labelIt = binaryLabels.begin();
+  // note that if we only have a single ikmsum-object, the label vector will not be used at all in the internal objects (it is only relevant for IKMNoise)
+
+  //TODO
+  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+  {
+    for ( std::vector<const NICE::SparseVector*>::const_iterator exampleIt = x.begin(); exampleIt != x.end(); exampleIt++ )
+    {
+      it->second->addExample ( **exampleIt, labelIt->second );
+    }
+    labelIt++;
+  }
+
+  //we have to reset the fmk explicitly
+  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+  {
+    ( ( GMHIKernel* ) it->second->getModel ( it->second->getNumberOfModels() - 1 ) )->setFastMinKernel ( this->fmk );
+  }
+
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;
+
+  std::map<int, GPLikelihoodApprox * > gplikes;
+  std::map<int, uint> parameterVectorSizes;
+
+  t1.start();
+  this->setupGPLikelihoodApprox ( gplikes, binaryLabels, parameterVectorSizes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;
+
+  if ( verbose )
+  {
+    std::cerr << "parameterVectorSizes: " << std::endl;
+    for ( std::map<int, uint>::const_iterator pvsIt = parameterVectorSizes.begin(); pvsIt != parameterVectorSizes.end(); pvsIt++ )
+    {
+      std::cerr << pvsIt->first << " " << pvsIt->second << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  t1.start();
+  if ( usePreviousAlphas )
+  {
+    std::map<int, NICE::Vector>::const_iterator binaryLabelsIt = binaryLabels.begin();
+    std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();
+    
+    for ( std::map<int, NICE::Vector>::iterator lastAlphaIt = lastAlphas.begin() ;lastAlphaIt != lastAlphas.end(); lastAlphaIt++ )
+    {
+      int oldSize ( lastAlphaIt->second.size() );
+      lastAlphaIt->second.resize ( oldSize + x.size() );
+
+      //We initialize it with the same values as we use in GPLikelihoodApprox in batch training
+      //default in GPLikelihoodApprox for the first time:
+      // alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );
+
+      double maxEigenValue ( 1.0 );
+      if ( (*eigenMaxIt).size() > 0 )
+        maxEigenValue = (*eigenMaxIt)[0];
+      double factor ( 1.0 / maxEigenValue );    
+
+      for ( uint i = 0; i < x.size(); i++ )
+      {
+        if ( binaryLabelsIt->second[oldSize+i] > 0 ) //we only have +1 and -1, so this might be beneficial in terms of speed
+          lastAlphaIt->second[oldSize+i] = factor;
+        else
+          lastAlphaIt->second[oldSize+i] = -factor; //we follow the initialization as done in previous steps
+          //lastAlphaIt->second[oldSize+i] = 0.0; // following the suggestion of Yeh and Darrell
+      }
+
+      binaryLabelsIt++;
+      
+      if (learnBalanced)
+      {
+        eigenMaxIt++;
+      }
+    }
+
+    for ( std::map<int, GPLikelihoodApprox * >::iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      gpLikeIt->second->setLastAlphas ( &lastAlphas );
+    }
+  }
+  //if we do not use previous alphas, we do not have to set up anything here  
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the alpha-objects: " << t1.getLast() << std::endl;
+
+  t1.start();
+  this->updateEigenVectors();
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;
+
+  if ( verbose )
+    std::cerr << "resulting eigenvalues of first class: " << eigenMax[0] << std::endl;
+
+  // we can reuse the already given performOptimization-method:
+  // OPT_GREEDY
+  // for this strategy we can't reuse any of the previously computed scores
+  // so come on, let's do the whole thing again...
+  // OPT_DOWNHILLSIMPLEX
+  // Here we can benefit from previous results, when we use them as initialization for our optimizer
+  // ikmsums.begin()->second->getParameters ( currentParameters ); uses the previously computed optimal parameters
+  // as initialization
+  // OPT_NONE
+  // nothing to do, obviously
+  //NOTE we could skip this, if we do not want to change our parameters given new examples
+  if ( performOptimizationAfterIncrement )
+  {
+    t1.start();
+    this->performOptimization ( gplikes, parameterVectorSizes, false /* initialize not with default values but using the last solution */ );
+    t1.stop();
+    if (verboseTime)
+      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
+    
+    t1.start();
+    this->transformFeaturesWithOptimalParameters ( gplikes, parameterVectorSizes );
+    t1.stop();
+    if (verboseTime)
+      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
+  }
+  else
+  {
+    t1.start();
+    t1.stop();
+    std::cerr << "skip optimization" << std::endl;
+    if (verboseTime)
+      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
+
+    std::cerr << "skip feature transformation" << std::endl;
+    if (verboseTime)
+      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
+
+    std::cerr << "skip computation of A, B and LUTs" << std::endl;
+    if (verboseTime)
+      std::cerr << "Time used for setting up the A'nB -objects: " << t1.getLast() << std::endl;
+  }
+
+  if ( verbose )
+    cerr << "Preparing after retraining for classification ..." << endl;
+
+
+  //NOTE unfortunately, the whole vector alpha differs, and not only its last entry.
+  // If we knew any method, which could update this efficiently, we could also compute A and B more efficiently by updating them.
+  // Since we are not aware of any such method, we have to compute them completely new
+  // :/
+  t1.start();
+  this->computeMatricesAndLUTs ( gplikes );
+  t1.stop();
+  if (verboseTime)
+    std::cerr << "Time used for setting up the A'nB -objects: " << t1.getLast() << std::endl;
+
+  t.stop();
+
+  ResourceStatistics rs;
+  std::cerr << "Time used for re-learning: " << t.getLast() << std::endl;
+  long maxMemory;
+  rs.getMaximumMemory ( maxMemory );
+  std::cerr << "Maximum memory used: " << maxMemory << " KB" << std::endl;
+
+  //don't waste memory
+  if ( learnBalanced )
+  {
+    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
+    {
+      delete gpLikeIt->second;
+    }
+  }
+  else
+  {
+    delete gplikes.begin()->second;
+  }
+}
+
+void FMKGPHyperparameterOptimization::prepareVarianceApproximation()
+{
+  PrecomputedType AVar;
+  fmk->hikPrepareKVNApproximation ( AVar );
+
+  precomputedAForVarEst = AVar;
+  precomputedAForVarEst.setIoUntilEndOfFile ( false );
+
+  if ( q != NULL )
+  {
+    //do we have results from previous runs but called this method nonetheless?
+    //then delete it and compute it again
+    if (precomputedTForVarEst != NULL)
+      delete precomputedTForVarEst;
+    
+    double *T = fmk->hikPrepareLookupTableForKVNApproximation ( *q, pf );
+    precomputedTForVarEst = T;
+  }
+}
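+
+// Note: this method has to be called once after training and before
+// computePredictiveVarianceApproximateRough, since that method relies on
+// precomputedAForVarEst (and, with quantization enabled, precomputedTForVarEst) being set up.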
+
+int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar, NICE::SparseVector & scores ) const
+{
+  // loop through all classes
+  if ( precomputedA.size() == 0 )
+  {
+    fthrow ( Exception, "The precomputation vector is zero...have you trained this classifier?" );
+  }
+
+  uint maxClassNo = 0;
+  for ( map<int, PrecomputedType>::const_iterator i = precomputedA.begin() ; i != precomputedA.end(); i++ )
+  {
+    uint classno = i->first;
+    maxClassNo = std::max ( maxClassNo, classno );
+    double beta;
+
+    if ( q != NULL ) {
+      map<int, double *>::const_iterator j = precomputedT.find ( classno );
+      double *T = j->second;
+      fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
+    } else {
+      const PrecomputedType & A = i->second;
+      map<int, PrecomputedType>::const_iterator j = precomputedB.find ( classno );
+      const PrecomputedType & B = j->second;
+
+      // fmk->hik_kernel_sum ( A, B, xstar, beta ); if A, B are of type Matrix
+      // Giving the transformation pf as an additional
+      // argument is necessary due to the following reason:
+      // FeatureMatrixT is sorted according to the original values; therefore,
+      // searching for upper and lower bounds ( findFirst... functions ) requires original feature
+      // values as input. However, for the calculation we need the transformed feature values.
+
+      fmk->hik_kernel_sum ( A, B, xstar, beta, pf );
+    }
+
+    scores[ classno ] = beta;
+  }
+  scores.setDim ( maxClassNo + 1 );
+
+  if ( precomputedA.size() > 1 ) {
+    // multi-class classification
+    return scores.maxElement();
+  } else {
+    // binary setting
+    // FIXME: not really flexible for every situation
+    scores[1] = -scores[0];
+    scores.setDim ( 2 );
+    return scores[ 0 ] <= 0.0 ? 0 : 1;
+  }
+}
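+
+// Minimal classification sketch (variable names are assumptions for illustration):
+//   NICE::SparseVector example; // sparse ( dimension, value ) pairs of the test feature
+//   NICE::SparseVector scores;
+//   int predictedClass = hypopt.classify ( example, scores );
+// afterwards, scores[ classno ] holds the predictive mean for each known class.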
+
+void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateRough ( const NICE::SparseVector & x, NICE::Vector & predVariances ) const
+{
+  double kSelf ( 0.0 );
+  for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
+  {
+    kSelf += pf->f ( 0, it->second );
+    // if weighted dimensions:
+    //kSelf += pf->f(it->first,it->second);
+  }
+
+  double normKStar;
+
+  if ( q != NULL )
+  {
+    if ( precomputedTForVarEst == NULL )
+    {
+      fthrow ( Exception, "The precomputed LUT for uncertainty prediction is NULL...have you prepared the uncertainty prediction?" );
+    }
+    fmk->hikComputeKVNApproximationFast ( precomputedTForVarEst, *q, x, normKStar );
+  }
+  else
+  {
+    fmk->hikComputeKVNApproximation ( precomputedAForVarEst, x, normKStar, pf );
+  }
+
+  predVariances.clear();
+  predVariances.resize( eigenMax.size() );
+  
+  // for balanced setting, we get approximations for every binary task
+  int cnt( 0 );
+  for (std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin(); eigenMaxIt != eigenMax.end(); eigenMaxIt++, cnt++)
+  {
+    predVariances[cnt] = kSelf - ( 1.0 / (*eigenMaxIt)[0] )* normKStar;
+  }
+}
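+
+// In formulas, the rough approximation computed above reads
+//   sigma^2(x*) = k(x*,x*) - (1/lambda_max) * ||k_*||^2 ,
+// i.e., the inverse kernel matrix of the exact predictive variance is replaced by
+// (1/lambda_max) times the identity, with lambda_max being the largest eigenvalue.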
+
+void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine ( const NICE::SparseVector & x, NICE::Vector & predVariances ) const
+{
+  // ---------------- compute the first term --------------------
+//   Timer t;
+//   t.start();
+
+  double kSelf ( 0.0 );
+  for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
+  {
+    kSelf += pf->f ( 0, it->second );
+    // if weighted dimensions:
+    //kSelf += pf->f(it->first,it->second);
+  }
+  // ---------------- compute the approximation of the second term --------------------
+//    t.stop();  
+//   std::cerr << "ApproxFine -- time for first term: "  << t.getLast()  << std::endl;
+
+//   t.start();
+  NICE::Vector kStar;
+  fmk->hikComputeKernelVector ( x, kStar );
+/*  t.stop();
+  std::cerr << "ApproxFine -- time for kernel vector: "  << t.getLast()  << std::endl;*/
+  
+  std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();
+  
+  predVariances.clear();
+  predVariances.resize( eigenMax.size() );  
+
+  int classIdx( 0 );
+  // for balanced setting, we get approximations for every binary task
+  for (std::vector< NICE::Matrix>::const_iterator eigenMaxVectorIt = eigenMaxVectors.begin(); eigenMaxVectorIt != eigenMaxVectors.end(); eigenMaxVectorIt++, eigenMaxIt++, classIdx++)
+  {
+    
+    double currentSecondTerm ( 0.0 );
+    double sumOfProjectionLengths ( 0.0 );
+
+    if ( ( kStar.size() != (*eigenMaxVectorIt).rows() ) || ( kStar.size() <= 0 ) )
+    {
+      //NOTE output?
+    }
+
+//     NICE::Vector multiplicationResults; // will contain nrOfEigenvaluesToConsiderForVarApprox many entries
+//     multiplicationResults.multiply ( *eigenMaxVectorIt, kStar, true/* transpose */ );
+    NICE::Vector multiplicationResults( nrOfEigenvaluesToConsiderForVarApprox, 0.0 );
+    //computing multiplicationResults.multiply ( *eigenMaxVectorIt, kStar, true /* transpose */ )
+    //turned out to be extremely slow here,
+    //so we compute the product by ourselves
+    for ( uint tmpI = 0; tmpI < kStar.size(); tmpI++)
+    {
+      double kStarI ( kStar[tmpI] );
+      for ( int tmpJ = 0; tmpJ < nrOfEigenvaluesToConsiderForVarApprox; tmpJ++)
+      {
+        multiplicationResults[tmpJ] += kStarI * (*eigenMaxVectorIt)(tmpI,tmpJ);
+      }
+    }
+
+    double projectionLength ( 0.0 );
+    int cnt ( 0 );
+    NICE::Vector::const_iterator it = multiplicationResults.begin();
+
+    while ( cnt < ( nrOfEigenvaluesToConsiderForVarApprox - 1 ) )
+    {
+      projectionLength = ( *it );
+      currentSecondTerm += ( 1.0 / (*eigenMaxIt)[cnt] ) * pow ( projectionLength, 2 );
+      sumOfProjectionLengths += pow ( projectionLength, 2 );
+      it++;
+      cnt++;
+    }
+
+    double normKStar ( pow ( kStar.normL2 (), 2 ) );
+
+    currentSecondTerm += ( 1.0 / (*eigenMaxIt)[nrOfEigenvaluesToConsiderForVarApprox-1] ) * ( normKStar - sumOfProjectionLengths );
+
+    if ( ( normKStar - sumOfProjectionLengths ) < 0 )
+    {
+  //     std::cerr << "Attention: normKStar - sumOfProjectionLengths is smaller than zero -- strange!" << std::endl;
+    }
+    predVariances[classIdx] = kSelf - currentSecondTerm; 
+  }
+}
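+
+// In formulas, with the m = nrOfEigenvaluesToConsiderForVarApprox leading eigenpairs
+// (lambda_i, v_i) of the kernel matrix, the fine approximation computed above reads
+//   sigma^2(x*) = k(x*,x*) - sum_{i<m} (1/lambda_i) * (v_i^T k_*)^2
+//                          - (1/lambda_m) * ( ||k_*||^2 - sum_{i<m} (v_i^T k_*)^2 ),
+// i.e., exact projections are used for the m-1 leading eigendirections and the remaining
+// part of ||k_*||^2 is weighted with the inverse of the m-th eigenvalue.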
+
+void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NICE::SparseVector & x, NICE::Vector & predVariances ) const
+{
+    Timer t;
+//   t.start();
+  // ---------------- compute the first term --------------------
+  double kSelf ( 0.0 );
+  for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
+  {
+    kSelf += pf->f ( 0, it->second );
+    // if weighted dimensions:
+    //kSelf += pf->f(it->first,it->second);
+  }
+
+  // ---------------- compute the second term --------------------
+//     t.stop();  
+//   std::cerr << "ApproxExact -- time for first term: "  << t.getLast()  << std::endl;
+
+//   t.start();  
+  NICE::Vector kStar;
+  fmk->hikComputeKernelVector ( x, kStar );
+//  t.stop();
+//   std::cerr << "ApproxExact -- time for kernel vector: "  << t.getLast()  << std::endl;
+//   
+
+  // for balanced setting, we get uncertainties for every binary task 
+  std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();
+  
+  predVariances.clear();
+  predVariances.resize( eigenMax.size() );  
+
+  int cnt( 0 );
+  for (std::map<int, IKMLinearCombination * >::const_iterator ikmSumIt = ikmsums.begin(); ikmSumIt != ikmsums.end(); ikmSumIt++, eigenMaxIt++, cnt++ )
+  {  
+    //now run the ILS method
+    NICE::Vector diagonalElements;
+    ikmSumIt->second->getDiagonalElements ( diagonalElements );
+
+//     t.start();
+    // init simple Jacobi pre-conditioning
+    ILSConjugateGradients *linsolver_cg = dynamic_cast<ILSConjugateGradients *> ( linsolver );
+  
+
+    //perform pre-conditioning
+    if ( linsolver_cg != NULL )
+      linsolver_cg->setJacobiPreconditioner ( diagonalElements );
+   
+
+    Vector beta;
+    
+      /** About finding a good initial solution (see also GPLikelihoodApproximation)
+        * K~ = K + sigma^2 I
+        *
+        * K~ \approx lambda_max v v^T
+        * \lambda_max v v^T * alpha = k_*     | multiply with v^T from the left
+        * => \lambda_max v^T alpha = v^T k_*
+        * => alpha = k_* / lambda_max is a good initial guess
+        * (substituting this back into the first equation requires v = k_*,
+        *  i.e., the guess is exact if k_* is aligned with the dominant eigenvector)
+        * In practice this reduces the number of iterations by about 5 to 8
+        */  
+    beta = (kStar * (1.0 / (*eigenMaxIt)[0]) );
+/*  t.stop();
+    std::cerr << "ApproxExact -- time for preconditioning etc: "  << t.getLast()  << std::endl;
+    t.start();*/
+    linsolver->solveLin ( * ( ikmSumIt->second ), kStar, beta );
+/*  t.stop();
+    std::cerr << "ApproxExact -- time for lin solve: "  << t.getLast()  << std::endl;*/
+
+    beta *= kStar;
+    
+    double currentSecondTerm( beta.Sum() );
+    predVariances[cnt] = kSelf - currentSecondTerm;
+  }
+}
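+
+// A minimal usage sketch (illustrative only, not part of the original interface):
+// how the three variance routines relate for a trained object. The helper name and
+// the assumption of a ready-to-use 'fmkgp' (optimize() and, for the approximations,
+// prepareVarianceApproximation() already called) are hypothetical.
+static void sketchCompareVariances ( const FMKGPHyperparameterOptimization & fmkgp, const NICE::SparseVector & x )
+{
+  NICE::Vector varRough, varFine, varExact;
+  fmkgp.computePredictiveVarianceApproximateRough ( x, varRough ); // cheapest: |k_*|^2 / lambda_max bound
+  fmkgp.computePredictiveVarianceApproximateFine  ( x, varFine );  // k eigenpairs, see above
+  fmkgp.computePredictiveVarianceExact            ( x, varExact ); // ILS, most expensive
+  // up to quantization effects and the ILS tolerance one expects, per class entry,
+  // varRough >= varFine >= varExact, since both approximations can only
+  // under-estimate the subtracted second term
+}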
+
+// ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
+
+void FMKGPHyperparameterOptimization::restore ( std::istream & is, int format )
+{
+  if ( is.good() )
+  {
+    //load the underlying data
+    if (fmk != NULL)
+      delete fmk;
+    fmk = new FastMinKernel;
+    fmk->restore(is,format);    
+    
+    //now set up the GHIK-things in ikmsums
+    for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
+    {
+      it->second->addModel ( new GMHIKernel ( fmk, this->pf, this->q ) );
+    }    
+    
+    is.precision ( numeric_limits<double>::digits10 + 1 );
+
+    string tmp;
+    is >> tmp; //class name
+
+    is >> tmp;
+    is >> learnBalanced;
+    
+    is >> tmp; //precomputedA:
+    is >> tmp; //size:
+
+    int preCompSize ( 0 );
+    is >> preCompSize;
+    precomputedA.clear();
+    
+    std::cerr << "precomputedA.size(): "<< preCompSize << std::endl;
+
+    for ( int i = 0; i < preCompSize; i++ )
+    {
+      int nr;
+      is >> nr;
+      PrecomputedType pct;
+      pct.setIoUntilEndOfFile ( false );
+      pct.restore ( is, format );
+      precomputedA.insert ( std::pair<int, PrecomputedType> ( nr, pct ) );
+    }
+    
+    is >> tmp; //precomputedB:
+    is >> tmp; //size:
+
+    is >> preCompSize;
+    precomputedB.clear();
+
+    for ( int i = 0; i < preCompSize; i++ )
+    {
+      int nr;
+      is >> nr;
+      PrecomputedType pct;
+      pct.setIoUntilEndOfFile ( false );
+      pct.restore ( is, format );
+      precomputedB.insert ( std::pair<int, PrecomputedType> ( nr, pct ) );
+    }    
+    
+    is >> tmp;
+    int precomputedTSize;
+    is >> precomputedTSize;
+
+    precomputedT.clear();
+
+    if ( precomputedTSize > 0 )
+    {
+      is >> tmp;
+      int sizeOfLUT;
+      is >> sizeOfLUT;    
+      
+      for (int i = 0; i < precomputedTSize; i++)
+      {
+        is >> tmp;
+        int index;
+        is >> index;        
+        double * array = new double [ sizeOfLUT ];
+        for ( int j = 0; j < sizeOfLUT; j++ )
+        {
+          is >> array[j];
+        }
+        precomputedT.insert ( std::pair<int, double*> ( index, array ) );
+      }
+    }    
+
+    //now restore the things we need for the variance computation
+    is >> tmp;
+    int sizeOfAForVarEst;
+    is >> sizeOfAForVarEst;
+    if ( sizeOfAForVarEst > 0 )
+    {
+      precomputedAForVarEst.setIoUntilEndOfFile ( false );
+      precomputedAForVarEst.restore ( is, format );
+    }    
+
+    is >> tmp; //precomputedTForVarEst
+    is >> tmp; // NOTNULL or NULL
+    if (tmp.compare("NOTNULL") == 0)
+    {
+      int sizeOfLUT;
+      is >> sizeOfLUT;      
+      precomputedTForVarEst = new double [ sizeOfLUT ];
+      for ( int i = 0; i < sizeOfLUT; i++ )
+      {
+        is >> precomputedTForVarEst[i];
+      }      
+    }
+    else
+    {
+      if (precomputedTForVarEst != NULL)
+      {
+        delete [] precomputedTForVarEst;
+        precomputedTForVarEst = NULL;
+      }
+    }
+    
+    //restore eigenvalues and eigenvectors
+    is >> tmp; //eigenMax.size():
+    int eigenMaxSize;
+    is >> eigenMaxSize;
+    
+    eigenMax.clear();
+    for (int i = 0; i < eigenMaxSize; i++)
+    {
+      NICE::Vector eigenMaxEntry;
+      is >> eigenMaxEntry;
+      eigenMax.push_back( eigenMaxEntry );
+    }
+    
+    is >> tmp; //eigenMaxVector.size():
+    int eigenMaxVectorsSize;
+    is >> eigenMaxVectorsSize;
+    
+    eigenMaxVectors.clear();
+    for (int i = 0; i < eigenMaxVectorsSize; i++)
+    {
+      NICE::Matrix eigenMaxVectorsEntry;
+      is >> eigenMaxVectorsEntry;
+      eigenMaxVectors.push_back( eigenMaxVectorsEntry );
+    }       
+
+    is >> tmp; //ikmsums:
+    is >> tmp; //size:
+    int ikmSumsSize ( 0 );
+    is >> ikmSumsSize;
+    ikmsums.clear();
+
+    for ( int i = 0; i < ikmSumsSize; i++ )
+    {
+      int clNr ( 0 );
+      is >> clNr;
+
+      IKMLinearCombination *ikmsum = new IKMLinearCombination ();
+
+      int nrOfModels ( 0 );
+      is >> tmp;
+      is >> nrOfModels;
+
+      //the first one is always our noise-model
+      IKMNoise * ikmnoise = new IKMNoise ();
+      ikmnoise->restore ( is, format );
+
+      ikmsum->addModel ( ikmnoise );
+
+      //NOTE are there any more models you added? then add them here respectively in the correct order
+
+      ikmsums.insert ( std::pair<int, IKMLinearCombination*> ( clNr, ikmsum ) );
+
+      //the last one is the GHIK - which we do not have to restore, but simply reset later on
+    }
+  }
+  else
+  {
+    std::cerr << "InStream not initialized - restoring not possible!" << std::endl;
+  }
+}
+
+void FMKGPHyperparameterOptimization::store ( std::ostream & os, int format ) const
+{
+  if ( os.good() )
+  {
+    fmk->store ( os, format );
+
+    os.precision ( numeric_limits<double>::digits10 + 1 );
+
+    os << "FMKGPHyperparameterOptimization" << std::endl;
+
+    os << "learnBalanced: " << learnBalanced << std::endl;
+
+    //we only have to store the things we computed, since the remaining settings come with the config file afterwards
+    
+    os << "precomputedA: size: " << precomputedA.size() << std::endl;
+    std::map< int, PrecomputedType >::const_iterator preCompIt = precomputedA.begin();
+    for ( uint i = 0; i < precomputedA.size(); i++ )
+    {
+      os << preCompIt->first << std::endl;
+      ( preCompIt->second ).store ( os, format );
+      preCompIt++;
+    }
+    os << "precomputedB: size: " << precomputedB.size() << std::endl;
+    preCompIt = precomputedB.begin();
+    for ( uint i = 0; i < precomputedB.size(); i++ )
+    {
+      os << preCompIt->first << std::endl;
+      ( preCompIt->second ).store ( os, format );
+      preCompIt++;
+    }    
+    
+    
+    os << "precomputedT.size(): " << precomputedT.size() << std::endl;
+    if ( precomputedT.size() > 0 )
+    {
+      int sizeOfLUT ( 0 );
+      if ( q != NULL )
+        sizeOfLUT = q->size() * this->fmk->get_d();
+      os << "SizeOfLUTs: " << sizeOfLUT << std::endl;      
+      for ( std::map< int, double * >::const_iterator it = precomputedT.begin(); it != precomputedT.end(); it++ )
+      {
+        os << "index: " << it->first << std::endl;
+        for ( int i = 0; i < sizeOfLUT; i++ )
+        {
+          os << ( it->second ) [i] << " ";
+        }
+        os << std::endl;
+      }
+    }    
+
+    //now store the things needed for the variance estimation
+    
+    os << "precomputedAForVarEst.size(): "<< precomputedAForVarEst.size() << std::endl;
+    
+    if (precomputedAForVarEst.size() > 0)
+    {
+      precomputedAForVarEst.store ( os, format );
+      os << std::endl; 
+    }
+    
+    if ( precomputedTForVarEst != NULL )
+    {
+      os << "precomputedTForVarEst NOTNULL" << std::endl;
+      int sizeOfLUT ( 0 );
+      if ( q != NULL )
+        sizeOfLUT = q->size() * this->fmk->get_d();
+      
+      os << sizeOfLUT << std::endl;
+      for ( int i = 0; i < sizeOfLUT; i++ )
+      {
+        os << precomputedTForVarEst[i] << " ";
+      }
+      os << std::endl;
+    }
+    else
+    {
+      os << "precomputedTForVarEst NULL" << std::endl;
+    }
+    
+    //store the eigenvalues and eigenvectors
+    os << "eigenMax.size(): " << std::endl;
+    os << eigenMax.size() << std::endl;
+    
+    for (std::vector<NICE::Vector>::const_iterator it = this->eigenMax.begin(); it != this->eigenMax.end(); it++)
+    {
+      os << *it << std::endl;
+    }
+    
+    os << "eigenMaxVectors.size(): " << std::endl;
+    os << eigenMaxVectors.size() << std::endl;
+    
+    for (std::vector<NICE::Matrix>::const_iterator it = eigenMaxVectors.begin(); it != eigenMaxVectors.end(); it++)
+    {
+      os << *it << std::endl;
+    }      
+
+    os << "ikmsums: size: " << ikmsums.size() << std::endl;
+
+    std::map<int, IKMLinearCombination * >::const_iterator ikmSumIt = ikmsums.begin();
+
+    for ( uint i = 0; i < ikmsums.size(); i++ )
+    {
+      os << ikmSumIt->first << std::endl;
+      os << "numberOfModels: " << ( ikmSumIt->second )->getNumberOfModels() << std::endl;
+      //the last one is always the GHIK, which we do not have to store
+      for ( int j = 0; j < ( ikmSumIt->second )->getNumberOfModels() - 1; j++ )
+      {
+        ( ( ikmSumIt->second )->getModel ( j ) )->store ( os, format );
+      }
+      ikmSumIt++;
+    }
+  }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }
+}
+
+void FMKGPHyperparameterOptimization::clear ( ) {}
+
+void FMKGPHyperparameterOptimization::addExample ( const NICE::SparseVector & x, const double & label, const bool & performOptimizationAfterIncrement )
+{
+  this->labels.append ( label );
+
+  // add the new example to our data structure
+  // It is necessary to do this already here and not later on, for internal reasons (see GMHIKernel for more details)
+  Timer t;
+  t.start();
+  fmk->addExample ( x, pf );
+  t.stop();
+  if (verboseTime)
+    std::cerr << "Time used for adding the data to the fmk object: " << t.getLast() << std::endl;
+
+  // do the optimization again using the previously known solutions as initialization
+  // update the corresponding matrices A, B and lookup tables T
+  optimizeAfterSingleIncrement ( x, performOptimizationAfterIncrement );
+}
+
+void FMKGPHyperparameterOptimization::addMultipleExamples ( const std::vector<const NICE::SparseVector*> & newExamples, const NICE::Vector & _labels, const bool & performOptimizationAfterIncrement )
+{
+  int oldSize ( this->labels.size() );
+  this->labels.resize ( this->labels.size() + _labels.size() );
+  for ( uint i = 0; i < _labels.size(); i++ )
+  {
+    this->labels[i+oldSize] = _labels[i];
+  }
+
+  // add the new example to our data structure
+  // It is necessary to do this already here and not later on, for internal reasons (see GMHIKernel for more details)
+  Timer t;
+  t.start();
+  for ( std::vector<const NICE::SparseVector*>::const_iterator exampleIt = newExamples.begin(); exampleIt != newExamples.end(); exampleIt++ )
+  {
+    fmk->addExample ( **exampleIt , pf );
+  }
+  t.stop();
+  if (verboseTime)
+    std::cerr << "Time used for adding the data to the fmk object: " << t.getLast() << std::endl;
+  
+  Timer tVar;
+  tVar.start();  
+  //do we need to update our matrices?
+  if ( precomputedAForVarEst.size() != 0)
+  {
+    //this computes everything from scratch
+    this->prepareVarianceApproximation();
+    //this would perform a more sophisticated update,
+    //but unfortunately there is a bug somewhere
+    //TODO fix me!
+//     std::cerr << "update the LUTs needed for variance computation" << std::endl;
+//     for ( std::vector<const NICE::SparseVector*>::const_iterator exampleIt = newExamples.begin(); exampleIt != newExamples.end(); exampleIt++ )
+//     {  
+//       std::cerr << "new example: " << std::endl;
+//       (**exampleIt).store(std::cerr);
+//       std::cerr << "now update the LUT for var est" << std::endl;
+//       fmk->updatePreparationForKVNApproximation( **exampleIt, precomputedAForVarEst, pf );  
+//       if ( q != NULL )
+//       {
+//         fmk->updateLookupTableForKVNApproximation( **exampleIt, precomputedTForVarEst, *q, pf );
+//       }
+//     }
+//     std::cerr << "update of LUTs for variance compuation done" << std::endl;
+  }
+  tVar.stop();
+  if (verboseTime)
+    std::cerr << "Time used for computing the Variance Matrix and LUT: " << tVar.getLast() << std::endl;  
+  
+
+
+  // do the optimization again using the previously known solutions as initialization
+  // update the corresponding matrices A, B and lookup tables T
+  optimizeAfterMultipleIncrements ( newExamples, performOptimizationAfterIncrement );
+}
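+
+// A minimal usage sketch (illustrative only): adding a batch of new examples in the
+// incremental-learning setting. The helper name and variable names are hypothetical.
+static void sketchIncrementalLearning ( FMKGPHyperparameterOptimization & fmkgp,
+                                        const std::vector<const NICE::SparseVector*> & newExamples,
+                                        const NICE::Vector & newLabels )
+{
+  // the last argument controls whether the full hyperparameter optimization is
+  // re-run after the increment (see optimizeAfterMultipleIncrements)
+  fmkgp.addMultipleExamples ( newExamples, newLabels, false );
+}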

+ 270 - 0
FMKGPHyperparameterOptimization.h

@@ -0,0 +1,270 @@
+/** 
+* @file FMKGPHyperparameterOptimization.h
+* @brief Heart of the framework to set up everything, perform optimization, incremental updates, classification, variance prediction (Interface)
+* @author Erik Rodner, Alexander Freytag
+* @date 01/02/2012
+
+*/
+#ifndef _NICE_FMKGPHYPERPARAMETEROPTIMIZATIONINCLUDE
+#define _NICE_FMKGPHYPERPARAMETEROPTIMIZATIONINCLUDE
+
+#include <vector>
+#include <set>
+#include <map>
+
+#include <core/algebra/EigValues.h>
+#include <core/algebra/IterativeLinearSolver.h>
+#include <core/basics/Config.h>
+#include <core/basics/Persistent.h>
+#include <core/vector/VectorT.h>
+
+#ifdef NICE_USELIB_MATIO
+#include <core/matlabAccess/MatFileIO.h>
+#endif
+
+#include "FastMinKernel.h"
+#include "GPLikelihoodApprox.h"
+#include "IKMLinearCombination.h"
+#include "Quantization.h"
+
+#include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
+
+namespace NICE {
+  
+  /** 
+ * @class FMKGPHyperparameterOptimization
+ * @brief Heart of the framework to set up everything, perform optimization, incremental updates, classification, variance prediction
+ * @author Erik Rodner, Alexander Freytag
+ */
+  
+class FMKGPHyperparameterOptimization : public NICE::Persistent
+{
+  protected:
+    enum {
+      OPT_GREEDY = 0,
+      OPT_DOWNHILLSIMPLEX,
+      OPT_NONE,
+      OPT_NUMBEROFMETHODS
+    };
+
+    /** optimization method used */
+    int optimizationMethod;
+
+    /** the parameterized function we use within the minimum kernel */
+    ParameterizedFunction *pf;
+
+    /** method computing eigenvalues */
+    EigValues *eig;
+
+    /** method for solving linear equation systems */
+    IterativeLinearSolver *linsolver;
+
+    /** object which stores our sorted data and provides fast hik functions */
+    FastMinKernel *fmk;
+
+    /** our quantization object */
+    Quantization *q;
+
+    /** verbose flag */
+    bool verbose;    
+    /** verbose flag for time measurement outputs */
+    bool verboseTime;        
+    /** debug flag for several outputs useful for debugging*/
+    bool debug;    
+
+    /** optimization parameters */
+    double parameterUpperBound;
+    double parameterLowerBound;
+    double parameterStepSize;
+    int ils_max_iterations;
+
+    int downhillSimplexMaxIterations;
+    double downhillSimplexTimeLimit;
+    double downhillSimplexParamTol;
+
+    /** whether to compute the likelihood with the usual method */
+    bool verifyApproximation;
+    
+    /** number of Eigenvalues to consider in the approximation of |K|_F */
+    int nrOfEigenvaluesToConsider;
+    
+    /** number of Eigenvalues to consider in the fine approximation of the predictive variance */
+    int nrOfEigenvaluesToConsiderForVarApprox;
+
+    typedef VVector PrecomputedType;
+
+    /** precomputed arrays and lookup tables */
+    std::map< int, PrecomputedType > precomputedA;
+    std::map< int, PrecomputedType > precomputedB;
+    std::map< int, double * > precomputedT;
+
+    PrecomputedType precomputedAForVarEst;
+    double * precomputedTForVarEst;
+
+    //! optimize noise with the GP likelihood
+    bool optimizeNoise;
+
+    //! learn in a balanced manner
+    bool learnBalanced;       
+       
+    //! k largest eigenvalues for every kernel matrix (k == nrOfEigenvaluesToConsider); if we do not use balanced learning, we have only 1 entry
+    std::vector< NICE::Vector> eigenMax;
+
+    //! eigenvectors corresponding to k largest eigenvalues for every matrix (k == nrOfEigenvaluesToConsider) -- format: nxk
+    std::vector< NICE::Matrix> eigenMaxVectors;
+    
+    //! needed for optimization and variance approximation
+    std::map<int, IKMLinearCombination * > ikmsums;
+    
+    //! storing the labels is needed for Incremental Learning (re-optimization)
+    NICE::Vector labels;
+    
+    //! we store the alpha vectors for good initializations in the IL setting
+    std::map<int, NICE::Vector> lastAlphas;
+
+    //! calculate binary label vectors using a multi-class label vector
+    int prepareBinaryLabels ( std::map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses);     
+    
+    //! prepare the GPLike objects for given binary labels and already given ikmsum-objects
+    inline void setupGPLikelihoodApprox(std::map<int,GPLikelihoodApprox * > & gplikes, const std::map<int, NICE::Vector> & binaryLabels, std::map<int,uint> & parameterVectorSizes);    
+    
+    //! update eigenvectors and eigenvalues for given ikmsum-objects and a method to compute eigenvalues
+    inline void updateEigenVectors();
+    
+    //! core of the optimize-functions
+    inline void performOptimization(std::map<int,GPLikelihoodApprox * > & gplikes, const std::map<int,uint> & parameterVectorSizes, const bool & roughOptimization = true);
+    
+    //! apply the optimized transformation values to the underlying features
+    inline void transformFeaturesWithOptimalParameters(const std::map<int,GPLikelihoodApprox * > & gplikes, const std::map<int,uint> & parameterVectorSizes);
+    
+    //! build the resulting matrices A and B as well as lookup tables T for fast evaluations using the optimized parameter settings
+    inline void computeMatricesAndLUTs(const std::map<int,GPLikelihoodApprox * > & gplikes);
+    
+    //! Update optimal parameters after adding a new example.  
+    void optimizeAfterSingleIncrement (const NICE::SparseVector & x, const bool & performOptimizationAfterIncrement = false);    
+    //! Update optimal parameters after adding multiple examples.  
+    void optimizeAfterMultipleIncrements (const std::vector<const NICE::SparseVector*> & x, const bool & performOptimizationAfterIncrement = false);   
+    
+    //! use the alphas from the last iteration as initial guess for the ILS?
+    bool usePreviousAlphas;
+    
+  public:  
+    
+
+    FMKGPHyperparameterOptimization();
+    
+    /**
+    * @brief standard constructor
+    *
+    * @param pf pointer to a parameterized function used within the minimum kernel min(f(x_i), f(x_j)) (will not be deleted)
+    * @param noise GP label noise
+    * @param fmk pointer to a pre-initialized structure (will be deleted)
+    */
+    FMKGPHyperparameterOptimization( const Config *conf, ParameterizedFunction *pf, FastMinKernel *fmk = NULL, const std::string & confSection = "GPHIKClassifier" );
+      
+    /** simple destructor */
+    virtual ~FMKGPHyperparameterOptimization();
+    
+    // get and set methods
+    void setParameterUpperBound(const double & _parameterUpperBound);
+    void setParameterLowerBound(const double & _parameterLowerBound);  
+    
+    //high level methods
+    
+    void initialize( const Config *conf, ParameterizedFunction *pf, FastMinKernel *fmk = NULL, const std::string & confSection = "GPHIKClassifier" );
+       
+#ifdef NICE_USELIB_MATIO
+    /**
+    * @brief Perform hyperparameter optimization
+    *
+    * @param data MATLAB data structure, like a feature matrix loaded from ImageNet
+    * @param y label vector (arbitrary), will be converted into a binary label vector
+    * @param positives set of positive examples (indices)
+    * @param negatives set of negative examples (indices)
+    */
+    void optimizeBinary ( const sparse_t & data, const NICE::Vector & y, const std::set<int> & positives, const std::set<int> & negatives, double noise );
+
+    /**
+    * @brief Perform hyperparameter optimization for GP multi-class or binary problems
+    *
+    * @param data MATLAB data structure, like a feature matrix loaded from ImageNet
+    * @param y label vector with multi-class labels
+    * @param examples mapping of example index to new index
+    */
+    void optimize ( const sparse_t & data, const NICE::Vector & y, const std::map<int, int> & examples, double noise );
+#endif
+
+    /**
+    * @brief Perform hyperparameter optimization (multi-class or binary) assuming an already initialized fmk object
+    *
+    * @param y label vector (multi-class as well as binary labels supported)
+    */
+    void optimize ( const NICE::Vector & y );
+    
+    /**
+    * @brief Perform hyperparameter optimization (multi-class or binary) assuming an already initialized fmk object
+    *
+    * @param binLabels vector of binary label vectors (1,-1) and corresponding class no.
+    */
+    void optimize ( std::map<int, NICE::Vector> & binaryLabels );    
+    
+    /**
+    * @brief Compute the necessary variables for approximations of the predictive variance, assuming an already initialized fmk object
+    * @author Alexander Freytag
+    * @date 11-04-2012 (dd-mm-yyyy)
+    */       
+    void prepareVarianceApproximation();
+    
+    /**
+    * @brief classify an example 
+    *
+    * @param x input example
+    * @param scores scores for each class number
+    *
+    * @return class number achieving the best score
+    */
+    int classify ( const NICE::SparseVector & x, SparseVector & scores ) const;
+
+    /**
+    * @brief compute predictive variance for a given test example using a rough approximation: k_{**} -  k_*^T (K+\sigma I)^{-1} k_* <= k_{**} - |k_*|^2 * 1 / \lambda_max(K + \sigma I), where we approximate |k_*|^2 by neglecting the mixed terms
+    * @author Alexander Freytag
+    * @date 10-04-2012 (dd-mm-yyyy)
+    * @param x input example
+    * @param predVariances contains the approximations of the predictive variances
+    *
+    */    
+    void computePredictiveVarianceApproximateRough(const NICE::SparseVector & x, NICE::Vector & predVariances) const;
+
+    /**
+    * @brief compute predictive variance for a given test example using a fine approximation  (k eigenvalues and eigenvectors to approximate the quadratic term)
+    * @author Alexander Freytag
+    * @date 18-04-2012 (dd-mm-yyyy)
+    * @param x input example
+    * @param predVariances contains the approximations of the predictive variances
+    *
+    */    
+    void computePredictiveVarianceApproximateFine(const NICE::SparseVector & x, NICE::Vector & predVariances) const;    
+    
+    /**
+    * @brief compute exact predictive variance for a given test example using ILS methods (exact, but more time consuming than approx versions)
+    * @author Alexander Freytag
+    * @date 10-04-2012 (dd-mm-yyyy)
+    * @param x input example
+    * @param predVariances contains the approximations of the predictive variances
+    *
+    */    
+    void computePredictiveVarianceExact(const NICE::SparseVector & x, NICE::Vector & predVariances) const;
+    
+    /** Persistent interface */
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ( ) ;
+    
+    void addExample( const NICE::SparseVector & x, const double & label, const bool & performOptimizationAfterIncrement = true);
+    void addMultipleExamples( const std::vector<const NICE::SparseVector*> & newExamples, const NICE::Vector & labels, const bool & performOptimizationAfterIncrement = false);
+        
+};
+
+}
+
+#endif

+ 1488 - 0
FastMinKernel.cpp

@@ -0,0 +1,1488 @@
+/** 
+ * @file FastMinKernel.cpp
+ * @brief Efficient GPs with HIK for classification by regression (Implementation)
+ * @author Alexander Freytag
+ * @date 06-12-2011 (dd-mm-yyyy)
+*/
+#include <iostream>
+//#include "tools.h"
+
+#include "core/basics/vectorio.h"
+#include "core/basics/Timer.h"
+#include "FastMinKernel.h"
+
+using namespace std;
+using namespace NICE;
+
+/* protected methods*/
+
+
+/* public methods*/
+
+
+FastMinKernel::FastMinKernel()
+{
+  this->d = -1;
+  this->n = -1;
+  this->noise = 1.0;
+  approxScheme = MEDIAN;
+  verbose = false;
+  this->setDebug(false);
+}
+
+FastMinKernel::FastMinKernel( const std::vector<std::vector<double> > & X, const double noise, const bool _debug, const int & _dim)
+{
+  this->setDebug(_debug);
+  this->hik_prepare_kernel_multiplications ( X, this->X_sorted, _dim);
+  this->d = X_sorted.get_d();
+  this->n = X_sorted.get_n();
+  this->noise = noise;
+  approxScheme = MEDIAN;
+  verbose = false;
+}
+      
+#ifdef NICE_USELIB_MATIO
+FastMinKernel::FastMinKernel ( const sparse_t & X, const double noise, const std::map<int, int> & examples, const bool _debug, const int & _dim) : X_sorted( X, examples, _dim )
+{
+  this->d = X_sorted.get_d();
+  this->n = X_sorted.get_n();
+  this->noise = noise;
+  approxScheme = MEDIAN;
+  verbose = false;
+  this->setDebug(_debug);
+}
+#endif
+
+FastMinKernel::FastMinKernel ( const vector< SparseVector * > & X, const double noise, const bool _debug, const bool & dimensionsOverExamples, const int & _dim)
+{
+  this->setDebug(_debug);
+  this->hik_prepare_kernel_multiplications ( X, this->X_sorted, dimensionsOverExamples, _dim);
+  this->d = X_sorted.get_d();
+  this->n = X_sorted.get_n();
+  this->noise = noise;
+  approxScheme = MEDIAN;
+  verbose = false;
+}
+
+FastMinKernel::~FastMinKernel()
+{
+}
+
+void FastMinKernel::applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf)
+{
+  this->X_sorted.applyFunctionToFeatureMatrix(pf);
+}
+
+void FastMinKernel::hik_prepare_kernel_multiplications(const std::vector<std::vector<double> > & X, NICE::FeatureMatrixT<double> & X_sorted, const int & _dim)
+{
+  X_sorted.set_features(X, _dim);
+}
+
+void FastMinKernel::hik_prepare_kernel_multiplications(const std::vector< NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim)
+{
+  X_sorted.set_features(X, dimensionsOverExamples, _dim);
+}
+
+void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & alpha, NICE::VVector & A, NICE::VVector & B) const
+{
+//   std::cerr << "FastMinKernel::hik_prepare_alpha_multiplications" << std::endl;
+//   std::cerr << "alpha: " << alpha << std::endl;
+  A.resize(d);
+  B.resize(d);
+
+  //  efficient calculation of k*alpha
+  //  ---------------------------------
+  //  
+  //  sum_i alpha_i k(x^i,x) = sum_i alpha_i sum_k min(x^i_k,x_k)
+  //  = sum_k sum_i alpha_i min(x^i_k, x_k)
+  //  
+  //  now let us define l_k = { i | x^i_k <= x_k }
+  //  and u_k = { i | x^i_k > x_k }, this leads to
+  //  
+  //  = sum_k ( sum_{l \in l_k} alpha_l x^l_k + sum_{u \in u_k} alpha_u x_k )
+  //  = sum_k ( sum_{l \in l_k} alpha_l x^l_k + x_k * sum_{u \in u_k} alpha_u )
+  // 
+  //  We also define 
+  //  l^j_k = { i | x^i_k <= x^j_k } and
+  //  u^j_k = { i | x^i_k > x^j_k }
+  //
+  //  We now need the partial sums 
+  //
+  //  (Definition 1)
+  //  a_{k,j} = \sum_{l \in l^j_k} \alpha_l x^l_k 
+  //
+  //  and \sum_{u \in u^j_k} \alpha_u 
+  //  according to increasing values of x^l_k
+  //
+  //  With 
+  //  (Definition 2)
+  //  b_{k,j} =  \sum_{l \in l^j_k} \alpha_l, 
+  //
+  //  we get
+  //  \sum_{u \in u^j_k} \alpha_u  = \sum_{u=1}^n alpha_u - \sum_{l \in l^j_k} \alpha_l
+  //  = b_{k,n} - b_{k,j}
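+  //
+  //  A tiny worked example (D = 1, values already sorted by dimension k):
+  //  x^1_k = 0.1, x^2_k = 0.2, x^3_k = 0.7 with alpha = (2, -1, 3) gives
+  //  a_{k,1} = 0.2,  a_{k,2} = 0.0,  a_{k,3} = 2.1
+  //  b_{k,1} = 2,    b_{k,2} = 1,    b_{k,3} = 4
+  //  for a test value x_k = 0.5 (so j = 2), the contribution of dimension k is
+  //  a_{k,2} + x_k * ( b_{k,3} - b_{k,2} ) = 0.0 + 0.5 * 3 = 1.5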
+
+  //  we only need as many entries as we have nonZero entries in our features for the corresponding dimensions
+  for (int i = 0; i < d; i++)
+  {
+    uint numNonZero = X_sorted.getNumberOfNonZeroElementsPerDimension(i);
+    //DEBUG
+    //std::cerr << "number of non-zero elements in dimension " << i << " / " << d << ": " << numNonZero << std::endl;
+    A[i].resize( numNonZero );
+    B[i].resize( numNonZero  );
+  }
+  
+  //  for more information see hik_prepare_alpha_multiplications
+  
+  for (int dim = 0; dim < d; dim++)
+  {
+    double alpha_sum(0.0);
+    double alpha_times_x_sum(0.0);
+
+    int cntNonzeroFeat(0);
+    
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+    // loop through all elements in sorted order
+    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++ )
+    {
+      const SortedVectorSparse<double>::dataelement & de = i->second;
+      
+      // index of the feature
+      int index = de.first;
+      // transformed element of the feature
+      //
+      double elem( de.second );
+                
+      alpha_times_x_sum += alpha[index] * elem;
+      A[dim][cntNonzeroFeat] = alpha_times_x_sum;
+      
+      alpha_sum += alpha[index];
+      B[dim][cntNonzeroFeat] = alpha_sum;
+      cntNonzeroFeat++;
+    }
+  }
+
+//   A.store(std::cerr);
+//   B.store(std::cerr);
+}
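+
+// A minimal usage sketch (illustrative only, hypothetical names): pairing the
+// precomputation above with the exact evaluation routine for a sparse test vector.
+static double sketchExactScore ( const FastMinKernel & fmk, const NICE::Vector & alpha, const NICE::SparseVector & xstar )
+{
+  NICE::VVector A;
+  NICE::VVector B;
+  fmk.hik_prepare_alpha_multiplications ( alpha, A, B ); // O(n*d), once after training
+  double score ( 0.0 );
+  // binary search per nonzero dimension of xstar, i.e. O(d*log(n)) per test example
+  fmk.hik_kernel_sum ( A, B, xstar, score, NULL /* no parameterized function */ );
+  return score;
+}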
+
+double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVector & A, const NICE::VVector & B, const Quantization & q, const ParameterizedFunction *pf ) const
+{
+  //NOTE keep in mind: for doing this, we already have precomputed A and B using hik_prepare_alpha_multiplications!
+  
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another
+      // We skip this flexibility for now, but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+
+
+  // creating the lookup table as pure C, which might be beneficial
+  // for fast evaluation
+  double *Tlookup = new double [ hmax * this->d ];
+//     std::cerr << "size of LUT: " << hmax * this->d << std::endl;
+//   sizeOfLUT = hmax * this->d;
+
+
+  // loop through all dimensions
+  for (int dim = 0; dim < this->d; dim++)
+  {
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n )
+      continue;
+
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+      
+    SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin();
+    SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();
+    
+    // index of the element whose value is always larger than the current value fval
+    int index = 0;
+    // we use the quantization of the original features! the transformed features were
+    // already used to calculate A and B; this of course assumes monotonic functions!!!
+    int qBin = q.quantize ( i->first ); 
+
+    // the next loop is linear in max(hmax, n)
+    // REMARK: this could be changed to hmax*log(n), when
+    // we use binary search
+    
+    for (int j = 0; j < (int)hmax; j++)
+    {
+      double fval = prototypes[j];
+      double t;
+
+      if (  (index == 0) && (j < qBin) ) {
+        // current element is smaller than everything else
+        // resulting value = fval * sum_l=1^n alpha_l
+        t = fval*( B[dim][this->n-1 - nrZeroIndices] );
+      } else {
+
+         // move to next example, if necessary   
+        while ( (j >= qBin) && ( index < (this->n-1-nrZeroIndices)) )
+        {
+          index++;
+          iPredecessor = i;
+          i++;
+
+          if ( i->first !=  iPredecessor->first )
+            qBin = q.quantize ( i->first );
+        }
+        // compute current element in the lookup table and keep in mind that
+        // index is the next element and not the previous one
+        //NOTE pay attention: this is only valid if all entries are positive! If not, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, otherwise do not
+        if ( (j >= qBin) && ( index==(this->n-1-nrZeroIndices) ) ) {
+          // the current element (fval) is equal or bigger to the element indexed by index
+          // in fact, the term B[dim][this->n-1-nrZeroIndices] - B[dim][index] is equal to zero and vanishes, which is logical, since all elements are smaller than j!
+          t = A[dim][index];// + fval*( B[dim][this->n-1-nrZeroIndices] - B[dim][index] );
+        } else {
+          // standard case
+          t = A[dim][index-1] + fval*( B[dim][this->n-1-nrZeroIndices] - B[dim][index-1] );
+        }
+      }
+
+      Tlookup[ dim*hmax + j ] = t;
+    }
+  }
+
+  delete [] prototypes;
+
+  return Tlookup;
+}
+
+double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & alpha, const Quantization & q, const ParameterizedFunction *pf ) const
+{
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another
+      // We skip this flexibility for now, but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+
+  // creating the lookup table as pure C, which might be beneficial
+  // for fast evaluation
+  double *Tlookup = new double [ hmax * this->d ];
+//   sizeOfLUT = hmax * this->d;
+  
+  // loop through all dimensions
+  for (int dim = 0; dim < this->d; dim++)
+  {
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n )
+      continue;
+
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+    
+    double alphaSumTotalInDim(0.0);
+    double alphaTimesXSumTotalInDim(0.0);
+    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++ )
+    {
+      alphaSumTotalInDim += alpha[i->second.first];
+      alphaTimesXSumTotalInDim += alpha[i->second.first] * i->second.second;
+    }    
+      
+    SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin();
+    SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();
+    
+    // index of the element whose value is always larger than the current value fval
+    int index = 0;
+    
+    // we use the quantization of the original features! Nevertheless, the resulting lookup table is computed using the transformed ones
+    int qBin = q.quantize ( i->first ); 
+    
+    double alpha_sum(0.0);
+    double alpha_times_x_sum(0.0);
+    double alpha_sum_prev(0.0);
+    double alpha_times_x_sum_prev(0.0);
+    
+    for (uint j = 0; j < hmax; j++)
+    {
+      double fval = prototypes[j];
+      double t;
+
+      if (  (index == 0) && (j < (uint)qBin) ) {
+        // current element is smaller than everything else
+        // resulting value = fval * sum_l=1^n alpha_l
+        //t = fval*( B[dim][this->n-1 - nrZeroIndices] );
+        t = fval*alphaSumTotalInDim;
+      } else {
+
+         // move to next example, if necessary   
+        while ( (j >= (uint)qBin) && ( index < (this->n-1-nrZeroIndices)) )
+        {
+          alpha_times_x_sum_prev = alpha_times_x_sum;
+          alpha_sum_prev = alpha_sum;
+          alpha_times_x_sum += alpha[i->second.first] * i->second.second; //i->dataElement.transformedFeatureValue
+          alpha_sum += alpha[i->second.first]; //i->dataElement.OrigIndex
+          
+          index++;
+          iPredecessor = i;
+          i++;
+
+          if ( i->first !=  iPredecessor->first )
+            qBin = q.quantize ( i->first );
+        }
+        // compute current element in the lookup table and keep in mind that
+        // index is the next element and not the previous one
+        //NOTE pay attention: this is only valid if all entries are positive! If not, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, otherwise do not
+        if ( (j >= (uint)qBin) && ( index==(this->n-1-nrZeroIndices) ) ) {
+          // the current element (fval) is equal or bigger to the element indexed by index
+          // in fact, the term B[dim][this->n-1-nrZeroIndices] - B[dim][index] is equal to zero and vanishes, which is logical, since all elements are smaller than j!
+//           double lastTermAlphaTimesXSum;
+//           double lastTermAlphaSum;
+          t = alphaTimesXSumTotalInDim;
+        } else {
+          // standard case
+          t = alpha_times_x_sum + fval*( alphaSumTotalInDim - alpha_sum );
+        }
+      }
+
+      Tlookup[ dim*hmax + j ] = t;
+    }
+  }
+
+  delete [] prototypes;
+
+  return Tlookup;
+}
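+
+// Note: this direct construction should yield the same lookup table as calling
+// hik_prepare_alpha_multiplications followed by hik_prepare_alpha_multiplications_fast,
+// but without materializing the arrays A and B.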
+
+
+void FastMinKernel::hikUpdateLookupTable(double * T, const double & alphaNew, const double & alphaOld, const int & idx, const Quantization & q, const ParameterizedFunction *pf ) const
+{
+  
+  if (T == NULL)
+  {
+    fthrow(Exception, "FastMinKernel::hikUpdateLookupTable LUT not initialized, run FastMinKernel::hikPrepareLookupTable first!");
+    return;
+  }
+  
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another
+      // We skip this flexibility for now, but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+  
+  double diffOfAlpha(alphaNew - alphaOld);
+  
+  // loop through all dimensions
+  for (int dim = 0; dim < this->d; dim++)
+  {  
+    double x_i ( (X_sorted(dim,idx)) );
+    
+    //TODO we could also check whether x_i < tol, if we stored the tolerance explicitly
+    if (x_i == 0.0) //nothing to do in this dimension
+      continue;
+
+    //TODO we could speed this up by first doing a binary search for the position where the min changes, and then using two separate for-loops
+    for (uint j = 0; j < hmax; j++)
+    {
+        double fval;
+        int q_bin = q.quantize(x_i);
+        if (q_bin > j)
+          fval = prototypes[j];
+        else
+          fval = x_i;      
+        
+//       double fval = std::min(prototypes[j],x_i);      
+      T[ dim*hmax + j ] += diffOfAlpha*fval;
+    }
+  }
+
+  delete [] prototypes;
+}
+
+
+void FastMinKernel::hik_kernel_multiply(const NICE::VVector & A, const NICE::VVector & B, const NICE::Vector & alpha, NICE::Vector & beta) const
+{
+  beta.resize(n);
+  beta.set(0.0);
+
+  // runtime is O(n*d), we do not benefit from an additional lookup table here
+  for (int dim = 0; dim < d; dim++)
+  {
+    // -- efficient sparse solution
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+
+    if ( nrZeroIndices == n ) {
+      // all values are zero in this dimension :) and we can simply ignore the feature
+      continue;
+    }
+
+    int cnt(0);
+    for ( multimap< double, SortedVectorSparse<double>::dataelement>::const_iterator i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, cnt++)
+    {
+      const SortedVectorSparse<double>::dataelement & de = i->second;
+      uint feat = de.first;
+      int inversePosition = cnt; 
+      double fval = de.second;
+
+      // in which position was the element sorted? actually we only care about the nonzero elements, so we have to subtract the number of zero elements
+      //NOTE pay attention: this is only valid if all entries are positive! If not, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, otherwise do not
+
+      //we definitely know that this element exists in inversePermutation, so we do not have to check whether find returns .end()
+      //int inversePosition(inversePermutation.find(feat)->second - nrZeroIndices);
+      // sum_{l \in L_k} \alpha_l x^l_k
+      //
+      // A is zero for zero feature values (x^l_k is zero for all l \in L_k)
+      double firstPart( A[dim][inversePosition] );
+      // sum_{u \in U_k} alpha_u
+      // B is not zero for zero feature values, but we do not
+      // have to care about them, because it is multiplied with
+      // the feature value
+      // DEBUG for Björn's code
+      if ( (uint)dim >= B.size() )
+        fthrow(Exception, "dim exceeds B.size: " << dim << " " << B.size() );
+      if ( B[dim].size() == 0 )
+        fthrow(Exception, "B[dim] is empty");
+      if ( (n-1-nrZeroIndices < 0)  || ((uint)(n-1-nrZeroIndices) >= B[dim].size() ) )
+        fthrow(Exception, "n-1-nrZeroIndices is invalid: " << n << " " << nrZeroIndices << " " << B[dim].size() << " d: " << d);
+      if ( inversePosition < 0 || (uint)inversePosition >= B[dim].size() )
+        fthrow(Exception, "inverse position is invalid: " << inversePosition << " " << B[dim].size() );
+      double secondPart( B[dim][n-1-nrZeroIndices] - B[dim][inversePosition]);
+
+      beta[feat] += firstPart + fval * secondPart; // i->elementpointer->dataElement->Value
+    }
+  }
+  
+  //do we really want to consider noisy labels?
+  for (int feat = 0; feat < n; feat++)
+  {
+    beta[feat] += noise*alpha[feat];
+  }
+}
+
+void FastMinKernel::hik_kernel_multiply_fast(const double *Tlookup, const Quantization & q, const NICE::Vector & alpha, NICE::Vector & beta) const
+{
+  beta.resize(n);
+  beta.set(0.0);
+
+  // runtime is O(n*d), we do not benefit from an additional lookup table here
+  for (int dim = 0; dim < d; dim++)
+  {
+    // -- efficient sparse solution
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+
+    int cnt(0);
+    for ( multimap< double, SortedVectorSparse<double>::dataelement>::const_iterator i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, cnt++)
+    {
+      const SortedVectorSparse<double>::dataelement & de = i->second;
+      uint feat = de.first;
+      uint qBin = q.quantize(i->first);
+      beta[feat] += Tlookup[dim*q.size() + qBin];
+    }
+  }
+  
+  //do we really want to consider noisy labels?
+  for (int feat = 0; feat < n; feat++)
+  {
+    beta[feat] += noise*alpha[feat];
+  }
+}
+
+void FastMinKernel::hik_kernel_sum(const NICE::VVector & A, const NICE::VVector & B, const NICE::SparseVector & xstar, double & beta, const ParameterizedFunction *pf) const
+{
+  // sparse version of hik_kernel_sum; no significant changes,
+  // we are just skipping zero elements
+  // for additional comments see the non-sparse version of hik_kernel_sum
+  beta = 0.0;
+  for (SparseVector::const_iterator i = xstar.begin(); i != xstar.end(); i++)
+  {
+  
+    int dim = i->first;
+    double fval = i->second;
+    
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n ) {
+      // all features are zero and let us ignore it completely
+      continue;
+    }
+
+    int position;
+
+    //where is the example x^z_i located in
+    //the sorted array? -> perform binary search, runtime O(log(n))
+    // search using the original value
+    X_sorted.findFirstLargerInDimension(dim, fval, position);
+    position--;
+  
+    //NOTE again - pay attention! This is only valid if all entries are non-negative! If not, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, otherwise do not
+    //sum_{l \in L_k} \alpha_l x^l_k
+    double firstPart(0.0);
+    //TODO in the "overnext" line there occurs the following error
+    // Invalid read of size 8
+    if (position >= 0) 
+      firstPart = (A[dim][position-nrZeroIndices]);
+    
+    // sum_{u \in U_k} alpha_u
+    
+    // sum_{u \in U_k} alpha_u
+    // => double secondPart( B(dim, n-1) - B(dim, position));
+    //TODO in the next line there occurs the following error
+    // Invalid read of size 8      
+    double secondPart( B[dim][n-1-nrZeroIndices]);
+    //TODO in the "overnext" line there occurs the following error
+    // Invalid read of size 8    
+    if (position >= 0) 
+      secondPart-= B[dim][position-nrZeroIndices];
+    
+    if ( pf != NULL )
+    {
+      fval = pf->f ( dim, fval );
+    }   
+    
+    // but apply using the transformed one
+    beta += firstPart + secondPart* fval;
+  }
+}
+
+void FastMinKernel::hik_kernel_sum_fast(const double *Tlookup, const Quantization & q, const NICE::Vector & xstar, double & beta) const
+{
+  beta = 0.0;
+  if ((int) xstar.size() != d)
+  {
+    fthrow(Exception, "FastMinKernel::hik_kernel_sum_fast sizes of xstar and training data does not match!");
+    return;
+  }
+
+  // runtime is O(d) if the quantizer is O(1)
+  for (int dim = 0; dim < d; dim++)
+  {
+    double v = xstar[dim];
+    uint qBin = q.quantize(v);
+    
+    beta += Tlookup[dim*q.size() + qBin];
+  }
+}
+
+void FastMinKernel::hik_kernel_sum_fast(const double *Tlookup, const Quantization & q, const NICE::SparseVector & xstar, double & beta) const
+{
+  beta = 0.0;
+  // sparse version of hik_kernel_sum_fast; no significant changes,
+  // we are just skipping zero elements
+  // for additional comments see the non-sparse version of hik_kernel_sum_fast
+  // runtime is O(d) if the quantizer is O(1)
+  for (SparseVector::const_iterator i = xstar.begin(); i != xstar.end(); i++ )
+  {
+    int dim = i->first;
+    double v = i->second;
+    uint qBin = q.quantize(v);
+    
+    beta += Tlookup[dim*q.size() + qBin];
+  }
+}
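+
+// A minimal end-to-end sketch (illustrative only, hypothetical names): building the
+// lookup table once and scoring a sparse test vector with d table lookups.
+static double sketchFastScore ( const FastMinKernel & fmk, const NICE::Vector & alpha, const Quantization & q, const NICE::SparseVector & xstar )
+{
+  // O(d * q.size()) preparation after training
+  double * Tlookup = fmk.hikPrepareLookupTable ( alpha, q, NULL /* no parameterized function */ );
+  double score ( 0.0 );
+  // O(d) per test example, assuming O(1) quantization
+  fmk.hik_kernel_sum_fast ( Tlookup, q, xstar, score );
+  delete [] Tlookup;
+  return score;
+}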
+
+double *FastMinKernel::solveLin(const NICE::Vector & y, NICE::Vector & alpha, const Quantization & q, const ParameterizedFunction *pf, const bool & useRandomSubsets, uint maxIterations, const int & _sizeOfRandomSubset, double minDelta, bool timeAnalysis) const
+{ 
+  int sizeOfRandomSubset(_sizeOfRandomSubset);
+  bool verbose ( false );
+  bool verboseMinimal ( false );
+  
+  // number of quantization bins
+  uint hmax = q.size();
+  
+  NICE::Vector diagonalElements(y.size(),0.0);
+  X_sorted.hikDiagonalElements(diagonalElements);
+  diagonalElements += this->noise;
+  
+  NICE::Vector pseudoResidual (y.size(),0.0);
+  NICE::Vector delta_alpha (y.size(),0.0);
+  double alpha_old;
+  double alpha_new;
+  double x_i;
+  
+  // initialization
+  if (alpha.size() != y.size())
+    alpha.resize(y.size());
+  alpha.set(0.0);
+  
+  double *Tlookup = new double [ hmax * this->d ];
+  if ( (hmax*this->d) <= 0 ) return Tlookup;
+  memset(Tlookup, 0, sizeof(Tlookup[0])*hmax*this->d);
+  
+  uint iter;
+  Timer t;
+  if ( timeAnalysis )
+    t.start();
+  
+  if (useRandomSubsets)
+  {
+    std::vector<int> indices(y.size());
+    for (uint i = 0; i < y.size(); i++)
+      indices[i] = i;
+    
+    if (sizeOfRandomSubset <= 0) 
+      sizeOfRandomSubset = y.size();
+    
+    for ( iter = 1; iter <= maxIterations; iter++ ) 
+    {
+      NICE::Vector perm;
+      randomPermutation(perm,indices,sizeOfRandomSubset);
+ 
+      if ( timeAnalysis )
+      {
+        t.stop();
+        Vector r;
+        this->hik_kernel_multiply_fast(Tlookup, q, alpha, r);
+        r = r - y;
+        
+        double res = r.normL2();
+        double resMax = r.normInf();
+
+        cerr << "SimpleGradientDescent: TIME " << t.getSum() << " " << res << " " << resMax << endl;
+
+        t.start();
+      }
+     
+      for ( int i = 0; i < sizeOfRandomSubset; i++)  
+      {
+
+        pseudoResidual(perm[i]) = -y(perm[i]) + (this->noise*alpha(perm[i]));
+        for (uint j = 0; j < (uint)this->d; j++)
+        {
+          x_i = X_sorted(j,perm[i]);
+          pseudoResidual(perm[i]) += Tlookup[j*hmax + q.quantize(x_i)];
+        }
+      
+        //NOTE: this threshold could also be a parameter of the function call
+        if ( fabs(pseudoResidual(perm[i])) > 1e-7 )
+        {
+          alpha_old = alpha(perm[i]);
+          alpha_new = alpha_old - (pseudoResidual(perm[i])/diagonalElements(perm[i]));
+          alpha(perm[i]) = alpha_new;
+
+
+          delta_alpha(perm[i]) = alpha_old-alpha_new;
+         
+          this->hikUpdateLookupTable(Tlookup, alpha_new, alpha_old, perm[i], q, pf ); // works correctly
+          
+        } else
+        {
+          delta_alpha(perm[i]) = 0.0;
+        }
+        
+      }
+      // after this only residual(i) is the valid residual... we should
+      // really update the whole vector somehow
+      
+      double delta = delta_alpha.normL2();
+      if ( verbose ) {
+        cerr << "FastMinKernel::solveLin: iteration " << iter << " / " << maxIterations << endl;     
+        cerr << "FastMinKernel::solveLin: delta = " << delta << endl;
+        cerr << "FastMinKernel::solveLin: pseudo residual = " << pseudoResidual.scalarProduct(pseudoResidual) << endl;
+      }
+      
+      if ( delta < minDelta ) 
+      {
+        if ( verbose )
+          cerr << "FastMinKernel::solveLin: small delta" << endl;
+        break;
+      }    
+    }
+  }
+  else //don't use random subsets
+  {   
+    for ( iter = 1; iter <= maxIterations; iter++ ) 
+    {
+      
+      for ( uint i = 0; i < y.size(); i++ )
+      {
+          
+        pseudoResidual(i) = -y(i) + (this->noise*alpha(i));
+        for (uint j = 0; j < (uint) this->d; j++)
+        {
+          x_i = X_sorted(j,i);
+          pseudoResidual(i) += Tlookup[j*hmax + q.quantize(x_i)];
+        }
+      
+        //NOTE: this threshold could also be a parameter of the function call
+        if ( fabs(pseudoResidual(i)) > 1e-7 )
+        {
+          alpha_old = alpha(i);
+          alpha_new = alpha_old - (pseudoResidual(i)/diagonalElements(i));
+          alpha(i) = alpha_new;
+          delta_alpha(i) = alpha_old-alpha_new;
+          
+          this->hikUpdateLookupTable(Tlookup, alpha_new, alpha_old, i, q, pf ); // works correctly
+          
+        } else
+        {
+          delta_alpha(i) = 0.0;
+        }
+        
+      }
+      
+      double delta = delta_alpha.normL2();
+      if ( verbose ) {
+        cerr << "FastMinKernel::solveLin: iteration " << iter << " / " << maxIterations << endl;     
+        cerr << "FastMinKernel::solveLin: delta = " << delta << endl;
+        cerr << "FastMinKernel::solveLin: pseudo residual = " << pseudoResidual.scalarProduct(pseudoResidual) << endl;
+      }
+      
+      if ( delta < minDelta ) 
+      {
+        if ( verbose )
+          cerr << "FastMinKernel::solveLin: small delta" << endl;
+        break;
+      }    
+    }
+  }
+  
+  if (verboseMinimal)
+    std::cerr << "FastMinKernel::solveLin -- needed " << iter << " iterations" << std::endl;
+  return Tlookup;
+}
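+
+// A minimal usage sketch (illustrative only): calling the coordinate-descent style
+// solver above. All variable names and parameter values are hypothetical.
+static NICE::Vector sketchSolveLin ( const FastMinKernel & fmk, const NICE::Vector & y, const Quantization & q )
+{
+  NICE::Vector alpha;
+  // approximately solve (K + noise*I) * alpha = y; the returned lookup table already
+  // corresponds to the final alpha and could be reused for fast evaluations
+  double * Tlookup = fmk.solveLin ( y, alpha, q, NULL /* pf */, false /* no random subsets */,
+                                    1000 /* maxIterations */, -1 /* subset size, unused here */,
+                                    1e-7 /* minDelta */, false /* no time analysis */ );
+  delete [] Tlookup;
+  return alpha;
+}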
+
+void FastMinKernel::randomPermutation(NICE::Vector & permutation, const std::vector<int> & oldIndices, const int & newSize) const
+{
+  std::vector<int> indices(oldIndices);
+  
+  int resultingSize (std::min((int) (oldIndices.size()),newSize) );
+  permutation.resize(resultingSize);
+  
+  for (int i = 0; i < resultingSize; i++)
+  {
+    int newIndex(rand() % indices.size());
+    permutation[i] = indices[newIndex ];
+    indices.erase(indices.begin() + newIndex);
+  }
+}
+
+double FastMinKernel::getFrobNormApprox()
+{
+  double frobNormApprox(0.0);
+  
+  switch (approxScheme)
+  {
+    case MEDIAN:
+    {
+      // \| K \|_F^2 ~ (n/2)^2 \left( \sum_k \mathrm{median}_k \right)^2, i.e. \| K \|_F ~ (n/2) \sum_k \mathrm{median}_k
+      // motivation: estimate half of the values in dim k as zero and half of them as the median (-> lower bound of the expectation)
+      for (int i = 0; i < d; i++)
+      {
+        double median = this->X_sorted.getFeatureValues(i).getMedian();
+        frobNormApprox += median;
+      }
+      
+      frobNormApprox = fabs(frobNormApprox) * n/2.0;
+      break;
+    }
+    case EXPECTATION:
+    {
+      std::cerr << "EXPECTATION" << std::endl;
+      // \| K \|_F^2 ~ \sum_i K_{ii}^2 + (n^2 - n) \left( \frac{1}{3} \sum_k ( 2 a_k + b_k ) \right)^2
+      // with a_k = minimal value in dim k and b_k = maximal value
+      
+      //first term
+      NICE::Vector diagEl;
+      X_sorted.hikDiagonalElements(diagEl);
+      frobNormApprox += diagEl.normL2();
+      
+      //second term
+      double secondTerm(0.0);
+      for (int i = 0; i < d; i++)
+      {
+        double minInDim;
+        minInDim = this->X_sorted.getFeatureValues(i).getMin();
+        double maxInDim;
+        maxInDim = this->X_sorted.getFeatureValues(i).getMax();
+        std::cerr << "min: " << minInDim << " max: " << maxInDim << std::endl;
+        secondTerm += 2.0*minInDim + maxInDim;
+      }
+      secondTerm /= 3.0;
+      secondTerm = pow(secondTerm, 2);
+      secondTerm *= (pow(this->n,2) - this->n);
+      frobNormApprox += secondTerm;
+      
+      
+      frobNormApprox = sqrt(frobNormApprox);
+      
+      break;
+    }
+    default:
+    { //do nothing, approximate with zero :)
+      break;
+    }
+  }
+  return frobNormApprox;
+}
+
+void FastMinKernel::setApproximationScheme(const int & _approxScheme)
+{
+  switch(_approxScheme)
+  {
+    case 0:
+    {
+      approxScheme = MEDIAN;
+      break;
+    }
+    case 1:
+    {
+      approxScheme = EXPECTATION;
+      break;
+    }
+    default:
+    {
+      approxScheme = MEDIAN;
+      break;
+    }
+  }
+}
+
+void FastMinKernel::hikPrepareKVNApproximation(NICE::VVector & A) const
+{
+  A.resize(d);
+
+  //  efficient calculation of |k_*|^2 = k_*^T * k_*
+  //  ---------------------------------
+  //  
+  //    \sum_{i=1}^{n} \left( \sum_{d=1}^{D} \min (x_d^*, x_d^i) \right)^2
+  //  <=\sum_{i=1}^{n} \sum_{d=1}^{D} \left( \min (x_d^*, x_d^i) \right)^2  
+  //  = \sum_{d=1}^{D} \sum_{i=1}^{n} \left( \min (x_d^*, x_d^i) \right)^2
+  //  = \sum_{d=1}^{D} \left( \sum_{i: x_d^i < x_d^*} (x_d^i)^2 + \sum_{j: x_d^* \leq x_d^j} (x_d^*)^2 \right)
+  //
+  //  again let us define l_d = { i | x_d^i <= x_d^* }
+  //  and u_d = { i | x_d^i > x_d^* }, this leads to
+  //  
+  //  = \sum_{d=1}^{D} \left( \sum_{l \in l_d} (x_d^l)^2 + \sum_{u \in u_d} (x_d^*)^2 \right)
+  //  = \sum_{d=1}^{D} \left( \sum_{l \in l_d} (x_d^l)^2 + (x_d^*)^2 \sum_{u \in u_d} 1 \right)
+  // 
+  //  We also define 
+  //  l_d^j = { i | x_d^i <= x_d^j } and
+  //  u_d^j = { i | x_d^i > x_d^j }
+  //
+  //  We now need the partial sums 
+  //
+  //  (Definition 1)
+  //  a_{d,j} = \sum_{l \in l_d^j} (x_d^l)^2
+  //  according to increasing values of x_d^l
+  //
+  //  We end at
+  //  |k_*|^2 <= \sum_{d=1}^{D} \left( a_{d,r_d} + (x_d^*)^2 * |u_d^{r_d}| \right)
+  //  with r_d being the index of the last example in the ordered sequence for dimension d, that is not larger than x_d^*
+
+  //  we only need as many entries as we have nonZero entries in our features for the corresponding dimensions
+  for (int i = 0; i < d; i++)
+  {
+    uint numNonZero = X_sorted.getNumberOfNonZeroElementsPerDimension(i);
+    A[i].resize( numNonZero );
+  }
+  //  for more information see hik_prepare_alpha_multiplications
+  
+  for (int dim = 0; dim < d; dim++)
+  {
+    double squared_sum(0.0);
+
+    int cntNonzeroFeat(0);
+    
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+    // loop through all elements in sorted order
+    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++ )
+    {
+      const SortedVectorSparse<double>::dataelement & de = i->second;
+      
+      // de: first - index, second - transformed feature
+      double elem( de.second );
+                
+      squared_sum += pow( elem, 2 );
+      A[dim][cntNonzeroFeat] = squared_sum;
+
+      cntNonzeroFeat++;
+    }
+  }
+}
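+
+// Tiny worked example (editorial sketch) for the structure of A computed
+// above: suppose dimension d holds the sorted non-zero values 0.2, 0.3, 0.5.
+// Then A[d] stores the cumulative squared sums
+//   A[d][0] = 0.2^2        = 0.04
+//   A[d][1] = 0.04 + 0.3^2 = 0.13
+//   A[d][2] = 0.13 + 0.5^2 = 0.38
+// and for a test value x_d^* = 0.4 (i.e., r_d = 1) this dimension contributes
+//   A[d][1] + (0.4)^2 * 1 = 0.13 + 0.16 = 0.29
+// to the upper bound on |k_*|^2.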
+
+double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & A, const Quantization & q, const ParameterizedFunction *pf ) const
+{
+  //NOTE keep in mind: doing this requires that A has already been precomputed via hikPrepareKVNApproximation!
+  
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another,
+      // we skip this flexibility for now ...but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+
+
+  // creating the lookup table as pure C, which might be beneficial
+  // for fast evaluation
+  double *Tlookup = new double [ hmax * this->d ];
+
+  // loop through all dimensions
+  for (int dim = 0; dim < this->d; dim++)
+  {
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n )
+      continue;
+
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+      
+    SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin();
+    SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();
+    
+    // index of the element, which is always bigger than the current value fval
+    int index = 0;
+    // we use the quantization of the original features! The transformed features were
+    // already used to calculate A and B; this of course assumes monotonic functions!!!
+    int qBin = q.quantize ( i->first ); 
+
+    // the next loop is linear in max(hmax, n)
+    // REMARK: this could be changed to hmax*log(n), when
+    // we use binary search
+    
+    for (int j = 0; j < (int)hmax; j++)
+    {
+      double fval = prototypes[j];
+      double t;
+
+      if (  (index == 0) && (j < qBin) ) {
+        // current element is smaller than everything else
+        // resulting value = fval^2 * (n - nrZeroIndices)
+        t = pow( fval, 2 ) * (n-nrZeroIndices-index);
+      } else {
+
+         // move to next example, if necessary   
+        while ( (j >= qBin) && ( index < (this->n-nrZeroIndices)) )
+        {
+          index++;
+          iPredecessor = i;
+          i++;
+
+          // guard against dereferencing the end iterator after the last element
+          if ( ( i != nonzeroElements.end() ) && ( i->first !=  iPredecessor->first ) )
+            qBin = q.quantize ( i->first );
+        }
+        // compute current element in the lookup table and keep in mind that
+        // index is the next element and not the previous one
+        //NOTE pay attention: this is only valid if all entries are positive! - otherwise, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, if not do not
+        if ( (j >= (uint)qBin) && ( index==(this->n-1-nrZeroIndices) ) ) {
+          // the current element (fval) is equal to or larger than the element indexed by index
+          // the second term vanishes, which is logical, since all remaining elements are smaller than fval!
+          t = A[dim][index];
+        } else {
+          // standard case
+          t =  A[dim][index-1] + pow( fval, 2 ) * (n-nrZeroIndices-(index) );
+        }
+      }
+
+      Tlookup[ dim*hmax + j ] = t;
+    }
+  }
+
+  delete [] prototypes;
+
+  return Tlookup;  
+}
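+
+// Editorial note on the returned table: Tlookup is a plain row-major
+// d x hmax array, so the contribution of dimension `dim` for a test value
+// falling into quantization bin `j` is simply
+//   double contrib = Tlookup[ dim * hmax + j ];
+// which is exactly what hikComputeKVNApproximationFast accumulates below.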
+
+double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantization & q, const ParameterizedFunction *pf ) const
+{
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another,
+      // we skip this flexibility for now ...but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+
+  // creating the lookup table as pure C, which might be beneficial
+  // for fast evaluation
+  double *Tlookup = new double [ hmax * this->d ];
+  
+  // loop through all dimensions
+  for (int dim = 0; dim < this->d; dim++)
+  {
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n )
+      continue;
+
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+         
+    SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin();
+    SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();
+    
+    // index of the element, which is always bigger than the current value fval
+    int index = 0;
+    
+    // we use the quantization of the original features! Nevertheless, the resulting lookup table is computed using the transformed ones
+    int qBin = q.quantize ( i->first ); 
+    
+    double sum(0.0);
+    
+    for (uint j = 0; j < hmax; j++)
+    {
+      double fval = prototypes[j];
+      double t;
+
+      if (  (index == 0) && (j < (uint)qBin) ) {
+        // current element is smaller than everything else
+        // resulting value = fval^2 * (n - nrZeroIndices)
+        t = pow( fval, 2 ) * (n-nrZeroIndices-index);
+      } else {
+
+         // move to next example, if necessary   
+        while ( (j >= (uint)qBin) && ( index < (this->n-nrZeroIndices)) )
+        {
+          sum += pow( i->second.second, 2 ); //i->dataElement.transformedFeatureValue
+          
+          index++;
+          iPredecessor = i;
+          i++;
+
+          // guard against dereferencing the end iterator after the last element
+          if ( ( i != nonzeroElements.end() ) && ( i->first !=  iPredecessor->first ) )
+            qBin = q.quantize ( i->first );
+        }
+        // compute current element in the lookup table and keep in mind that
+        // index is the next element and not the previous one
+        //NOTE pay attention: this is only valid if all entries are positive! - otherwise, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, if not do not
+        if ( (j >= (uint)qBin) && ( index==(this->n-1-nrZeroIndices) ) ) {
+          // the current element (fval) is equal to or larger than the element indexed by index
+          // the second term vanishes, which is logical, since all remaining elements are smaller than fval!
+          t = sum;
+        } else {
+          // standard case
+          t = sum + pow( fval, 2 ) * (n-nrZeroIndices-(index) );
+        }
+      }
+
+      Tlookup[ dim*hmax + j ] = t;
+    }
+  }
+
+  delete [] prototypes;
+
+  return Tlookup;  
+}
+
+void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & A, const NICE::SparseVector & xstar, double & norm, const ParameterizedFunction *pf ) 
+{
+  norm = 0.0;
+  for (SparseVector::const_iterator i = xstar.begin(); i != xstar.end(); i++)
+  {
+  
+    int dim = i->first;
+    double fval = i->second;
+    
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n ) {
+      // all features are zero so let us ignore them completely
+      continue;
+    }
+
+    int position;
+
+    //where is the example x^z_i located in
+    //the sorted array? -> perform binary search, runtime O(log(n))
+    // search using the original value
+    X_sorted.findFirstLargerInDimension(dim, fval, position);
+    position--;
+  
+    //NOTE again - pay attention! This is only valid if all entries are non-negative! - otherwise, check whether the current feature is greater than zero; if so, subtract nrZeroIndices, if not do not
+    double firstPart(0.0);
+    // the guard has to ensure position >= nrZeroIndices: with position >= 0 alone,
+    // A[dim][position-nrZeroIndices] would be read with a negative index whenever
+    // fval exceeds only the zero entries ("Invalid read of size 8" in valgrind).
+    // Zero-valued training entries contribute nothing to the squared partial sums,
+    // so firstPart correctly stays 0.0 in that case.
+    if (position >= nrZeroIndices) 
+      firstPart = (A[dim][position-nrZeroIndices]);
+    else
+      firstPart = 0.0;
+    
+    double secondPart( 0.0);
+      
+    if ( pf != NULL )
+      fval = pf->f ( dim, fval );
+    
+    fval = fval * fval;
+    
+    if (position >= 0) 
+      secondPart = fval * (n-nrZeroIndices-(position+1));
+    else //if x_d^* is smaller than every non-zero training example
+      secondPart = fval * (n-nrZeroIndices);
+    
+    // but apply using the transformed one
+    norm += firstPart + secondPart;
+  }  
+}
+
+void FastMinKernel::hikComputeKVNApproximationFast(const double *Tlookup, const Quantization & q, const NICE::SparseVector & xstar, double & norm) const
+{
+  norm = 0.0;
+  // runtime is O(d) if the quantizer is O(1)
+  for (SparseVector::const_iterator i = xstar.begin(); i != xstar.end(); i++ )
+  {
+    int dim = i->first;
+    double v = i->second;
+    // we do not need a parameterized function here, since the quantizer works on the original feature values. 
+    // nonetheless, the lookup table was created using the parameterized function    
+    uint qBin = q.quantize(v);
+    
+    norm += Tlookup[dim*q.size() + qBin];
+  }  
+}
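+
+// Illustrative usage sketch for both KVN paths (editorial addition; `fmk`,
+// `xstar`, and the bin count are hypothetical, and we assume a Quantization
+// can be constructed from the number of bins):
+//
+//   NICE::VVector A;
+//   fmk.hikPrepareKVNApproximation ( A );
+//   double normExact ( 0.0 );
+//   fmk.hikComputeKVNApproximation ( A, xstar, normExact );
+//
+//   NICE::Quantization q ( 100 );
+//   double *T = fmk.hikPrepareLookupTableForKVNApproximation ( q );
+//   double normFast ( 0.0 );
+//   fmk.hikComputeKVNApproximationFast ( T, q, xstar, normFast );
+//   delete [] T; // the caller owns the returned lookup table
+//
+// Both results should agree up to quantization error.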
+
+void FastMinKernel::hikComputeKernelVector ( const NICE::SparseVector& xstar, NICE::Vector & kstar ) const
+{
+  //init
+  kstar.resize(this->n);
+  kstar.set(0.0);
+  
+  //let's start :)
+  for (SparseVector::const_iterator i = xstar.begin(); i != xstar.end(); i++)
+  {
+  
+    int dim = i->first;
+    double fval = i->second;
+    
+    int nrZeroIndices = X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    if ( nrZeroIndices == n ) {
+      // all features are zero so let us ignore them completely
+      continue;
+    }
+    
+
+    int position;
+
+    //where is the example x^z_i located in
+    //the sorted array? -> perform binary search, runtime O(log(n))
+    // search using the original value
+    X_sorted.findFirstLargerInDimension(dim, fval, position);
+    position--;
+    
+    //get the non-zero elements for this dimension  
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = X_sorted.getFeatureValues(dim).nonzeroElements();
+    
+    //run over the non-zero elements and add the corresponding entries to our kernel vector
+
+    int count(nrZeroIndices);
+    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, count++ )
+    {
+      int origIndex(i->second.first); //orig index (i->second.second would be the transformed feature value)
+      if (count <= position)
+        kstar[origIndex] += i->first; //orig feature value
+      else
+        kstar[origIndex] += fval;
+    }
+  }  
+}
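+
+// For clarity (editorial sketch): the routine above computes, for every
+// training example i, the HIK value k_*[i] = \sum_d min( x*_d, x_{i,d} )
+// using one binary search plus one pass over the non-zero entries per test
+// dimension. A naive dense reference (hypothetical accessors) would be:
+//
+//   for ( int i = 0; i < n; i++ )
+//     for ( int dim = 0; dim < d; dim++ )
+//       kstarNaive[i] += std::min( xstar.get(dim), X(i,dim) );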
+
+// ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
+
+void FastMinKernel::restore ( std::istream & is, int format )
+{
+  if (is.good())
+  {
+    is.precision (numeric_limits<double>::digits10 + 1);  
+    
+    string tmp;
+    is >> tmp; //class name
+    
+    is >> tmp;
+    is >> n;
+    
+    is >> tmp;
+    is >> d;
+    
+    is >> tmp;
+    is >> noise;
+    
+    is >> tmp;
+    int approxSchemeInt;
+    is >> approxSchemeInt;
+    setApproximationScheme(approxSchemeInt);
+   
+    X_sorted.restore(is,format);
+   }
+  else
+  {
+    std::cerr << "FastMinKernel::restore -- InStream not initialized - restoring not possible!" << std::endl;
+  }  
+}
+void FastMinKernel::store ( std::ostream & os, int format ) const
+{
+  if (os.good())
+  {
+    os.precision (numeric_limits<double>::digits10 + 1);
+    os << "FastMinKernel" << std::endl;
+    os << "n: " << n << std::endl;
+    os << "d: " << d << std::endl;
+    os << "noise: " << noise << std::endl;
+    os << "approxScheme: " << approxScheme << std::endl;    
+    X_sorted.store(os,format);  
+  }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }    
+}
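+
+// Persistence round trip (editorial sketch, assuming a configured
+// FastMinKernel `fmk`): store and restore are symmetric, so
+//
+//   std::stringstream ss;
+//   fmk.store ( ss );
+//   FastMinKernel fmkRestored;
+//   fmkRestored.restore ( ss );
+//
+// should reproduce n, d, noise, the approximation scheme, and the sorted
+// feature matrix.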
+
+void FastMinKernel::clear ()
+{
+  std::cerr << "FastMinKernel clear-function called" << std::endl;
+}
+
+void FastMinKernel::setVerbose( const bool & _verbose)
+{
+  verbose = _verbose;
+}
+
+bool FastMinKernel::getVerbose( )   const
+{
+  return verbose;
+}
+
+void FastMinKernel::setDebug( const bool & _debug)
+{
+  debug = _debug;
+  X_sorted.setDebug( _debug );
+}
+
+bool FastMinKernel::getDebug( )   const
+{
+  return debug;
+}
+
+// ----------------- INCREMENTAL LEARNING METHODS -----------------------
+void FastMinKernel::addExample(const NICE::SparseVector & _v, const ParameterizedFunction *pf )
+{
+  X_sorted.add_feature(_v, pf );
+  n++;
+}
+void FastMinKernel::addExample(const std::vector<double> & _v, const ParameterizedFunction *pf )
+{
+  X_sorted.add_feature(_v, pf );
+  n++;
+}
+
+void FastMinKernel::updatePreparationForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, NICE::VVector & A, NICE::VVector & B, const ParameterizedFunction *pf) const
+{ 
+  NICE::SparseVector::const_iterator it = _v.begin();
+  for (int dim = 0; dim < this->d; dim++)
+  {
+    if ( ( it != _v.end() ) && ( it->first == dim ) )
+    {
+      //increase both datastructures by one
+      A[dim].append(0.0);
+      B[dim].append(0.0);
+      
+      //this is the index of the new example in this dimension, which was already added
+      int idx;
+      X_sorted.findLastInDimension(dim, it->second, idx);
+      //actually we do not want to have the next position, but the current one
+      idx--;
+      
+      // and we do not care about zero elements since we store matrices A and B only for non-zero elements in the training data
+      idx -= X_sorted.getNumberOfZeroElementsPerDimension(dim);
+      
+      // we start at the last old element, which is located at size-2;
+      // the lower bound is clamped to 0 so that A[dim][-1] is never read
+      // (the idx == 0 case is handled separately below)
+      for( int i = A[dim].size()-2; i >= std::max(0,idx-1); i--)
+      {
+        if (pf != NULL)
+          A[dim][i+1] = A[dim][i] + alpha * pf->f ( 1, it->second );
+        else
+        {
+          A[dim][i+1] = A[dim][i] + alpha * it->second;
+        }
+      }    
+          
+      // remember: in contrast to the explanations in our ECCV-paper, we store the alpha-values of the INCREASINGLY ordered features
+      // in the matrix B, not in decreasing order
+      for (int i = B[dim].size()-1; i >= std::max(1,idx); i--)
+      {
+        B[dim][i] = B[dim][i-1] + alpha;
+      }
+      
+      //special case
+      if (idx == 0)
+      {
+        if (pf != NULL)
+          A[dim][0] = alpha * pf->f ( 1, it->second );
+        else
+          A[dim][0] = alpha * it->second;
+        
+        B[dim][0] = alpha;
+      }      
+      
+      it++;
+    }
+    else //_v is zero for that dimension
+    {
+      //nothing to do, since we do not store any information about zero elements
+    }
+  }
+}
+
+void FastMinKernel::updateLookupTableForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, double * T, const Quantization & q, const ParameterizedFunction *pf) const
+{
+  //be aware: index n-1 is only valid if we did not explicitly change the indices while inserting elements
+  //the code below is equivalent to the following line, but is more efficient, since we do not have to query the feature matrix several times:
+//   this->hikUpdateLookupTable(T, alpha, 0.0, n-1, q, pf );
+  if (T == NULL)
+  {
+    fthrow(Exception, "FastMinKernel::updateLookupTableForAlphaMultiplications LUT not initialized, run FastMinKernel::hikPrepareLookupTable first!");
+    return;
+  }
+  
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another,
+      // we skip this flexibility for now ...but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+  
+  // loop through all dimensions
+  for (NICE::SparseVector::const_iterator it = _v.begin(); it != _v.end(); it++)
+  {
+
+    int dim(it->first);
+
+    double x_i = it->second;
+    //as usual, we quantize the original features, but use the transformed values later on
+    int q_bin = q.quantize(x_i);      
+
+    //TODO we could speed this up by first doing a binary search for the position where the min changes, and then using two separate for-loops
+    for (uint j = 0; j < hmax; j++)
+    {
+      double fval;
+      
+      if (q_bin > j)
+        fval = prototypes[j]; //the prototypes are already transformed
+      else
+      {
+        if (pf != NULL)
+          fval = pf->f( 1, x_i );
+        else
+          fval = x_i;
+      }
+      
+      // pay attention: we use either the transformed prototypes or the REAL (transformed) feature value, not the quantized one!
+      T[ dim*hmax + j ] += alpha*fval;
+    }
+  }
+
+  delete [] prototypes;
+}
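+
+// Why the branch on q_bin above is correct (editorial note): for a new
+// example with value x_i in dimension `dim`, every lookup table entry has to
+// change by
+//   T[ dim*hmax + j ] += alpha * min( prototype_j, f(x_i) ) ,
+// and min( prototype_j, f(x_i) ) equals the (transformed) prototype whenever
+// the bin of x_i lies above j (q_bin > j), and the transformed feature value
+// f(x_i) otherwise -- exactly the two cases handled in the loop.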
+
+void FastMinKernel::updatePreparationForKVNApproximation(const NICE::SparseVector & _v, NICE::VVector & A, const ParameterizedFunction *pf) const
+{
+  for (NICE::SparseVector::const_iterator it = _v.begin(); it != _v.end(); it++)
+  {
+    int dim(it->first);  
+    int idx;
+    
+    // we use the original feature value for this search, not the transformed one (see FeatureMatrixT)
+    // we assume that the new example was already inserted into the FeatureMatrix
+    X_sorted.findLastInDimension(dim, it->second, idx);  
+    //we do not want to consider zero elements, since we store them in a sparse way
+    idx -= X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    // not the next one, but the current (position vs index)
+    idx--;
+    
+    // perform a resize operations, since we have a new element
+    A[dim].resize(A[dim].size()+1);
+    
+    // update :) -- shift the partial squared sums and add the squared new value
+    // (lower bound clamped to 1, so A[dim][-1] is never read)
+    for( int i = A[dim].size()-1; i >= std::max(idx,1); i--)
+    {
+      if (pf != NULL)
+        A[dim][i] = A[dim][i-1] + pow(pf->f ( 1, it->second ), 2);
+      else
+        A[dim][i] = A[dim][i-1] + pow(it->second, 2);
+    }
+    //special case: the new example is the smallest non-zero one in this dimension
+    if (idx == 0)
+      A[dim][0] = (pf != NULL) ? pow(pf->f ( 1, it->second ), 2) : pow(it->second, 2);
+  }
+}
+
+void FastMinKernel::updateLookupTableForKVNApproximation(const NICE::SparseVector & _v, double * T, const Quantization & q, const ParameterizedFunction *pf) const
+{
+  if (T == NULL)
+  {
+    fthrow(Exception, "FastMinKernel::updateLookupTableForKernelVectorNorm LUT not initialized, run FastMinKernel::hikPrepareLookupTableForKernelVectorNorm first!");
+    return;
+  }
+  
+  // number of quantization bins
+  uint hmax = q.size();
+
+  // store (transformed) prototypes
+  double *prototypes = new double [ hmax ];
+  for ( uint i = 0 ; i < hmax ; i++ )
+    if ( pf != NULL ) {
+      // FIXME: the transformed prototypes could change from one dimension to another,
+      // we skip this flexibility for now ...but it should be changed in the future
+      prototypes[i] = pf->f ( 1, q.getPrototype(i) );
+    } else {
+      prototypes[i] = q.getPrototype(i);
+    }
+   
+  // loop through all dimensions
+  for (NICE::SparseVector::const_iterator it = _v.begin(); it != _v.end(); it++)
+  {
+    int dim(it->first);
+
+    double x_i = it->second;
+    //as usual, we quantize the original features, but use the transformed values later on
+    int q_bin = q.quantize(x_i);      
+
+    //TODO we could speed this up by first doing a binary search for the position where the min changes, and then using two separate for-loops
+    for (uint j = 0; j < hmax; j++)
+    {
+      double fval;
+      
+      if (q_bin > j)
+        fval = prototypes[j]; //the prototypes are already transformed
+      else
+      {
+        if (pf != NULL)
+          fval = pf->f( 1, x_i );
+        else
+          fval = x_i;
+      }
+      
+      // pay attention: we use either the transformed prototypes or the REAL (transformed) feature value, not the quantized one!
+      T[ dim*hmax + j ] += pow( fval, 2 );
+    }
+  }
+  
+  delete [] prototypes;  
+}
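+
+// Incremental-learning workflow (editorial sketch, not part of the original
+// sources; `fmk`, `vNew`, `alphaNew`, the A/B structures, and the KVN
+// counterparts `A_kvn`/`T_kvn` are hypothetical names):
+//
+//   fmk.addExample ( vNew );                                  // 1) insert the features
+//   fmk.updatePreparationForAlphaMultiplications ( vNew, alphaNew, A, B );
+//   fmk.updateLookupTableForAlphaMultiplications ( vNew, alphaNew, T, q );
+//   fmk.updatePreparationForKVNApproximation ( vNew, A_kvn );
+//   fmk.updateLookupTableForKVNApproximation ( vNew, T_kvn, q );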

+ 437 - 0
FastMinKernel.h

@@ -0,0 +1,437 @@
+/** 
+* @file FastMinKernel.h
+* @brief Efficient GPs with HIK for classification by regression (Interface)
+* @author Alexander Freytag
+* @date 06-12-2011 (dd-mm-yyyy)
+*/
+#ifndef FASTMINKERNELINCLUDE
+#define FASTMINKERNELINCLUDE
+
+#include <iostream>
+
+#include <core/vector/MatrixT.h>
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VVector.h>
+#include <core/basics/Exception.h>
+#include "core/basics/Persistent.h"
+
+#include "FeatureMatrixT.h"
+#include "Quantization.h"
+#include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
+
+namespace NICE {
+
+
+/** 
+ * @class FastMinKernel
+ * @brief Efficient GPs with HIK for classification by regression
+ * @author Alexander Freytag
+ */  
+  
+  /** interface to FastMinKernel implementation*/
+  class FastMinKernel : NICE::Persistent
+  {
+
+    protected:
+      /** number of examples */
+      int n;
+
+      /** dimension of feature vectors */
+      int d; 
+
+      /** noise added to the diagonal of the kernel matrix */
+      double noise;
+      
+      /** sorted matrix of features (sorted along each dimension) */
+      NICE::FeatureMatrixT<double> X_sorted;
+      
+      //! verbose flag for output after calling the restore-function
+      bool verbose;
+      //! debug flag for output during debugging
+      bool debug;      
+
+      /** 
+      * @brief Set number of examples
+      * @author Alexander Freytag
+      * @date 07-12-2011 (dd-mm-yyyy)
+      */
+      void set_n(const int & _n){n = _n;};
+      
+      /** 
+      * @brief Set number of dimensions
+      * @author Alexander Freytag
+      * @date 07-12-2011 (dd-mm-yyyy)
+      */
+      void set_d(const int & _d){d = _d;};     
+
+      /** 
+      * @brief Prepare the efficient HIK-computations part 1: order the features in each dimension and save the permutation. Pay attention: X is of dim n x d, whereas X_sorted is of dimensionality d x n!
+      * @author Alexander Freytag
+      * @date 07-12-2011 (dd-mm-yyyy)
+      */
+      void hik_prepare_kernel_multiplications(const std::vector<std::vector<double> > & X, NICE::FeatureMatrixT<double> & X_sorted, const int & _dim = -1);
+      
+      void hik_prepare_kernel_multiplications ( const std::vector< NICE::SparseVector * > & X, NICE::FeatureMatrixT<double> & X_sorted, const bool & dimensionsOverExamples, const int & _dim = -1);
+      
+      void randomPermutation(NICE::Vector & permutation, const std::vector<int> & oldIndices, const int & newSize) const;
+      
+      enum ApproximationScheme{ MEDIAN = 0, EXPECTATION=1};
+      ApproximationScheme approxScheme;
+
+    public:
+
+      //------------------------------------------------------
+      // several constructors and destructors
+      //------------------------------------------------------
+      
+      /** 
+      * @brief dummy constructor
+      * @author Alexander Freytag
+      * @date 20-04-2012 (dd-mm-yyyy)
+      */
+      FastMinKernel();      
+      
+      /** 
+      * @brief initialize with some data
+      * @author Alexander Freytag
+      * @date 06-12-2011 (dd-mm-yyyy)
+      */
+      FastMinKernel( const std::vector<std::vector<double> > & X, const double noise , const bool _debug = false, const int & _dim = -1);
+
+      
+      /**
+      * @brief Just another sparse data structure
+      *
+      * @param X vector of sparse vector pointers
+      * @param noise GP noise
+      */
+      FastMinKernel( const std::vector< SparseVector * > & X, const double noise, const bool _debug = false, const bool & dimensionsOverExamples=false, const int & _dim = -1);
+
+#ifdef NICE_USELIB_MATIO
+      /**
+      * @brief initialize with some data given in a matlab-sparse struct and restricted with an example index
+      *
+      * @param X matlab-struct containing the feature vectors
+      * @param noise additional noise variance of the labels
+      * @param examples set of indices to include
+      */
+      FastMinKernel ( const sparse_t & X, const double noise, const std::map<int, int> & examples, const bool _debug = false , const int & _dim = -1);
+#endif
+
+      /** 
+      * @brief Default destructor
+      * @author Alexander Freytag
+      * @date 06-12-2011 (dd-mm-yyyy)
+      */
+      ~FastMinKernel();
+
+      //------------------------------------------------------
+      // several get and set methods including access operators
+      //------------------------------------------------------
+      
+      
+      void setApproximationScheme(const ApproximationScheme & _approxScheme = MEDIAN) {approxScheme = _approxScheme;};
+      
+      virtual void setApproximationScheme(const int & _approxScheme = 0);
+      
+      /** 
+      * @brief Get number of examples
+      * @author Alexander Freytag
+      * @date 07-12-2011 (dd-mm-yyyy)
+      */
+      int get_n() const {return n;};
+      
+      /** 
+      * @brief Get number of dimensions
+      * @author Alexander Freytag
+      * @date 07-12-2011 (dd-mm-yyyy)
+      */
+      int get_d() const {return d;};
+
+      /** 
+      * @brief Computes the ratio of sparsity across the matrix
+      * @author Alexander Freytag
+      * @date 11-01-2012 (dd-mm-yyyy)
+      */
+      double getSparsityRatio(){return X_sorted.computeSparsityRatio();};
+      
+      /** set verbose flag used for restore-functionality*/
+      void setVerbose( const bool & _verbose);
+      bool getVerbose( ) const;  
+      
+      /** set debug flag used for debug output*/
+      void setDebug( const bool & _debug);
+      bool getDebug( ) const;        
+      
+      //------------------------------------------------------
+      // high level methods
+      //------------------------------------------------------
+      
+      /**
+      * @brief apply a parameterized function to the feature matrix
+      * @author Alexander Freytag
+      * @date 04-05-2012 (dd-mm-yyyy)
+      *
+      * @param pf the parameterized function (optional), if not given, nothing will be done
+      */         
+      void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf = NULL );
+          
+      /** 
+      * @brief  Prepare the efficient HIK-computations part 2: calculate the partial sums for each dimension. Explicitly exploiting sparsity!!! Pay attention: X_sorted is of dimensionality d x n!
+      * @author Alexander Freytag
+      * @date 17-01-2012 (dd-mm-yyyy)
+      */
+      void hik_prepare_alpha_multiplications(const NICE::Vector & alpha, NICE::VVector & A, NICE::VVector & B) const;
+            
+      /**
+      * @brief Computing K*alpha with the minimum kernel trick, explicitly exploiting sparsity!!!
+      * @author Alexander Freytag
+      * @date 17-01-2012 (dd-mm-yyyy)
+      */
+      void hik_kernel_multiply(const NICE::VVector & A, const NICE::VVector & B, const NICE::Vector & alpha, NICE::Vector & beta) const;
+      void hik_kernel_multiply_fast(const double *Tlookup, const Quantization & q, const NICE::Vector & alpha, NICE::Vector & beta) const;
+
+      /**
+      * @brief Computing k_{*}*alpha using the minimum kernel trick and exploiting sparsity of the feature vector given
+      *
+      * @author Alexander Freytag
+      * @date 20-01-2012 (dd-mm-yyyy)
+      * @param A pre-computation matrix (VVector) (use the prepare method) 
+      * @param B pre-computation matrix (VVector)
+      * @param xstar new feature vector (SparseVector)
+      * @param beta result of the scalar product
+      * @param pf optional feature transformation
+      */
+      void hik_kernel_sum(const NICE::VVector & A, const NICE::VVector & B, const NICE::SparseVector & xstar, double & beta, const ParameterizedFunction *pf = NULL ) const;
+      
+      /**
+      * @brief compute beta = k_*^T * alpha by using a large lookup table created by hik_prepare_alpha_multiplications_fast
+      * @author Erik Rodner
+      *
+      * @param Tlookup large lookup table calculated by hik_prepare_alpha_multiplications_fast
+      * @param q Quantization object
+      * @param xstar feature vector (indirect k_*)
+      * @param beta result of the calculation
+      */
+      void hik_kernel_sum_fast(const double* Tlookup, const Quantization & q, const NICE::Vector & xstar, double & beta) const;
+      void hik_kernel_sum_fast(const double *Tlookup, const Quantization & q, const NICE::SparseVector & xstar, double & beta) const;
+
+      /**
+      * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
+      * @author Erik Rodner
+      *
+      * @param A pre-calculation array computed by hik_prepare_alpha_multiplications
+      * @param B pre-calculation array computed by hik_prepare_alpha_multiplications
+      * @param q Quantization
+      *
+      * @return pointer to a C-style array representing a q.size()*d double matrix (the lookup table T). Elements can be accessed with
+      * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
+      */
+      double *hik_prepare_alpha_multiplications_fast(const NICE::VVector & A, const NICE::VVector & B, const Quantization & q, const ParameterizedFunction *pf = NULL ) const;
+      
+      /**
+      * @brief compute lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
+      * @author Alexander Freytag
+      *
+      * @param alpha coefficient vector
+      * @param q Quantization
+      * @param pf ParameterizedFunction to change the original feature values
+      *
+      * @return pointer to a C-style array representing a q.size()*d double matrix (the lookup table T). Elements can be accessed with
+      * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
+      */
+      double* hikPrepareLookupTable(const NICE::Vector & alpha, const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+
+      /**
+      * @brief update the lookup table for HIK calculation using quantized signals and prepare for K*alpha or k_*^T * alpha computations
+      * @author Alexander Freytag
+      *
+      * @param T previously computed LUT, that will be changed
+      * @param alphaNew new value of alpha at index idx
+      * @param alphaOld old value of alpha at index idx
+      * @param idx index in which alpha changed
+      * @param q Quantization
+      * @param pf ParameterizedFunction to change the original feature values
+      */
+      void hikUpdateLookupTable(double * T, const double & alphaNew, const double & alphaOld, const int & idx, const Quantization & q, const ParameterizedFunction *pf ) const;
+
+      /**
+      * @brief return a reference to the sorted feature matrix
+      */
+      FeatureMatrix & featureMatrix(void) { return X_sorted; };
+      const FeatureMatrix & featureMatrix(void) const { return X_sorted; };
+      
+      /**
+       * @brief solve the linear system K*alpha = y with the minimum kernel trick based on the algorithm of Wu (Wu10_AFD)
+       * @note method converges slowly for large scale problems and even for normal scale :(
+       * @author Paul Bodesheim
+       * 
+       * @param y right hand side of linear system
+       * @param alpha final solution of the linear system
+       * @param q Quantization
+       * @param pf ParameterizedFunction to change the original feature values
+       * @param useRandomSubsets true, if the order of examples in each iteration should be randomly sampled
+       * @param maxIterations maximum number of iterations
+       * @param sizeOfRandomSubset number of elements that should be randomly considered in each iteration (max: y.size())
+       * @param minDelta minimum difference between two solutions alpha_t and alpha_{t+1} (convergence criterion)
+       * 
+       * @return pointer to a C-style array representing a q.size()*d double matrix (the lookup table T). Elements can be accessed with
+       * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
+       **/
+      double *solveLin(const NICE::Vector & y, NICE::Vector & alpha, const Quantization & q, const ParameterizedFunction *pf = NULL, const bool & useRandomSubsets = true, uint maxIterations = 10000, const int & _sizeOfRandomSubset = (-1), double minDelta = 1e-7, bool timeAnalysis = false) const;
+
+
+      //! set the noise parameter
+      void setNoise ( double noise ) { this->noise = noise; }
+
+      //! get the current noise parameter
+      double getNoise (void) const { return noise; }
+      
+      double getFrobNormApprox();
+      
+      
+      /** 
+      * @brief  Prepare the efficient HIK-computations for the squared kernel vector |k_*|^2 : calculate the partial squared sums for each dimension.
+      * @author Alexander Freytag
+      * @date 10-04-2012 (dd-mm-yyyy)
+      */
+      void hikPrepareKVNApproximation(NICE::VVector & A) const;
+      
+      /** 
+      * @brief  Compute lookup table for HIK calculation of |k_*|^2 assuming quantized test samples. You have to run hikPrepareKVNApproximation before
+      * @author Alexander Freytag
+      * @date 10-04-2012 (dd-mm-yyyy)
+      * 
+      * @param A pre-calculation array computed by hikPrepareKVNApproximation
+      * @param q Quantization
+      * @param pf Parameterized Function to efficiently apply a function to the underlying data
+      *
+      * @return pointer to a C-style array representing a q.size()*d double matrix (the lookup table T). Elements can be accessed with
+      * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
+      */
+      double * hikPrepareKVNApproximationFast(NICE::VVector & A, const Quantization & q, const ParameterizedFunction *pf = NULL ) const;
+      
+      /**
+      * @brief Compute lookup table for HIK calculation of |k_*|^2 assuming quantized test samples (equals hikPrepareKVNApproximation + hikPrepareKVNApproximationFast, but is faster). The approximation does not consider mixed terms between dimensions.
+      * @author Alexander Freytag
+      * @date 10-04-2012 (dd-mm-yyyy)
+      *
+      * @param q Quantization
+      * @param pf ParameterizedFunction to change the original feature values
+      *
+      * @return pointer to a C-style array representing a q.size()*d double matrix (the lookup table T). Elements can be accessed with
+      * T[dim*q.size() + j], where j is a bin entry corresponding to quantization q.
+      */
+      double* hikPrepareLookupTableForKVNApproximation(const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+      
+      /**
+      * @brief Approximate norm = |k_*|^2 using the minimum kernel trick and exploiting sparsity of the given feature vector. The approximation does not consider mixed terms between dimensions.
+      * @author Alexander Freytag
+      * @date 10-04-2012 (dd-mm-yyyy)
+      * 
+      * @param A pre-computation matrix (VVector) (use the prepare method) 
+      * @param xstar new feature vector (SparseVector)
+      * @param norm result of the squared norm approximation
+      * @param pf optional feature transformation
+      */
+      void hikComputeKVNApproximation(const NICE::VVector & A, const NICE::SparseVector & xstar, double & norm, const ParameterizedFunction *pf = NULL ) ;
+      
+      /**
+      * @brief Approximate norm = |k_*|^2 using a large lookup table created by hikPrepareKVNApproximation and hikPrepareKVNApproximationFast, or directly via hikPrepareLookupTableForKVNApproximation. The approximation does not consider mixed terms between dimensions.
+      * @author Alexander Freytag
+      * @date 10-04-2012 (dd-mm-yyyy)
+      *
+      * @param Tlookup large lookup table
+      * @param q Quantization object
+      * @param xstar feature vector (indirect k_*)
+      * @param norm result of the calculation
+      */
+      void hikComputeKVNApproximationFast(const double *Tlookup, const Quantization & q, const NICE::SparseVector & xstar, double & norm ) const;
+
+      /**
+      * @brief Compute the kernel vector k_* between training examples and a test example. Runtime: O(n \times D), exploiting sparsity.
+      * @author Alexander Freytag
+      * @date 13-04-2012 (dd-mm-yyyy)
+      *
+      * @param xstar feature vector
+      * @param kstar kernel vector
+      */      
+      void hikComputeKernelVector( const NICE::SparseVector & xstar, NICE::Vector & kstar) const;
+      
+      /** Persistent interface */
+      virtual void restore ( std::istream & is, int format = 0 );
+      virtual void store ( std::ostream & os, int format = 0 ) const; 
+      virtual void clear ();
+      
+      // ----------------- INCREMENTAL LEARNING METHODS -----------------------
+      
+      /**
+      * @brief Add a new example to the feature storage. You have to update the corresponding variables explicitly after that.
+      * @author Alexander Freytag
+      * @date 25-04-2012 (dd-mm-yyyy)
+      *
+      * @param _v new feature vector
+      */       
+      void addExample(const NICE::SparseVector & _v, const ParameterizedFunction *pf = NULL);
+      /**
+      * @brief Add a new example to the feature storage. You have to update the corresponding variables explicitly after that.
+      * @author Alexander Freytag
+      * @date 25-04-2012 (dd-mm-yyyy)
+      *
+      * @param _v new feature vector
+      */       
+      void addExample(const std::vector<double> & _v, const ParameterizedFunction *pf = NULL);
+      
+      /**
+      * @brief Updates A and B matrices for fast kernel multiplications and kernel sums. You need to compute the new alpha value and run addExample first!
+      * @author Alexander Freytag
+      * @date 25-04-2012 (dd-mm-yyyy)
+      *
+      * @param _v new feature vector
+      * @param alpha new alpha value for the corresponding feature
+      * @param A precomputed matrix A which will be updated accordingly
+      * @param B precomputed matrix B which will be updated accordingly
+      * @param pf optional feature transformation
+      */       
+      void updatePreparationForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, NICE::VVector & A, NICE::VVector & B, const ParameterizedFunction *pf = NULL) const;
+      /**
+      * @brief Updates LUT T for very fast kernel multiplications and kernel sums. You need to compute the new alpha value and run addExample first!
+      * @author Alexander Freytag
+      * @date 26-04-2012 (dd-mm-yyyy)
+      *
+      * @param _v new feature vector
+      * @param alpha new alpha value for the corresponding feature
+      * @param T precomputed lookup table, which will be updated
+      * @param q quantization object to quantize possible test samples
+      * @param pf optional feature transformation
+      */       
+      void updateLookupTableForAlphaMultiplications(const NICE::SparseVector & _v, const double & alpha, double * T, const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+      
+      /**
+      * @brief Updates matrix A for approximations of the kernel vector norm. You need to run addExample first!
+      * @author Alexander Freytag
+      * @date 26-04-2012 (dd-mm-yyyy)
+      *
+      * @param _v new feature vector
+      * @param A precomputed matrix A which will be updated accordingly
+      * @param pf optional feature transformation
+      */       
+      void updatePreparationForKVNApproximation(const NICE::SparseVector & _v, NICE::VVector & A, const ParameterizedFunction *pf = NULL) const;
+      /**
+      * @brief Updates LUT T for fast approximations of the kernel vector norm. You need to run addExample first!
+      * @author Alexander Freytag
+      * @date 26-04-2012 (dd-mm-yyyy)
+      *
+      * @param _v new feature vector
+      * @param T precomputed lookup table, which will be updated
+      * @param q quantization object to quantize possible test samples
+      * @param pf optional feature transformation
+      */       
+      void updateLookupTableForKVNApproximation(const NICE::SparseVector & _v, double * T, const Quantization & q, const ParameterizedFunction *pf = NULL) const;
+
+  };
+
+} // namespace
+
+#endif
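+
+// Minimal end-to-end sketch for this interface (editorial addition; variable
+// names, the noise value, and the training of `alpha` are illustrative only):
+//
+//   std::vector< std::vector<double> > X; // n examples, d dims, values in [0,1]
+//   NICE::FastMinKernel fmk ( X, 0.1 /* noise */ );
+//
+//   NICE::VVector A, B;
+//   fmk.hik_prepare_alpha_multiplications ( alpha, A, B ); // alpha from GP training
+//   NICE::Vector beta;
+//   fmk.hik_kernel_multiply ( A, B, alpha, beta );         // beta = K * alpha
+//
+//   double score ( 0.0 );
+//   fmk.hik_kernel_sum ( A, B, xstarSparse, score );       // score = k_*^T * alpha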

+ 417 - 0
FeatureMatrixT.h

@@ -0,0 +1,417 @@
+/** 
+* @file FeatureMatrixT.h
+* @brief A feature matrix, storing (sparse) features sorted per dimension (Interface)
+* @author Alexander Freytag
+* @date 07-12-2011 (dd-mm-yyyy)
+*/
+#ifndef FEATUREMATRIXINCLUDE
+#define FEATUREMATRIXINCLUDE
+
+#include <vector>
+#include <set>
+#include <map>
+#include <iostream>
+#include <limits>
+
+#include <core/basics/Exception.h>
+#include "core/basics/Persistent.h"
+
+#include <core/vector/MatrixT.h>
+#include <core/vector/SparseVectorT.h>
+
+#ifdef NICE_USELIB_MATIO
+  #include <core/matlabAccess/MatFileIO.h> 
+#endif
+  
+
+#include "SortedVectorSparse.h"
+#include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
+
+
+namespace NICE {
+
+  /** 
+ * @class FeatureMatrixT
+ * @brief A feature matrix, storing (sparse) features sorted per dimension
+ * @author Alexander Freytag
+ */  
+  
+template<class T> class FeatureMatrixT : NICE::Persistent
+{
+
+  protected:
+    int n;
+    int d;
+    std::vector<NICE::SortedVectorSparse<T> > features;
+    
+    //! verbose flag for output after calling the restore-function
+    bool verbose;
+    //! debug flag for output during debugging
+    bool debug;
+
+
+  public:
+    
+  //! STL-like typedef for type of elements
+  typedef T value_type;
+
+  //! STL-like typedef for const element reference
+  typedef const T& const_reference;
+
+  //! STL-like typedef for iterator
+  typedef T* iterator;
+
+  //! STL-like typedef for const iterator
+  typedef const T* const_iterator;
+
+  //! STL-like typedef for element reference
+  typedef T& reference;
+  
+    //------------------------------------------------------
+    // several constructors and destructors
+    //------------------------------------------------------
+  
+    /** 
+    * @brief Default constructor
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    FeatureMatrixT();
+    
+    /** 
+    * @brief Recommended constructor
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    FeatureMatrixT(const std::vector<std::vector<T> > & _features, const int & _dim = -1);
+    
+#ifdef NICE_USELIB_MATIO
+    /** 
+    * @brief Constructor reading data from matlab-files
+    * @author Alexander Freytag
+    * @date 10-01-2012 (dd-mm-yyyy)
+    */
+    FeatureMatrixT(const sparse_t & _features, const int & _dim = -1);//, const int & nrFeatures);
+#endif
+
+    /** just another constructor for sparse features */
+    FeatureMatrixT(const std::vector< SparseVector * > & X, const bool dimensionsOverExamples = false, const int & _dim = -1);
+    
+#ifdef NICE_USELIB_MATIO
+    /**
+    * @brief Constructor reading data from matlab-files and providing the possibility to
+    * restrict the number of examples to a certain subset
+    *
+    * @param _features sparse data matrix (see MatFileIO)
+    * @param examples set of example indices
+    */
+    FeatureMatrixT(const sparse_t & _features, const std::map<int, int> & examples , const int & _dim = -1);
+#endif
+
+    /** 
+    * @brief Default destructor
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    ~FeatureMatrixT();
+    
+    //------------------------------------------------------
+    // several get and set methods including access operators
+    //------------------------------------------------------
+    
+    /** 
+    * @brief Get number of examples
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+      int get_n() const;
+    /** 
+    * @brief Get number of dimensions
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+      int get_d() const;
+      
+    /** 
+    * @brief Sets the given dimension and re-sizes internal data structure. WARNING: this will completely remove your current data!
+    * @author Alexander Freytag
+    * @date 06-12-2011 (dd-mm-yyyy)
+    */
+      void set_d(const int & _d);
+      
+    /** set verbose flag used for restore-functionality*/
+    void setVerbose( const bool & _verbose);
+    bool getVerbose( ) const;     
+    
+    /** set debug flag used for debug output*/
+    void setDebug( const bool & _debug);
+    bool getDebug( ) const;        
+      
+      
+    /** 
+    * @brief  Compare F with this
+    * @author Alexander Freytag
+    * @date 05-01-2012 (dd-mm-yyyy)
+    * @pre Dimensions of \c F and \c this must be equal
+    * @param F data to compare with
+    * @return true if \c F and \c this are equal
+    */
+    inline bool operator==(const FeatureMatrixT<T> & F) const;
+    
+    /**
+    * @brief Compare \c F with \c this.
+    * @author Alexander Freytag
+    * @date 05-01-2012 (dd-mm-yyyy)
+    * @pre Size of \c F and \c this must be equal
+    * @param F data to compare with
+    * @return true if \c F and \c this are not equal
+    */
+    inline bool operator!= (const FeatureMatrixT<T> & F) const;
+
+    /**
+    * @brief Copy data from \c F to \c this.
+    * @author Alexander Freytag
+    * @date 05-01-2012 (dd-mm-yyyy)
+    * @param F new data
+    * @return \c *this
+    */
+    inline FeatureMatrixT<T>& operator=(const FeatureMatrixT<T> & F);
+      
+    /** 
+    * @brief Matrix-like operator for element access, performs validity check
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    inline T operator()(const int row, const int col) const;
+    
+    /** 
+    * @brief Element access without validity check
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    inline T getUnsafe(const int row, const int col) const;
+
+    /** 
+    * @brief Element access of original values without validity check
+    * @author Erik Rodner
+    */
+    inline T getOriginal(const int row, const int col) const;
+
+    /** 
+    * @brief Sets a specified element to the given value, performs validity check
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    inline void set (const int row, const int col, const T & newElement, bool setTransformedValue = false);
+    
+    /** 
+    * @brief Sets a specified element to the given value, without validity check
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    inline void setUnsafe (const int row, const int col, const T & newElement, bool setTransformedValue = false);
+    
+    /** 
+    * @brief Access to all element entries of a specified dimension, including validity check
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void getDimension(const int & dim, NICE::SortedVectorSparse<T> & dimension) const;
+    
+    /** 
+    * @brief Access to all element entries of a specified dimension, without validity check
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void getDimensionUnsafe(const int & dim, NICE::SortedVectorSparse<T> & dimension) const;
+    
+    /** 
+    * @brief Finds the first element in a given dimension, which equals elem (orig feature value, not the transformed one)
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void findFirstInDimension(const int & dim, const T & elem, int & position) const;
+    
+    /** 
+    * @brief Finds the last element in a given dimension, which equals elem (orig feature value, not the transformed one)
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void findLastInDimension(const int & dim, const T & elem, int & position) const;
+    
+    /** 
+    * @brief Finds the first element in a given dimension which is larger than elem (orig feature value, not the transformed one)
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void findFirstLargerInDimension(const int & dim, const T & elem, int & position) const;
+    
+    /** 
+    * @brief Finds the last element in a given dimension which is smaller than elem (orig feature value, not the transformed one)
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void findLastSmallerInDimension(const int & dim, const T & elem, int & position) const;
+    
+    //------------------------------------------------------
+    // high level methods
+    //------------------------------------------------------
+    
+    /**
+    * @brief apply a parameterized function to the feature matrix
+    * @author Alexander Freytag
+    * @date 04-05-2012 (dd-mm-yyyy)
+    *
+    * @param pf the parameterized function (optional), if not given, nothing will be done
+    */    
+    void applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf = NULL );
+    
+    /** 
+    * @brief Computes the ratio of sparsity across the matrix
+    * @author Alexander Freytag
+    * @date 11-01-2012 (dd-mm-yyyy)
+    */
+    double computeSparsityRatio();
+
+    /** 
+    * @brief add a new feature and insert its elements in the already ordered structure
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    void add_feature(const std::vector<T> & feature, const NICE::ParameterizedFunction *pf = NULL);
+    /** 
+    * @brief add a new feature and insert its elements in the already ordered structure; values will be cast to type T
+    * @author Alexander Freytag
+    * @date 25-04-2012 (dd-mm-yyyy)
+    */    
+    void add_feature(const NICE::SparseVector & feature, const NICE::ParameterizedFunction *pf = NULL);
+
+    /** 
+    * @brief add several new features and insert their elements in the already ordered structure
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    void add_features(const std::vector<std::vector<T> > & _features );
+    
+    /** 
+    * @brief set the stored features to new values - which means deleting the old data and inserting the new ones. Returns the resulting permutation for each dimension
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    void set_features(const std::vector<std::vector<T> > & _features, std::vector<std::vector<int> > & permutations, const int & _dim = -1);
+    void set_features(const std::vector<std::vector<T> > & _features, std::vector<std::map<int,int> > & permutations, const int & _dim = -1);
+    void set_features(const std::vector<std::vector<T> > & _features, const int & _dim = -1);
+    void set_features(const std::vector< NICE::SparseVector * > & _features, const bool dimensionsOverExamples = false, const int & _dim = -1);
+    
+    /**
+    * @brief get a permutation vector for each dimension
+    *
+    * @param permutations resulting permutation matrix
+    */
+    void getPermutations( std::vector<std::vector<int> > & permutations) const;
+    void getPermutations( std::vector<std::map<int,int> > & permutations) const;
+      
+    /** 
+    * @brief Prints the whole Matrix (outer loop over dimension, inner loop over features)
+    * @author Alexander Freytag
+    * @date 07-12-2011 (dd-mm-yyyy)
+    */
+    void print(std::ostream & os) const;
+    
+    /** 
+    * @brief Computes the whole non-sparse matrix. WARNING: this may result in a really memory-consuming data-structure!
+    * @author Alexander Freytag
+    * @date 12-01-2012 (dd-mm-yyyy)
+    */
+    void computeNonSparseMatrix(NICE::MatrixT<T> & matrix, bool transpose = false) const;
+    
+    /** 
+    * @brief Computes the whole non-sparse matrix. WARNING: this may result in a really memory-consuming data-structure!
+    * @author Alexander Freytag
+    * @date 12-01-2012 (dd-mm-yyyy)
+    */
+    void computeNonSparseMatrix(std::vector<std::vector<T> > & matrix, bool transpose = false) const;
+    
+    /** 
+    * @brief Swaps two specified elements, performing a validity check
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void swap(const int & row1, const int & col1, const int & row2, const int & col2);
+    
+    /** 
+    * @brief Swaps two specified elements, without performing a validity check
+    * @author Alexander Freytag
+    * @date 08-12-2011 (dd-mm-yyyy)
+    */
+    void swapUnsafe(const int & row1, const int & col1, const int & row2, const int & col2);
+
+    /**
+    * @brief direct access to elements
+    *
+    * @param dim feature index
+    *
+    * @return sorted feature values
+    */
+    const SortedVectorSparse<T> & getFeatureValues ( int dim ) const { return features[dim]; };
+ 
+    /**
+    * @brief direct read/write access to elements
+    *
+    * @param dim feature index
+    *
+    * @return sorted feature values
+    */
+    SortedVectorSparse<T> & getFeatureValues ( int dim ) { return features[dim]; };
+   
+    
+    /**
+    * @brief compute the diagonal elements of the HIK kernel matrix induced by the features
+    *
+    * @param diagonalElements resulting vector
+    */
+    void hikDiagonalElements( Vector & diagonalElements ) const;
+
+    /**
+    * @brief Compute the trace of the HIK kernel matrix induced by the features
+    *
+    * @return value of the trace
+    */
+    double hikTrace() const;
+    
+    /**
+    * @brief Return the number of nonzero elements in a specified dimension, that are currently stored in the feature matrix
+    *
+    * @return number of nonzero elements on the specified dimension
+    */ 
+    int getNumberOfNonZeroElementsPerDimension(const int & dim) const;
+   
+    /**
+    * @brief Return the number of zero elements in a specified dimension, that are currently stored in the feature matrix
+    *
+    * @return number of nonzero elements on the specified dimension
+    */ 
+    int getNumberOfZeroElementsPerDimension(const int & dim) const;
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 );
+    virtual void store ( std::ostream & os, int format = 0 ) const;
+    virtual void clear ( );
+
+};
+
+  //! default definition for a FeatureMatrix
+  typedef FeatureMatrixT<double> FeatureMatrix;
+  typedef FeatureMatrixT<bool> BoolFeatureMatrix;
+  typedef FeatureMatrixT<char> CharFeatureMatrix;
+  typedef FeatureMatrixT<int> IntFeatureMatrix;
+  typedef FeatureMatrixT<float> FloatFeatureMatrix;
+
+
+} // namespace
+
+#ifdef __GNUC__
+#include "gp-hik-core/FeatureMatrixT.tcc"
+#endif
+
+#endif
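+
+// Small usage sketch (editorial addition, illustrative values only): the
+// matrix stores features sorted per dimension, so per-dimension statistics
+// and the HIK diagonal are cheap to obtain.
+//
+//   NICE::FeatureMatrix fm ( features ); // std::vector<std::vector<double> >
+//   int nnz = fm.getNumberOfNonZeroElementsPerDimension ( 0 );
+//   NICE::Vector diag;
+//   fm.hikDiagonalElements ( diag );     // K_ii of the HIK kernel matrix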

+ 949 - 0
FeatureMatrixT.tcc

@@ -0,0 +1,949 @@
+/** 
+* @file FeatureMatrixT.tcc
+* @brief A feature matrix, storing (sparse) features sorted per dimension (Implementation)
+* @author Alexander Freytag
+* @date 07-12-2011 (dd-mm-yyyy)
+*/
+// #ifndef FEATUREMATRIX_TCC
+// #define FEATUREMATRIX_TCC
+
+
+#include "FeatureMatrixT.h"
+
+namespace NICE {
+
+    
+
+    //------------------------------------------------------
+    // several constructors and destructors
+    //------------------------------------------------------
+
+    // Default constructor
+    template <typename T>
+    FeatureMatrixT<T>::FeatureMatrixT()
+    {
+      n = 0;
+      d = 0;
+      features.clear();
+      verbose = false;
+      debug = false;
+    }
+    
+
+    // Recommended constructor
+    template <typename T>
+    FeatureMatrixT<T>::FeatureMatrixT(const std::vector<std::vector<T> > & _features, const int & _dim)
+    {
+      n = 0;
+      // guard against an empty input; (*_features.begin()) must not be dereferenced then
+      if (_dim < 0)
+        d = _features.empty() ? 0 : (*_features.begin()).size();
+      else
+        d = _dim;
+      
+      for (typename std::vector<std::vector<T> >::const_iterator it = _features.begin(); it != _features.end(); it++)
+      {
+        add_feature(*it);
+      }
+      verbose = false;
+      debug = false;
+    }
+
+    //Constructor reading data from a vector of sparse vector pointers
+    template <typename T>
+    FeatureMatrixT<T>::
+    FeatureMatrixT(const std::vector< SparseVector * > & X, const bool dimensionsOverExamples, const int & _dim)
+    {
+      features.clear();
+      
+      // resize our data structure
+      if (_dim >= 0) //did the user specify the number of dimensions?
+        set_d(_dim);
+      else //dimensions not specified by the user
+      {
+        if (dimensionsOverExamples) //do we have dim x examples ?
+        {
+          set_d(X.size());
+        }
+        else //we have examples x dims (as usually done)
+        {
+          if (X.size() > 0) //and have at least one example
+            set_d(X[0]->getDim());  
+          else //no example, so set the dim to 0, since we have no idea at all
+          {
+            set_d(0);
+          }          
+        }
+      }
+           
+      // set number of examples n
+      if (d>0)
+      {
+        if (dimensionsOverExamples) //do we have dim x examples ?
+          n = X[0]->getDim(); //NOTE Pay attention: we assume that this number is set!
+        else //we have examples x dims (as usually done)   
+          n = X.size(); 
+      }  
+
+     
+      // insert all values
+      if (dimensionsOverExamples) //do we have dim x examples ?
+      {
+        for (int dim = 0; dim < d; dim++)
+        {
+          features[dim].insert( X[dim] );
+        }
+      }
+      else //we have examples x dims (as usually done)
+      {
+        //loop over every example to add its content
+        for (int nr = 0; nr < n; nr++)
+        {
+          //loop over every dimension to add the specific value to the corresponding SortedVectorSparse
+          for (NICE::SparseVector::const_iterator elemIt = X[nr]->begin(); elemIt != X[nr]->end(); elemIt++)
+          {
+            //elemIt->first: dim, elemIt->second: value
+            features[elemIt->first].insert( (T) elemIt->second, nr);
+          }//for non-zero-values of the feature
+        }//for every new feature
+      }//if dimOverEx
+
+      //set n for the internal data structure SortedVectorSparse
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::iterator it = features.begin(); it != features.end(); it++)
+        (*it).setN(n);
+    }
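+    // Illustrative sketch of the two supported layouts (a hypothetical example,
+    // not part of the library):
+    //
+    //   std::vector<NICE::SparseVector *> X;        // filled with sparse vectors
+    //   FeatureMatrixT<double> fmEx(X, false, -1);  // X[i] is example i (examples x dims)
+    //   FeatureMatrixT<double> fmDim(X, true, -1);  // X[i] is dimension i (dims x examples);
+    //                                               // here n is taken from X[0]->getDim(),
+    //                                               // which therefore must be set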
+
+#ifdef NICE_USELIB_MATIO
+    //Constructor reading data from matlab-files
+    template <typename T>
+    FeatureMatrixT<T>::
+    FeatureMatrixT(const sparse_t & _features, const int & _dim)
+    {
+      if (_dim < 0)
+        set_d(_features.njc -1);
+      else
+        set_d(_dim);
+      
+      int nMax(0);
+
+      for ( int i = 0; i < _features.njc-1; i++ ) //walk over dimensions
+      {
+        for ( int j = _features.jc[i]; j < _features.jc[i+1] && j < _features.ndata; j++ ) //walk over single features, which are sparsely represented
+        {
+          features[i].insert(((T*)_features.data)[j], _features.ir[j]);
+          if ((_features.ir[j]) > nMax) nMax = _features.ir[j];
+        }
+      }
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::iterator it = features.begin(); it != features.end(); it++)
+      {
+        (*it).setN(nMax+1);
+      }
+      n = nMax+1;
+      verbose = false;
+    }
+
+    //Constructor reading data from matlab-files
+    template <typename T>
+    FeatureMatrixT<T>::
+    FeatureMatrixT(const sparse_t & _features, const std::map<int, int> & examples, const int & _dim)
+    {
+      if (_dim < 0)
+        set_d(_features.njc -1);
+      else
+        set_d(_dim);
+      
+      int nMax(0);
+
+      for ( int i = 0; i < _features.njc-1; i++ ) //walk over dimensions
+      {
+        for ( int j = _features.jc[i]; j < _features.jc[i+1] && j < _features.ndata; j++ ) //walk over single features, which are sparsely represented
+        {
+          int example_index = _features.ir[ j];
+          std::map<int, int>::const_iterator it = examples.find(example_index);
+          if ( it != examples.end() ) {
+            features[i].insert(((T*)_features.data)[j], it->second /* new index */);
+            if (it->second > nMax) nMax = it->second;
+          }
+        }
+      }
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::iterator it = features.begin(); it != features.end(); it++)
+        (*it).setN(nMax+1);
+    
+      n = nMax+1;
+      verbose = false;
+    }
+#endif
+
+    // Default destructor
+    template <typename T>
+    FeatureMatrixT<T>::~FeatureMatrixT()
+    {
+    }
+    
+    //------------------------------------------------------
+    // several get and set methods including access operators
+    //------------------------------------------------------
+    
+    // Get number of examples
+    template <typename T>
+    int FeatureMatrixT<T>::get_n() const
+    {
+      return n;
+    }
+      
+    //  Get number of dimensions
+    template <typename T>
+    int FeatureMatrixT<T>::get_d() const
+    {
+      return d;
+    }
+      
+    //  Sets the given dimension and re-sizes internal data structure. WARNING: this will completely remove your current data!
+    template <typename T>
+    void FeatureMatrixT<T>::set_d(const int & _d)
+    {
+      d = _d; features.resize(d);
+    }
+    
+    template <typename T>
+    void FeatureMatrixT<T>::setVerbose( const bool & _verbose)
+    {
+      verbose = _verbose;
+    }
+    
+    template <typename T>
+    bool FeatureMatrixT<T>::getVerbose( )   const
+    {
+      return verbose;
+    } 
+    
+    template <typename T>
+    void FeatureMatrixT<T>::setDebug( const bool & _debug)
+    {
+      debug = _debug;
+    }
+    
+    template <typename T>
+    bool FeatureMatrixT<T>::getDebug( )   const
+    {
+      return debug;
+    }     
+      
+    //  Matrix-like operator for element access, performs validity check
+    template <typename T>
+    inline T FeatureMatrixT<T>::operator()(const int row, const int col) const
+    {
+      if ( (row < 0) || (col < 0) || (row >= d) || (col >= n) )
+      {
+        fthrow(Exception, "FeatureMatrixT: out of bounds");
+      }
+      else
+        return (features[row]).access(col);
+    }
+    
+    template<class T>
+    inline bool
+    FeatureMatrixT<T>::operator==(const FeatureMatrixT<T> & F) const
+    {
+      if ( ( (*this).get_n() != F.get_n()) || ((*this).get_d() != F.get_d()) )
+      {
+        fthrow(Exception, "FeatureMatrixT<T>::operator== : (n != F.get_n()) || (d != F.get_d()) -- number of dimensions does not fit");
+      }
+      else if ((n == 0) || (d == 0))
+      {
+        return true;
+      }
+      
+      for (int i = 0; i < d; i++)
+      {
+        for (int j = 0; j < n; j++)
+        {
+          // FIXME: it would be more efficient if we compare SortedVectorSparse objects here
+          if(!((*this)(i,j) == F(i,j)))
+            return false;
+        }
+      }
+      return true;
+    }
+
+    template<class T>
+    inline bool
+    FeatureMatrixT<T>::operator!=(const FeatureMatrixT<T> & F) const
+    {
+      if ( ( (*this).get_n() != F.get_n()) || ((*this).get_d() != F.get_d()) )
+      {
+        fthrow(Exception, "FeatureMatrixT::operator!=(): (n != F.get_n()) || (d != F.get_d()) -- number of dimensions does not fit");
+      }
+      else if ((n == 0) || (d == 0))
+      {
+        return false;
+      }
+      
+      for (int i = 0; i < d; i++)
+      {
+        for (int j = 0; j < n; j++)
+        {
+          if(!((*this)(i,j) == F(i,j)))
+            return true;
+        }
+      }
+      return false;
+    }
+    
+    template<typename T>
+    inline FeatureMatrixT<T>&
+    FeatureMatrixT<T>::operator=(const FeatureMatrixT<T> & F)
+    {
+      (*this).set_d(F.get_d());
+      
+      (*this).n = F.get_n();
+      
+      for (int i = 0; i < (*this).get_d(); i++)
+      {
+        // use the operator= of SortedVectorSparse
+        features[i] = F[i];
+      }
+      
+      return *this;
+    }
+    
+    //  Element access without validity check
+    template <typename T>
+    inline T FeatureMatrixT<T>::getUnsafe(const int row, const int col) const
+    {
+      return (features[row]).access(col);
+    }
+
+    //! Element access of original values without validity check
+    template <typename T>
+    inline T FeatureMatrixT<T>::getOriginal(const int row, const int col) const
+    {
+      return (features[row]).accessOriginal(col);
+    }
+
+    //  Sets a specified element to the given value, performs validity check
+    template <typename T>
+    inline void FeatureMatrixT<T>::set (const int row, const int col, const T & newElement, bool setTransformedValue)
+    {
+      if ( (row < 0) || (col < 0) || (row >= d) || (col >= n) )
+      {
+        return;
+      }
+      else
+        (features[row]).set ( col, newElement, setTransformedValue );
+    }
+    
+    //  Sets a specified element to the given value, without validity check
+    template <typename T>
+    inline void FeatureMatrixT<T>::setUnsafe (const int row, const int col, const T & newElement, bool setTransformedValue)
+    {
+      (features[row]).set ( col, newElement, setTransformedValue );
+    }
+    
+    //  Access to all element entries of a specified dimension, including validity check
+    template <typename T>
+    void FeatureMatrixT<T>::getDimension(const int & dim, NICE::SortedVectorSparse<T> & dimension) const
+    {
+      if ( (dim < 0) || (dim >= d) )
+      {
+        return;
+      }
+      else
+        dimension = features[dim];
+    }
+    
+    //  Access to all element entries of a specified dimension, without validity check
+    template <typename T>
+    void FeatureMatrixT<T>::getDimensionUnsafe(const int & dim, NICE::SortedVectorSparse<T> & dimension) const
+    {
+      dimension = features[dim];
+    }
+    
+    // Finds the first element in a given dimension, which equals elem
+    template <typename T>
+    void FeatureMatrixT<T>::findFirstInDimension(const int & dim, const T & elem, int & position) const
+    {
+      position = -1;
+      if ( (dim < 0) || (dim >= d))
+        return;
+
+      std::pair< typename SortedVectorSparse<T>::elementpointer, typename SortedVectorSparse<T>::elementpointer > eit;
+      eit =  features[dim].nonzeroElements().equal_range ( elem );
+      position = distance( features[dim].nonzeroElements().begin(), eit.first );
+      if ( elem > features[dim].getTolerance() )
+        position += features[dim].getZeros();
+
+    }
+    
+    //  Finds the last element in a given dimension, which equals elem
+    template <typename T>
+    void FeatureMatrixT<T>::findLastInDimension(const int & dim, const T & elem, int & position) const
+    {
+      position = -1;
+      if ( (dim < 0) || (dim >= d))
+        return;
+
+      std::pair< typename SortedVectorSparse<T>::const_elementpointer, typename SortedVectorSparse<T>::const_elementpointer > eit =  features[dim].nonzeroElements().equal_range ( elem );
+      position = distance( features[dim].nonzeroElements().begin(), eit.second );
+      if ( elem > features[dim].getTolerance() )
+        position += features[dim].getZeros();
+    }
+    
+    //  Finds the first element in a given dimension which is larger than elem
+    template <typename T>
+    void FeatureMatrixT<T>::findFirstLargerInDimension(const int & dim, const T & elem, int & position) const
+    {
+      position = -1;
+      if ( (dim < 0) || (dim >= d))
+        return;
+      
+      //no non-zero elements?
+      if (features[dim].getNonZeros() <= 0)
+      {
+        // if the element is greater than zero, then it should be added at the last position
+        if (elem > features[dim].getTolerance() )
+          position = this->n;
+        
+        //if not, position is -1
+        return;
+      }
+      
+      if (features[dim].getNonZeros() == 1)
+      {
+        // if the element is greater than the only nonzero element, then it is larger than everything else
+        if (features[dim].nonzeroElements().begin()->first <= elem)
+          position = this->n;
+        
+        //if not, but the element is still greater than zero, then
+        else if (elem > features[dim].getTolerance() )
+          position = this->n -1;
+          
+        return;
+      }
+      
+      typename SortedVectorSparse<T>::const_elementpointer it =  features[dim].nonzeroElements().end(); //this is needed !!!
+      it = features[dim].nonzeroElements().upper_bound ( elem ); //if all values are smaller, this does not do anything at all
+      
+      position = distance( features[dim].nonzeroElements().begin(), it );
+
+      if ( elem > features[dim].getTolerance() )
+      {
+        //position += features[dim].getZeros();
+        position += n - features[dim].getNonZeros();
+      }
+    }
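+    // Worked example (illustrative): assume dimension dim holds the values
+    // {0, 0, 0.2, 0.5} for n = 4 examples and a tolerance of ~0. Then
+    //   findFirstLargerInDimension(dim, 0.3, pos)  yields pos = 3,
+    // since two zeros and the nonzero value 0.2 precede the insertion point, while
+    //   findFirstLargerInDimension(dim, 0.7, pos)  yields pos = 4 = n,
+    // i.e. 0.7 would be larger than every stored value.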
+    
+    //  Finds the last element in a given dimension which is smaller than elem
+    template <typename T>
+    void FeatureMatrixT<T>::findLastSmallerInDimension(const int & dim, const T & elem, int & position) const
+    {
+      position = -1;
+      if ( (dim < 0) || (dim >= d))
+        return;
+
+      typename SortedVectorSparse<T>::const_elementpointer it =  features[dim].nonzeroElements().lower_bound ( elem );
+      position = distance( features[dim].nonzeroElements().begin(), it );
+      if ( it->first > features[dim].getTolerance() )
+        position += features[dim].getZeros();
+    }
+    
+    //------------------------------------------------------
+    // high level methods
+    //------------------------------------------------------
+
+    template <typename T>
+    void FeatureMatrixT<T>::applyFunctionToFeatureMatrix ( const NICE::ParameterizedFunction *pf )
+    {
+      if (pf != NULL)
+      {
+        // REMARK: might be inefficient due to virtual calls
+        if ( !pf->isOrderPreserving() )
+          fthrow(Exception, "ParameterizedFunction::applyFunctionToFeatureMatrix: this function is optimized for order preserving transformations");
+        
+        int d = this->get_d();
+        for (int dim = 0; dim < d; dim++)
+        {
+          std::multimap< double, typename SortedVectorSparse<double>::dataelement> & nonzeroElements = this->getFeatureValues(dim).nonzeroElements();
+          for ( SortedVectorSparse<double>::elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++ )
+          {
+            SortedVectorSparse<double>::dataelement & de = i->second;
+            
+            //TODO check whether the element is "sparse" afterwards
+            de.second = pf->f( dim, i->first );
+          }
+        }
+
+        /*for ( int i = 0 ; i < featureMatrix.get_n(); i++ )
+          for ( int index = 0 ; index < featureMatrix.get_d(); index++ )
+            featureMatrix.set(index, i, f( (uint)index, featureMatrix.getOriginal(index,i) ), isOrderPreserving() );*/
+      }
+      else
+      {
+        //no pf given -> nothing to do
+      }
+    }    
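+    // Sketch of a typical call (illustrative only; PFAbsExp is used the same way
+    // in GPHIKClassifier, and its default constructor is assumed here):
+    //
+    //   NICE::PFAbsExp pf;                      // order preserving transformation
+    //   fm.applyFunctionToFeatureMatrix(&pf);   // transforms every nonzero entry in place
+    //
+    // Order preservation matters because only the transformed values are updated;
+    // the sorted structure of each dimension is left untouched.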
+    
+    
+    //Computes the ratio of sparsity across the matrix
+    template <typename T>
+    double FeatureMatrixT<T>::computeSparsityRatio()
+    {
+      double ratio(0.0);
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::const_iterator it = features.begin(); it != features.end(); it++)
+      {
+        ratio += (*it).getZeros() / (double) (*it).getN();
+      }
+      if (features.size() != 0)
+        ratio /= features.size();
+      return ratio;
+    }
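+    // Tiny numeric example (illustrative): with d = 2 dimensions and n = 4 examples,
+    // where dimension 0 stores 3 zeros and dimension 1 stores 1 zero, the ratio is
+    // (3/4 + 1/4) / 2 = 0.5, i.e. on average half of all entries are (close to) zero.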
+
+    //  add a new feature and insert its elements at the end of each dimension vector
+    template <typename T>
+    void FeatureMatrixT<T>::add_feature( const std::vector<T> & feature, const NICE::ParameterizedFunction *pf )
+    {
+      if (n == 0)
+      {
+        set_d(feature.size());
+      }
+      
+      if ( (int)feature.size() != d)
+      {
+        fthrow(Exception, "FeatureMatrixT<T>::add_feature - number of dimensions does not fit");
+        return;
+      }
+
+      for (int dimension = 0; dimension < (int) features.size(); dimension++)
+      {
+        if (pf != NULL)
+          features[dimension].insert( feature[dimension], pf->f( dimension, feature[dimension]) );
+        else  
+          features[dimension].insert( feature[dimension] );        
+      }
+      n++;
+    }
+    //  add a new feature and insert its elements at the end of each dimension vector
+    template <typename T>
+    void FeatureMatrixT<T>::add_feature(const NICE::SparseVector & feature, const ParameterizedFunction *pf )
+    {
+      if (n == 0)
+      {
+        set_d(feature.getDim()); // use the full dimensionality, not the number of nonzero entries
+      }
+      
+      if ( (int)feature.getDim() > d)
+      {
+        fthrow(Exception, "FeatureMatrixT<T>::add_feature - number of dimensions does not fit");
+        return;
+      }
+
+      for (NICE::SparseVector::const_iterator it = feature.begin(); it != feature.end(); it++)
+      {
+        if (pf != NULL)
+          features[it->first].insert( (T) it->second, pf->f( it->first, (T) it->second), n );
+        else  
+          features[it->first].insert( (T) it->second, n );
+      }
+      n++;
+    }    
+      
+    //  add several new features and insert their elements in the already ordered structure
+    template <typename T>
+    void FeatureMatrixT<T>::add_features(const std::vector<std::vector<T> > & _features )
+    {
+      //TODO do we need the parameterized function here as well? usually, we add several features and run applyFunctionToFeatureMatrix afterwards.
+      // check this please :)
+      
+      //TODO assure that every feature has the same dimension
+      if (n == 0)
+      {
+        set_d(_features.size());
+      }
+      
+      //pay attention: we assume a vector over dimensions, each entry containing the feature values of all examples in that dimension - this is more efficient
+      for (int dim = 0; dim < d; dim++)
+      {
+          features[dim].insert( _features[dim] );
+      }
+      
+      //update the number of our features
+      n += (int) _features[0].size();
+    }
+    
+    template <typename T>
+    void FeatureMatrixT<T>::set_features(const std::vector<std::vector<T> > & _features, std::vector<std::vector<int> > & permutations, const int & _dim )
+    {
+      features.clear();
+      if (_dim < 0)
+        set_d(_features.size());
+      else
+        set_d(_dim);
+      
+      if (d>0)
+        n = _features[0].size();
+      
+      //pay attention: we assume a vector over dimensions, each entry containing the feature values of all examples in that dimension - this is more efficient
+      for (int dim = 0; dim < d; dim++)
+      {
+        features[dim].insert( _features[dim] );
+      }
+    
+      getPermutations( permutations );
+    }
+
+    template <typename T>
+    void FeatureMatrixT<T>::set_features(const std::vector<std::vector<T> > & _features, std::vector<std::map<int,int> > & permutations, const int & _dim)
+    {
+      features.clear();
+      if (_dim < 0)
+        set_d(_features.size());
+      else
+        set_d(_dim);
+      if (d>0)
+        n = _features[0].size();
+           
+      //pay attention: we assume a vector over dimensions, each entry containing the feature values of all examples in that dimension - this is more efficient
+      for (int dim = 0; dim < d; dim++)
+      {
+        features[dim].insert( _features[dim] );
+      }
+    
+      getPermutations( permutations );
+    }
+    
+    template <typename T>
+    void FeatureMatrixT<T>::set_features(const std::vector<std::vector<T> > & _features, const int & _dim)
+    {
+      features.clear();
+      if (_dim < 0)
+        set_d(_features.size());
+      else
+        set_d(_dim);
+      
+      if (d>0)
+        n = _features[0].size();
+      
+      //pay attention: we assume a vector over dimensions, each entry containing the feature values of all examples in that dimension - this is more efficient
+      for (int dim = 0; dim < d; dim++)
+      {
+        features[dim].insert( _features[dim] );
+      }
+    }
+    
+    template <typename T>
+    void FeatureMatrixT<T>::set_features(const std::vector< NICE::SparseVector * > & _features, const bool dimensionsOverExamples, const int & _dim)
+    {   
+      features.clear();
+      if (_features.size() == 0)
+      {
+        std::cerr << "set_features without features" << std::endl;
+      }
+      
+      // resize our data structure      
+      if (_dim >= 0) //did the user specify the number of dimensions?
+        set_d(_dim);
+      else //dimensions not specified by the user
+      {
+        if (dimensionsOverExamples) //do we have dim x examples ?
+        {
+          set_d(_features.size());
+        }
+        else //we have examples x dims (as usually done)
+        {
+          if (_features.size() > 0) //and have at least one example
+          {
+            try{
+              set_d(_features[0]->getDim());  
+            }
+            catch(...)
+            {
+              std::cerr << "FeatureMatrixT<T>::set_features -- something went wrong using getDim() of SparseVectors" << std::endl;
+            }
+          }
+          else //no example, so set the dim to 0, since we have no idea at all
+          {
+            set_d(0);
+          }          
+        }
+      }    
+      
+      // set number of examples n
+      if (d>0)
+      {
+        if (dimensionsOverExamples) //do we have dim x examples ?
+          n = _features[0]->getDim(); //NOTE Pay attention: we assume that this number is set!
+        else //we have examples x dims (as usually done)   
+          n = _features.size(); 
+      }       
+      
+      // insert all values
+      if (dimensionsOverExamples) //do we have dim x examples ?
+      {
+        for (int dim = 0; dim < d; dim++)
+        {
+          features[dim].insert( _features[dim] );
+        }
+      }
+      else //we have examples x dims (as usually done)
+      {
+        if ( debug )
+          std::cerr << "FeatureMatrixT<T>::set_features " << n << " new examples" << std::endl;
+        //loop over every example to add its content
+        for (int nr = 0; nr < n; nr++)
+        {
+          if ( debug )
+            std::cerr << "add feature nr. " << nr << " / " << _features.size() << " ";
+          //loop over every dimension to add the specific value to the corresponding SortedVectorSparse
+          for (NICE::SparseVector::const_iterator elemIt = _features[nr]->begin(); elemIt != _features[nr]->end(); elemIt++)
+          {
+            if ( debug )
+              std::cerr << elemIt->first << "-" << elemIt->second << " ";
+            //elemIt->first: dim, elemIt->second: value
+            features[elemIt->first].insert( (T) elemIt->second, nr);
+          }//for non-zero-values of the feature
+          if ( debug )
+            std::cerr << std::endl;
+        }//for every new feature
+        if ( debug )
+          std::cerr << "FeatureMatrixT<T>::set_features done" << std::endl;
+      }//if dimOverEx
+      
+      //set n for the internal data structure SortedVectorSparse
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::iterator it = features.begin(); it != features.end(); it++)
+        (*it).setN(n);
+    }
+
+    template <typename T>
+    void FeatureMatrixT<T>::getPermutations( std::vector<std::vector<int> > & permutations) const
+    {
+      for (int dim = 0; dim < d; dim++)
+      {
+        std::vector<int> perm (  (features[dim]).getPermutation() );
+        permutations.push_back(perm);
+      }
+    }
+    
+    template <typename T>
+    void FeatureMatrixT<T>::getPermutations( std::vector<std::map<int,int> > & permutations) const
+    {
+      for (int dim = 0; dim < d; dim++)
+      {
+        std::map<int,int> perm (  (features[dim]).getPermutationNonZeroReal() );
+        permutations.push_back(perm);
+      }
+    }
+
+      
+    //  Prints the whole matrix (outer loop over dimensions, inner loop over features)
+    template <typename T>
+    void FeatureMatrixT<T>::print(std::ostream & os) const
+    {
+      if (os.good())
+      {
+        for (int dim = 0; dim < d; dim++)
+        {
+          features[dim].print(os);
+        }
+      }
+    }
+    
+    // Computes the whole non-sparse matrix. WARNING: this may result in a very memory-consuming data structure!
+    template <typename T>
+    void FeatureMatrixT<T>::computeNonSparseMatrix(NICE::MatrixT<T> & matrix, bool transpose) const
+    {
+      if ( transpose )
+        matrix.resize(this->get_n(),this->get_d());
+      else
+        matrix.resize(this->get_d(),this->get_n());
+
+      matrix.set((T)0.0);
+      int dimIdx(0);
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::const_iterator it = features.begin(); it != features.end(); it++, dimIdx++)
+      {
+        std::map< int, typename NICE::SortedVectorSparse<T>::elementpointer>  nonzeroIndices= (*it).nonzeroIndices();
+        for (typename std::map< int, typename NICE::SortedVectorSparse<T>::elementpointer>::const_iterator inIt = nonzeroIndices.begin(); inIt != nonzeroIndices.end(); inIt++)
+        {
+          int featIndex = ((*inIt).second)->second.first;
+          if ( transpose ) 
+            matrix(featIndex,dimIdx) =((*inIt).second)->second.second; 
+          else
+            matrix(dimIdx,featIndex) =((*inIt).second)->second.second; 
+        }
+      }
+    }
+
+    // Computes the whole non-sparse matrix. WARNING: this may result in a very memory-consuming data structure!
+    template <typename T>
+    void FeatureMatrixT<T>::computeNonSparseMatrix(std::vector<std::vector<T> > & matrix, bool transpose) const
+    {
+      if ( transpose )
+        matrix.resize(this->get_n());
+      else
+        matrix.resize(this->get_d());
+      
+      // resizing the matrix
+      for ( uint i = 0 ; i < matrix.size(); i++ )
+        if ( transpose )
+          matrix[i] = std::vector<T>(this->get_d(), 0.0);
+        else
+          matrix[i] = std::vector<T>(this->get_n(), 0.0);
+
+      int dimIdx(0);
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::const_iterator it = features.begin(); it != features.end(); it++, dimIdx++)
+      {
+        std::map< int, typename NICE::SortedVectorSparse<T>::elementpointer>  nonzeroIndices= (*it).nonzeroIndices();
+        for (typename std::map< int, typename NICE::SortedVectorSparse<T>::elementpointer>::const_iterator inIt = nonzeroIndices.begin(); inIt != nonzeroIndices.end(); inIt++)
+        {
+          int featIndex = ((*inIt).second)->second.first;
+          if ( transpose )
+            matrix[featIndex][dimIdx] =((*inIt).second)->second.second; 
+          else
+            matrix[dimIdx][featIndex] =((*inIt).second)->second.second; 
+        }
+      }
+    }
+    
+    // Swaps two specified elements, performing a validity check
+    template <typename T>
+    void FeatureMatrixT<T>::swap(const int & row1, const int & col1, const int & row2, const int & col2)
+    {
+      if ( (row1 < 0) || (col1 < 0) || (row1 >= d) || (col1 >= n) || (row2 < 0) || (col2 < 0) || (row2 >= d) || (col2 >= n))
+      {
+        return;
+      }
+      else
+      {
+        //swap
+        T tmp = (*this)(row1, col1);
+        (*this).set(row1, col1, (*this)(row2,col2));
+        (*this).set(row2, col2, tmp);
+      }
+    }
+    
+    //  Swaps two specified elements, without performing a validity check
+    template <typename T>
+    void FeatureMatrixT<T>::swapUnsafe(const int & row1, const int & col1, const int & row2, const int & col2)
+    {
+      //swap
+      T tmp = (*this)(row1, col1);
+      (*this).set(row1, col1, (*this)(row2,col2));
+      (*this).set(row2, col2, tmp);
+    }
+
+    template <typename T>
+    void FeatureMatrixT<T>::hikDiagonalElements( Vector & diagonalElements ) const
+    {
+      int dimIdx = 0;
+      // the function calculates the diagonal elements of a HIK kernel matrix
+      diagonalElements.resize(n);
+      diagonalElements.set(0.0);
+      // loop through all dimensions
+      for (typename std::vector<NICE::SortedVectorSparse<T> >::const_iterator it = features.begin(); it != features.end(); it++, dimIdx++)
+      {
+        std::map< int, typename NICE::SortedVectorSparse<T>::elementpointer>  nonzeroIndices= (*it).nonzeroIndices();
+        // loop through all features
+        for (typename std::map< int, typename NICE::SortedVectorSparse<T>::elementpointer>::const_iterator inIt = nonzeroIndices.begin(); inIt != nonzeroIndices.end(); inIt++)
+        {
+          int index = inIt->first;
+          typename NICE::SortedVectorSparse<T>::elementpointer p = inIt->second;
+          typename NICE::SortedVectorSparse<T>::dataelement de = p->second;
+
+          diagonalElements[index] += de.second;
+        }
+      }
+    }
+
+    template <typename T>
+    double FeatureMatrixT<T>::hikTrace() const
+    {
+      Vector diagonalElements;
+      hikDiagonalElements( diagonalElements );
+      return diagonalElements.Sum();
+    }
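+    // Note: for the histogram intersection kernel, k(x,x) = sum_dim min(x_dim, x_dim)
+    // = sum_dim x_dim, so the trace is just the sum over all (transformed) feature
+    // values -- exactly the accumulation performed in hikDiagonalElements.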
+    
+    template <typename T>
+    int FeatureMatrixT<T>::getNumberOfNonZeroElementsPerDimension(const int & dim) const
+    {
+      if ( (dim < 0) || (dim >= d))
+        return -1;
+      return features[dim].getNonZeros();
+    }
+
+    template <typename T>
+    int FeatureMatrixT<T>::getNumberOfZeroElementsPerDimension(const int & dim) const
+    {
+      if ( (dim < 0) || (dim >= d))
+        return -1;
+      return n - features[dim].getNonZeros();
+    }
+    
+
+    template <typename T>
+    void FeatureMatrixT<T>::restore ( std::istream & is, int format )
+    {
+      if (is.good())
+      {
+        is.precision (std::numeric_limits<double>::digits10 + 1);
+        std::string tmp;
+        
+        is >> tmp; //classname
+        
+        is >> tmp;
+        is >> n;
+
+        
+        is >> tmp;
+        is >> d;
+        
+        features.resize(d);
+        //now read features for every dimension
+        for (int dim = 0; dim < d; dim++)
+        {
+          NICE::SortedVectorSparse<T> svs;
+          features[dim] = svs;          
+          features[dim].restore(is,format);
+        }
+        
+        if (verbose)
+        {
+          std::cerr << "FeatureMatrixT<T>::restore" << std::endl;
+          std::cerr << "n: " << n << std::endl;          
+          std::cerr << "d: " << d << std::endl;
+          this->print(std::cerr);
+        }
+      }
+      else
+      {
+        std::cerr << "FeatureMatrixT<T>::restore -- InStream not initialized - restoring not possible!" << std::endl;
+      }
+    }
+
+    template <typename T>
+    void FeatureMatrixT<T>::store ( std::ostream & os, int format ) const
+    {
+      if (os.good())
+      {
+        os.precision (std::numeric_limits<double>::digits10 + 1);
+        os << "FeatureMatrixT" << std::endl;
+        os << "n: " << n << std::endl;
+        os << "d: " << d << std::endl;
+        
+        //now write features for every dimension
+        for (int dim = 0; dim < d; dim++)
+        {
+          features[dim].store(os,format);
+        }
+      }
+      else
+      {
+        std::cerr << "FeatureMatrixT<T>::store -- OutStream not initialized - storing not possible!" << std::endl;
+      }
+    }    
+    
+    template <typename T>
+    void FeatureMatrixT<T>::clear ()
+    {}
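+    // Persistence round-trip sketch (illustrative; the file name is hypothetical):
+    //
+    //   std::ofstream ofs("fm.dat");
+    //   fm.store(ofs);                  // writes "FeatureMatrixT", n, d, then all dims
+    //   ofs.close();
+    //
+    //   NICE::FeatureMatrix fmRestored;
+    //   std::ifstream ifs("fm.dat");
+    //   fmRestored.restore(ifs);        // reads the data back in the same order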
+
+} // namespace
+
+// #endif

+ 209 - 0
GMHIKernel.cpp

@@ -0,0 +1,209 @@
+/** 
+* @file GMHIKernel.cpp
+* @brief Fast multiplication with histogram intersection kernel matrices (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 01/02/2012
+
+*/
+#include <iostream>
+
+#include <core/vector/VVector.h>
+#include <core/basics/Timer.h>
+
+#include "GMHIKernel.h"
+
+using namespace NICE;
+using namespace std;
+
+
+GMHIKernel::GMHIKernel( FastMinKernel *_fmk, ParameterizedFunction *_pf, const Quantization *_q )
+{
+  this->fmk = _fmk;
+  this->q = _q;
+  this->pf = _pf;
+  verbose = false;
+  useOldPreparation = false;
+
+}
+
+GMHIKernel::~GMHIKernel()
+{
+}
+
+/** multiply with a vector: A*x = y */
+void GMHIKernel::multiply (NICE::Vector & y, const NICE::Vector & x) const
+{
+  //do we want to use any quantization at all?
+  if (q != NULL)
+  {
+    double *T;
+    if (useOldPreparation)
+    {
+      NICE::VVector A; 
+      NICE::VVector B; 
+      // prepare to calculate sum_i x_i K(x,x_i)
+      fmk->hik_prepare_alpha_multiplications(x, A, B);
+      T = fmk->hik_prepare_alpha_multiplications_fast(A, B, *q, pf);
+    }
+    else
+    {
+      T = fmk->hikPrepareLookupTable(x, *q, pf );
+    }
+    fmk->hik_kernel_multiply_fast ( T, *q, x, y ); 
+    delete [] T;
+  }
+  else //no quantization
+  {
+    NICE::VVector A; 
+    NICE::VVector B; 
+    // prepare to calculate sum_i x_i K(x,x_i)
+    fmk->hik_prepare_alpha_multiplications(x, A, B);
+    
+    if (verbose)
+    {
+      int sizeOfDouble (sizeof(double));
+      int sizeOfA(0);
+      int sizeOfB(0);
+      for (uint i = 0; i < A.size(); i++)
+      {
+        sizeOfA += A[i].size();
+      }
+      for (uint i = 0; i < B.size(); i++)
+      {
+        sizeOfB += B[i].size();
+      }
+      sizeOfA*=sizeOfDouble;
+      sizeOfB*=sizeOfDouble;
+      
+      std::cerr << "multiplySparse: sizeof(A) + sizeof(B): " << sizeOfA + sizeOfB << std::endl;
+    }
+    // y = K * x
+    //we only need x as an input argument to add x*noise to beta
+    //all necessary information for the "real" multiplication is already stored in A and B
+    fmk->hik_kernel_multiply(A, B, x, y);
+  }
+}
+
+/** get the number of rows in A */
+uint GMHIKernel::rows () const
+{
+  // return the number of examples
+  return fmk->get_n();
+}
+
+/** get the number of columns in A */
+uint GMHIKernel::cols () const
+{
+  // return the number of examples
+  return fmk->get_n();
+}
+
+/** set verbose-flag needed for output of size of A and B */
+void GMHIKernel::setVerbose ( const bool& _verbose )
+{
+  verbose = _verbose;
+}
+
+void GMHIKernel::setUseOldPreparation( const bool & _useOldPreparation)
+{
+  useOldPreparation = _useOldPreparation;
+}
+
+uint GMHIKernel::getNumParameters() const 
+{
+  if ( pf == NULL )
+    return 0;
+  else
+    return pf->parameters().size();
+}
+
+void GMHIKernel::getParameters(Vector & parameters) const
+{
+  if ( pf == NULL )
+    parameters.clear();
+  else {
+    parameters.resize( pf->parameters().size() );
+    parameters = pf->parameters();
+  }
+}
+
+void GMHIKernel::setParameters(const Vector & parameters)
+{
+  if ( pf == NULL && parameters.size() > 0 )
+    fthrow(Exception, "Unable to set parameters of a non-parameterized GMHIKernel object");
+
+  pf->parameters() = parameters;
+  
+  fmk->applyFunctionToFeatureMatrix( pf );
+
+  // only for debugging with small matrices: fmk->featureMatrix().print();
+}
+
+void GMHIKernel::getDiagonalElements ( Vector & diagonalElements ) const
+{
+  fmk->featureMatrix().hikDiagonalElements(diagonalElements);
+  // add sigma^2 I
+  diagonalElements += fmk->getNoise();
+}
+
+void GMHIKernel::getFirstDiagonalElement ( double & diagonalElement ) const
+{
+  Vector diagonalElements;
+  fmk->featureMatrix().hikDiagonalElements(diagonalElements);
+  diagonalElement = diagonalElements[0];
+  // add sigma^2 I
+  diagonalElement += fmk->getNoise();
+}
+    
+bool GMHIKernel::outOfBounds(const Vector & parameters) const
+{
+  if ( pf == NULL && parameters.size() > 0 )
+    fthrow(Exception, "Unable to check the bounds of a parameter without any parameterization");
+  
+  Vector uB = pf->getParameterUpperBounds();
+  Vector lB = pf->getParameterLowerBounds();
+  if ( uB.size() != parameters.size() || lB.size() != parameters.size() )
+    fthrow(Exception, "Dimension of lower/upper bound vector " << lB.size() << " and " << uB.size() << " does not match the size of the parameter vector " << parameters.size() << ".");
+  for ( uint i = 0 ; i < parameters.size() ; i++ )
+    if ( (parameters[i] < lB[i]) || (parameters[i] > uB[i]) )
+    {
+      if (verbose)
+        std::cerr << "Parameter " << i << " is out of bounds: " << lB[i] << " <= " << parameters[i] << " <= " << uB[i] << std::endl;
+      return true;
+    }
+
+  return false;
+}
+
+Vector GMHIKernel::getParameterLowerBounds() const
+{
+  if ( pf == NULL )
+    fthrow(Exception, "Unable to get the bounds without any parameterization");
+  return pf->getParameterLowerBounds();
+}
+
+Vector GMHIKernel::getParameterUpperBounds() const
+{
+  if ( pf == NULL )
+    fthrow(Exception, "Unable to get the bounds without any parameterization");
+  return pf->getParameterUpperBounds();
+}
+
+double GMHIKernel::approxFrobNorm() const
+{
+  return this->fmk->getFrobNormApprox();
+}
+
+void GMHIKernel::setApproximationScheme(const int & _approxScheme)
+{
+  this->fmk->setApproximationScheme(_approxScheme);
+}
+
+// ----------------- INCREMENTAL LEARNING METHODS -----------------------
+void GMHIKernel::addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels)
+{
+  // we could add the example to the fmk, but we won't do it here
+  // reason: with balanced learning we have multiple identical GMHIKernel objects,
+  // and if we added the example here, it would be added once per object.
+  // Therefore, the example is already added in the FMKGPHyperparameterOptimization class.
+}

+ 88 - 0
GMHIKernel.h

@@ -0,0 +1,88 @@
+/** 
+* @file GMHIKernel.h
+* @author Erik Rodner, Alexander Freytag
+* @brief Fast multiplication with histogram intersection kernel matrices (Interface)
+* @date 01/02/2012
+
+*/
+#ifndef _NICE_GMHIKERNELINCLUDE
+#define _NICE_GMHIKERNELINCLUDE
+
+#include <vector>
+
+#include <core/algebra/GenericMatrix.h>
+
+#include "ImplicitKernelMatrix.h"
+#include "FeatureMatrixT.h"
+#include "FastMinKernel.h"
+
+namespace NICE {
+
+ /** 
+ * @class GMHIKernel
+ * @brief Fast multiplication with histogram intersection kernel matrices
+ * @author Erik Rodner, Alexander Freytag
+ */
+
+class GMHIKernel : public ImplicitKernelMatrix
+{
+
+  protected:
+
+    FastMinKernel *fmk;
+    const Quantization *q;
+    ParameterizedFunction *pf;
+
+    bool verbose;
+
+    bool use_sparse_implementation;
+    bool useOldPreparation;
+
+  public:
+
+    /** simple constructor */
+    GMHIKernel( FastMinKernel *_fmk, ParameterizedFunction *_pf = NULL, const Quantization *_q = NULL );
+      
+    /** multiply with a vector: A*x = y */
+    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
+
+    /** get the number of rows in A */
+    virtual uint rows () const;
+
+    /** get the number of columns in A */
+    virtual uint cols () const;
+
+    /** simple destructor */
+    virtual ~GMHIKernel();
+   
+    /** get the diagonal elements of the current matrix */
+    virtual void getDiagonalElements ( Vector & diagonalElements ) const;
+    virtual void getFirstDiagonalElement ( double & diagonalElement ) const;
+
+    uint getNumParameters() const;
+    void getParameters(Vector & parameters) const;
+    void setParameters(const Vector & parameters);
+    bool outOfBounds(const Vector & parameters) const;
+
+    Vector getParameterLowerBounds() const;
+    Vector getParameterUpperBounds() const;
+
+    void setVerbose( const bool & _verbose);
+    void setUseOldPreparation( const bool & _useOldPreparation);
+    
+    virtual double approxFrobNorm() const;
+    virtual void setApproximationScheme(const int & _approxScheme);
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 ) {};//fmk->restore( is, format );};
+    virtual void store ( std::ostream & os, int format = 0 ) const {};//fmk->store( os, format );};
+    virtual void clear () {};
+    
+    virtual void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels);
+    
+    void setFastMinKernel(NICE::FastMinKernel * _fmk){fmk = _fmk;};
+     
+};
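+// Minimal usage sketch (illustrative only; the FastMinKernel construction follows
+// its use in GPHIKClassifier::train, `examples`, `noise` and `alpha` are assumed
+// to be set up by the caller):
+//
+//   NICE::FastMinKernel *fmk = new NICE::FastMinKernel(examples, noise);
+//   NICE::GMHIKernel gm(fmk);     // no transformation, no quantization
+//   NICE::Vector y;
+//   gm.multiply(y, alpha);        // y = K * alpha without ever building K explicitly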
+
+}
+#endif

+ 353 - 0
GPHIKClassifier.cpp

@@ -0,0 +1,353 @@
+/** 
+* @file GPHIKClassifier.cpp
+* @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning) (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/01/2012
+
+*/
+#include <iostream>
+
+#include "core/basics/numerictools.h"
+#include <core/basics/Timer.h>
+
+#include "GPHIKClassifier.h"
+#include "gp-hik-core/parameterizedFunctions/PFAbsExp.h"
+#include "gp-hik-core/parameterizedFunctions/PFExp.h"
+#include "gp-hik-core/parameterizedFunctions/PFMKL.h"
+
+using namespace std;
+using namespace NICE;
+
+
+GPHIKClassifier::GPHIKClassifier( const Config *conf, const string & confSection ) 
+{
+  //default settings, may be overwritten later on
+  gphyper = NULL;
+  pf = NULL;
+  confCopy = NULL;
+  //just a default value
+  uncertaintyPredictionForClassification = false;
+  
+  if ( conf == NULL )
+  {
+     fthrow(Exception, "GPHIKClassifier: the config is NULL -- use a default config and the restore-function instaed!");
+  }
+  else
+    this->init(conf, confSection);
+}
+
+GPHIKClassifier::~GPHIKClassifier()
+{
+  if ( gphyper != NULL )
+    delete gphyper;
+  
+  if (pf != NULL)
+    delete pf;
+
+  if ( confCopy != NULL )
+    delete confCopy;
+}
+
+void GPHIKClassifier::init(const Config *conf, const string & confSection)
+{
+  double parameterLowerBound = conf->gD(confSection, "parameter_lower_bound", 1.0 );
+  double parameterUpperBound = conf->gD(confSection, "parameter_upper_bound", 5.0 );
+
+  if (gphyper == NULL)
+    this->gphyper = new FMKGPHyperparameterOptimization;
+  this->noise = conf->gD(confSection, "noise", 0.01);
+
+  string transform = conf->gS(confSection, "transform", "absexp" );
+  
+  if (pf == NULL)
+  {
+    if ( transform == "absexp" )
+    {
+      this->pf = new PFAbsExp( 1.0, parameterLowerBound, parameterUpperBound );
+    } else if ( transform == "exp" ) {
+      this->pf = new PFExp( 1.0, parameterLowerBound, parameterUpperBound );
+    }else if ( transform == "MKL" ) {
+      //TODO make this generic: load the steps from a separate file or something like that!
+      std::set<int> steps; steps.insert(4000); steps.insert(6000); //specific for VISAPP
+      this->pf = new PFMKL( steps, parameterLowerBound, parameterUpperBound );
+    } else {
+      fthrow(Exception, "Transformation type is unknown " << transform);
+    }
+  }
+  else{
+    //we already know the pf from the restore-function
+  }
+  this->confSection = confSection;
+  this->verbose = conf->gB(confSection, "verbose", false);
+  this->debug = conf->gB(confSection, "debug", false);
+  this->uncertaintyPredictionForClassification = conf->gB( confSection, "uncertaintyPredictionForClassification", false );
+  
+  if (confCopy != conf)
+  {  
+    this->confCopy = new Config ( *conf );
+    //we do not want to read until end of file for restoring    
+    confCopy->setIoUntilEndOfFile(false);    
+  }
+   
+  //how do we approximate the predictive variance for classification uncertainty?
+  string varianceApproximationString = conf->gS(confSection, "varianceApproximation", "approximate_fine"); //default: fine approximative uncertainty prediction
+  if (varianceApproximationString.compare("approximate_rough") == 0)
+  {
+    this->varianceApproximation = APPROXIMATE_ROUGH;
+  }
+  else if (varianceApproximationString.compare("approximate_fine") == 0)
+  {
+    this->varianceApproximation = APPROXIMATE_FINE;
+  }
+  else if (varianceApproximationString.compare("exact") == 0)
+  {
+    this->varianceApproximation = EXACT;
+  }
+  else
+  {
+    this->varianceApproximation = NONE;
+  } 
+}
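+// A hypothetical config snippet matching the options read above (key names are taken
+// from this init(), the values shown are just the defaults; the usual NICE
+// "[section]" / "key = value" config format is assumed):
+//
+//   [GPHIKClassifier]
+//   noise = 0.01
+//   transform = absexp
+//   parameter_lower_bound = 1.0
+//   parameter_upper_bound = 5.0
+//   varianceApproximation = approximate_fine
+//   uncertaintyPredictionForClassification = false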
+
+void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores )
+{
+  double tmpUncertainty;
+  this->classify( example, result, scores, tmpUncertainty );
+}
+
+void GPHIKClassifier::classify ( const SparseVector * example,  int & result, SparseVector & scores, double & uncertainty )
+{
+  scores.clear();
+  
+  int classno = gphyper->classify ( *example, scores );
+
+  if ( scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  }
+  
+  result = scores.maxElement();
+   
+  if (uncertaintyPredictionForClassification)
+  {
+    if (varianceApproximation != NONE)
+    {
+      NICE::Vector uncertainties;
+      this->predictUncertainty( example, uncertainties );
+      uncertainty = uncertainties.Max();
+    }  
+    else
+    {
+      //do nothing
+      uncertainty = std::numeric_limits<double>::max();
+    }
+  }
+  else
+  {
+    //do nothing
+    uncertainty = std::numeric_limits<double>::max();
+  }    
+}
+
+/** training process */
+void GPHIKClassifier::train ( const std::vector< NICE::SparseVector *> & examples, const NICE::Vector & labels )
+{
+  if (verbose)
+    std::cerr << "GPHIKClassifier::train" << std::endl;
+
+  Timer t;
+  t.start();
+  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
+  t.stop();
+  if (verbose)
+    std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
+  
+  if ( gphyper != NULL )
+    delete gphyper; // avoid leaking the object already allocated in init()
+  gphyper = new FMKGPHyperparameterOptimization ( confCopy, pf, fmk, confSection ); 
+
+  if (verbose)
+    cerr << "Learning ..." << endl;
+  // go go go
+  gphyper->optimize ( labels );
+  if (verbose)
+    std::cerr << "optimization done, now prepare for the uncertainty prediction" << std::endl;
+  
+  if ( (varianceApproximation == APPROXIMATE_ROUGH) )
+  {
+    //prepare for variance computation (approximative)
+    gphyper->prepareVarianceApproximation();
+  }
+  //for exact variance computation, we do not have to prepare anything
+
+  // clean up all examples ??
+  if (verbose)
+    std::cerr << "Learning finished" << std::endl;
+}
+
+/** training process */
+void GPHIKClassifier::train ( const std::vector< SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
+{ 
+  if (verbose)
+    std::cerr << "GPHIKClassifier::train" << std::endl;
+
+  Timer t;
+  t.start();
+  FastMinKernel *fmk = new FastMinKernel ( examples, noise, this->debug );
+  t.stop();
+  if (verbose)
+    std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;  
+  
+  if ( gphyper != NULL )
+    delete gphyper; // avoid leaking the object already allocated in init()
+  gphyper = new FMKGPHyperparameterOptimization ( confCopy, pf, fmk, confSection ); 
+
+  if (verbose)
+    cerr << "Learning ..." << endl;
+  // go go go
+  gphyper->optimize ( binLabels );
+  if (verbose)
+    std::cerr << "optimization done, now prepare for the uncertainty prediction" << std::endl;
+  
+  if ( (varianceApproximation == APPROXIMATE_ROUGH) )
+  {
+    //prepare for variance computation (approximative)
+    gphyper->prepareVarianceApproximation();
+  }
+  //for exact variance computation, we do not have to prepare anything
+
+  // clean up all examples ??
+  if (verbose)
+    std::cerr << "Learning finished" << std::endl;
+}
+
+void GPHIKClassifier::clear ()
+{
+  if ( gphyper != NULL )
+    delete gphyper;
+  gphyper = NULL;
+}
+
+GPHIKClassifier *GPHIKClassifier::clone () const
+{
+  fthrow(Exception, "GPHIKClassifier: clone() not yet implemented" );
+
+  return NULL;
+}
+  
+void GPHIKClassifier::predictUncertainty( const NICE::SparseVector * example, NICE::Vector & uncertainties )
+{  
+  //we directly store the predictive variances in the vector that will contain the classification uncertainties later on, to save storage
+  switch (varianceApproximation)    
+  {
+    case APPROXIMATE_ROUGH:
+    {
+      gphyper->computePredictiveVarianceApproximateRough( *example, uncertainties );
+      break;
+    }
+    case APPROXIMATE_FINE:
+    {
+      gphyper->computePredictiveVarianceApproximateFine( *example, uncertainties );
+      break;
+    }    
+    case EXACT:
+    {
+      gphyper->computePredictiveVarianceExact( *example, uncertainties );
+      break;
+    }
+    default:
+    {
+//       std::cerr << "No Uncertainty Prediction at all" << std::endl;
+      fthrow(Exception, "GPHIKClassifier - your settings disabled the variance approximation needed for uncertainty prediction.");
+//       uncertainties.resize( 1 );
+//       uncertainties.set( numeric_limits<double>::max() );
+//       break;
+    }
+  }
+}
+
+//---------------------------------------------------------------------
+//                           protected methods
+//---------------------------------------------------------------------
+void GPHIKClassifier::restore ( std::istream & is, int format )
+{
+  if (is.good())
+  {
+    is.precision (numeric_limits<double>::digits10 + 1);
+    
+    string tmp;
+    is >> tmp;    
+    is >> confSection;
+    
+    if (pf != NULL)
+    {
+      delete pf;
+    }
+    string transform;
+    is >> transform;
+    if ( transform == "absexp" )
+    {
+      this->pf = new PFAbsExp ();
+    } else if ( transform == "exp" ) {
+      this->pf = new PFExp ();
+    } else {
+      fthrow(Exception, "Transformation type is unknown " << transform);
+    }    
+    pf->restore(is, format);
+            
+    //load all options we determined explicitly
+    confCopy->clear();
+    //we do not want to read until the end of the file
+    confCopy->setIoUntilEndOfFile( false );
+    confCopy->restore(is, format);
+
+    //load all settings as well as default options
+    this->init(confCopy, confSection); 
+  
+    //first read things from the config
+    gphyper->initialize ( confCopy, pf );
+    
+    //then, load everything that we stored explicitly,
+    // including precomputed matrices, LUTs, eigenvalues, ... and all that stuff
+    gphyper->restore(is, format);      
+  }
+  else
+  {
+    std::cerr << "GPHIKClassifier::restore -- InStream not initialized - restoring not possible!" << std::endl;
+  }
+}
+
+void GPHIKClassifier::store ( std::ostream & os, int format ) const
+{
+  if (os.good())
+  {
+    os.precision (numeric_limits<double>::digits10 + 1);
+    
+    os << "confSection: "<<  confSection << std::endl;
+    
+    os << pf->sayYourName() << std::endl;
+    pf->store(os, format);
+    
+    //we do not want to read until end of file for restoring    
+    confCopy->setIoUntilEndOfFile(false);
+    confCopy->store(os,format);  
+    
+    //store the underlying data
+    //will be done in gphyper->store(of,format)
+    //store the optimized parameter values and all that stuff
+    gphyper->store(os, format); 
+  }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }
+}
+
+void GPHIKClassifier::addExample( const NICE::SparseVector * example, const double & label, const bool & performOptimizationAfterIncrement)
+{
+  gphyper->addExample( *example, label, performOptimizationAfterIncrement );
+}
+
+void GPHIKClassifier::addMultipleExamples( const std::vector< const NICE::SparseVector *> & newExamples, const NICE::Vector & newLabels, const bool & performOptimizationAfterIncrement)
+{
+  //are new examples available? If not, nothing has to be done
+  if ( newExamples.size() < 1)
+    return;
+  
+  gphyper->addMultipleExamples( newExamples, newLabels, performOptimizationAfterIncrement );
+}

+ 133 - 0
GPHIKClassifier.h

@@ -0,0 +1,133 @@
+/** 
+* @file GPHIKClassifier.h
+* @author Erik Rodner, Alexander Freytag
+* @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning) (Interface)
+* @date 02/01/2012
+
+*/
+#ifndef _NICE_GPHIKCLASSIFIERINCLUDE
+#define _NICE_GPHIKCLASSIFIERINCLUDE
+
+#include <string>
+#include <limits>
+
+#include <core/basics/Config.h>
+#include <core/vector/SparseVectorT.h>
+
+#include "FMKGPHyperparameterOptimization.h"
+#include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
+
+namespace NICE {
+  
+ /** 
+ * @class GPHIKClassifier
+ * @brief Main interface for our GP HIK classifier (similar to the feature pool classifier interface in vislearning)
+ * @author Erik Rodner, Alexander Freytag
+ */
+ 
+class GPHIKClassifier
+{
+
+  protected:
+    std::string confSection;
+    double noise;
+
+    enum VarianceApproximation{
+      APPROXIMATE_ROUGH,
+      APPROXIMATE_FINE,
+      EXACT,
+      NONE
+    };
+    
+    VarianceApproximation varianceApproximation;
+    
+    /**compute the uncertainty prediction during classification?*/
+    bool uncertaintyPredictionForClassification;
+    
+    NICE::Config *confCopy;
+    NICE::ParameterizedFunction *pf;
+    NICE::FMKGPHyperparameterOptimization *gphyper;
+    
+    /** verbose flag for useful output*/
+    bool verbose;
+    /** debug flag for several outputs useful for debugging*/
+    bool debug;
+    
+    /** 
+    * @brief initialize the classifier, reading all settings from the given config
+    * @param conf config specifying all settings
+    * @param confSection section of the config to be used
+    */    
+    void init(const NICE::Config *conf, const std::string & confSection);
+       
+
+  public:
+
+    /** simple constructor */
+    GPHIKClassifier( const NICE::Config *conf, const std::string & confSection = "GPHIKClassifier" );
+      
+    /** simple destructor */
+    ~GPHIKClassifier();
+   
+
+    /** 
+     * @brief classify a given example with the previously learnt model
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example (SparseVector) to be classified given in a sparse representation
+     * @param result (int) class number of most likely class
+     * @param scores (SparseVector) classification scores for known classes
+     */        
+    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores );
+    
+    /** 
+     * @brief classify a given example with the previously learnt model
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example (SparseVector) to be classified given in a sparse representation
+     * @param result (int) class number of most likely class
+     * @param scores (SparseVector) classification scores for known classes
+     * @param uncertainty (double*) predictive variance of the classification result, if computed
+     */    
+    void classify ( const NICE::SparseVector * example,  int & result, NICE::SparseVector & scores, double & uncertainty );
+
+    /**
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors 
+     * @date 18-10-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param examples (std::vector< NICE::SparseVector *>) training data given in a sparse representation
+     * @param labels (Vector) class labels (multi-class)
+     */
+    void train ( const std::vector< NICE::SparseVector *> & examples, const NICE::Vector & labels );
+    
+    /** 
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors 
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param examples examples to use given in a sparse data structure
+     * @param binLabels corresponding binary labels with class numbers as keys. There is no need for every example to have exactly one positive entry in this set (1,-1)
+     */
+    void train ( const std::vector< NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
+    
+    /** Persistent interface */
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
+
+    GPHIKClassifier *clone () const;
+
+    /** 
+     * @brief prediction of classification uncertainty
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param example example for which the classification uncertainty shall be predicted, given in a sparse representation
+     * @param uncertainties contains the resulting classification uncertainties (1 entry for standard setting, m entries for binary-balanced setting)
+     */       
+    void predictUncertainty( const NICE::SparseVector * example, NICE::Vector & uncertainties );
+    
+    void addExample( const NICE::SparseVector * example, const double & label, const bool & performOptimizationAfterIncrement = true);
+    void addMultipleExamples( const std::vector< const NICE::SparseVector * > & newExamples, const NICE::Vector & newLabels, const bool & performOptimizationAfterIncrement = true);
+};
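+// Minimal train/classify sketch (illustrative only; error handling and data
+// preparation are omitted):
+//
+//   NICE::Config conf;                             // or load settings from a *.conf file
+//   NICE::GPHIKClassifier classifier(&conf);
+//
+//   std::vector<NICE::SparseVector *> examples;    // training data, sparse representation
+//   NICE::Vector labels;                           // multi-class labels, one per example
+//   classifier.train(examples, labels);
+//
+//   int result;
+//   NICE::SparseVector scores;
+//   classifier.classify(examples[0], result, scores);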
+
+}
+
+#endif

+ 367 - 0
GPLikelihoodApprox.cpp

@@ -0,0 +1,367 @@
+/** 
+* @file GPLikelihoodApprox.cpp
+* @brief GP likelihood approximation as a cost function (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/09/2012
+
+*/
+#include <iostream>
+
+#include <core/algebra/CholeskyRobust.h>
+#include <core/vector/Algorithms.h>
+#include <core/vector/Eigen.h>
+
+#include <core/basics/Timer.h>
+#include <core/algebra/ILSConjugateGradients.h>
+#include "kernels/GeneralizedIntersectionKernelFunction.h"
+#include "kernels/IntersectionKernelFunction.h"
+
+
+#include "GPLikelihoodApprox.h"
+#include "IKMLinearCombination.h"
+#include "GMHIKernel.h"
+#include "algebra/LogDetApproxBaiAndGolub.h"
+
+
+using namespace std;
+using namespace NICE;
+using namespace OPTIMIZATION;
+
+
+GPLikelihoodApprox::GPLikelihoodApprox( const map<int, Vector> & binaryLabels,
+                                        ImplicitKernelMatrix *ikm,
+                                        IterativeLinearSolver *linsolver, 
+                                        EigValues *eig,
+                                        bool verifyApproximation,
+                                        int _nrOfEigenvaluesToConsider
+                                      ) 
+
+      : CostFunction( ikm->getNumParameters() )
+{
+  this->binaryLabels = binaryLabels;
+  this->ikm = ikm;
+  this->linsolver = linsolver;
+  this->eig = eig;
+
+  if ( binaryLabels.size() == 1 )
+    this->nrOfClasses = 2;
+  else
+    this->nrOfClasses = binaryLabels.size();
+
+  this->min_nlikelihood = std::numeric_limits<double>::max();
+  this->verifyApproximation = verifyApproximation;
+  
+  this->nrOfEigenvaluesToConsider = _nrOfEigenvaluesToConsider;
+  
+  lastAlphas = NULL;
+  
+  this->verbose = false;
+  this->debug = false;
+  
+  this->usePreviousAlphas = true;
+
+}
+
+GPLikelihoodApprox::~GPLikelihoodApprox()
+{
+  //we do not own lastAlphas, so we only reset the pointer (the content is stored somewhere else)
+  if (lastAlphas != NULL)
+    lastAlphas = NULL;  
+}
+
+void GPLikelihoodApprox::calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax )
+{
+  // robust cholesky routine without noise !!
+  CholeskyRobust cr ( true /*verbose*/, 0.0, false /*useCuda*/ );
+
+  Timer t;
+  t.start();
+  cerr << "VERIFY: Calculating kernel matrix ..." << endl;
+  Matrix K;
+  IntersectionKernelFunction<double> hik;
+  //old version, not needed anymore - we explore sparsity
+//   K = hik.computeKernelMatrix(data_matrix, noise); // = K + sigma^2 I
+  K = hik.computeKernelMatrix(f, noise);
+  t.stop();
+  cerr << "VERIFY: Time used for calculating kernel matrix is: " << t.getLast() << endl;
+
+  cerr << "K is a " << K.rows() << " x " << K.cols() << " matrix" << endl;
+
+  if ( K.containsNaN() ) 
+    fthrow(Exception, "NaN values in the kernel matrix");
+
+  cerr << "VERIFY: Computing likelihood ..." << endl;
+  t.start();
+  Matrix choleskyMatrix; 
+  cr.robustChol ( K, choleskyMatrix ); // K = choleskyMatrix^T * choleskyMatrix
+  double gt_logdet = (yset.size()) * cr.getLastLogDet();
+  cerr << "chol * chol^T: " << ( choleskyMatrix * choleskyMatrix.transpose() )(0,0,4,4) << endl;
+
+  double gt_dataterm = 0.0;
+  for ( map< int, NICE::Vector >::const_iterator i = yset.begin(); i != yset.end(); i++ )
+  {
+    const Vector & y = i->second;
+    Vector gt_alpha;
+    choleskySolve ( choleskyMatrix, y, gt_alpha );
+    cerr << "cholesky error: " << (K*gt_alpha - y).normL2() << endl;
+    gt_dataterm += y.scalarProduct ( gt_alpha );
+  }
+  //cerr << "linsolve error: " << (tmp - y).normL2() << endl;
+  t.stop();
+  cerr << "VERIFY: Time used for calculating likelihood: " << t.getLast() << endl;
+  
+  cerr << "Something of K: " << K(0, 0, 4, 4) << endl;
+  cerr << "frob norm: gt:" << K.frobeniusNorm() << endl;
+  
+  /*try {
+    Vector *eigenv = eigenvalues ( K ); 
+    cerr << "lambda_max: gt:" << eigenv->Max() << " est:" << lambdaMax << endl; 
+    delete eigenv;
+  } catch (...) {
+    cerr << "NICE eigenvalues function failed!" << endl;
+  }*/
+
+  double gt_nlikelihood = gt_logdet + gt_dataterm;
+  cerr << "OPTGT: " << mypara << " " << gt_nlikelihood << " " << gt_logdet << " " << gt_dataterm << endl;
+}
+
+
+double GPLikelihoodApprox::evaluate(const OPTIMIZATION::matrix_type & x)
+{
+  Vector xv;
+ 
+  xv.resize ( x.rows() );
+  for ( uint i = 0 ; i < x.rows(); i++ )
+    xv[i] = x(i,0);
+
+  // check whether we have been here before
+  unsigned long hashValue = xv.getHashValue();
+  if (verbose)  
+    std::cerr << "Current parameter: " << xv << " (weird hash value is " << hashValue << ")" << std::endl;
+  map<unsigned long, double>::const_iterator k = alreadyVisited.find(hashValue);
+  
+  if ( k != alreadyVisited.end() )
+  {
+    if (verbose)
+      std::cerr << "Using cached value: " << k->second << std::endl;
+    
+    //already computed, simply return the cached value
+    return k->second;
+  }
+
+  // set parameter value and check lower and upper bounds of pf
+  if ( ikm->outOfBounds(xv) )
+  {
+    if (verbose)
+      std::cerr << "Parameters are out of bounds" << std::endl;
+    return numeric_limits<double>::max();
+  }
+  
+  ikm->setParameters ( xv );
+  if (verbose)  
+    std::cerr << "setParameters xv: " << xv << std::endl;
+
+  // --- calculate the likelihood
+  // (a) logdet(K + sI)
+  Timer t;
+  if (verbose)  
+    std::cerr << "Calculating eigendecomposition " << ikm->rows() << " x " << ikm->cols() << std::endl;
+  t.start();
+  Vector eigenmax;
+  Matrix eigenmaxvectors;
+ 
+  int rank = nrOfEigenvaluesToConsider;
+
+  /** calculate the biggest eigenvalue */
+  // We use an Arnoldi solver and not a specialized one.
+  // We might also think about a coordinate descent solver for Arnoldi iterations, however,
+  // the current implementation converges very quickly
+  //old version: just use the first eigenvalue
+  
+  //NOTE
+  // in theory, we already have these values on hand, since they were computed in FMKGPHypOpt.
+  // Think about whether to pass them as input to this function or not.
+  eig->getEigenvalues( *ikm, eigenmax, eigenmaxvectors, rank ); 
+  if (verbose)
+    std::cerr << "eigenmax: " << eigenmax << std::endl;
+      
+  t.stop();
+
+  SparseVector binaryDataterms;
+  Vector diagonalElements;
+  
+  ikm->getDiagonalElements ( diagonalElements );
+
+  // set simple jacobi pre-conditioning
+  ILSConjugateGradients *linsolver_cg = dynamic_cast<ILSConjugateGradients *> ( linsolver );
+
+  //TODO why do we need this?  
+  if ( linsolver_cg != NULL )
+    linsolver_cg->setJacobiPreconditioner ( diagonalElements );
+  
+
+  // all alpha vectors will be stored!
+  map<int, Vector> alphas;
+
+  // This has to be done m times for the multi-class case
+  if (verbose)
+    std::cerr << "run ILS for every bin label. binaryLabels.size(): " << binaryLabels.size() << std::endl;
+  for ( map<int, Vector>::const_iterator j = binaryLabels.begin(); j != binaryLabels.end() ; j++)
+  {
+    // (b) y^T (K+sI)^{-1} y
+    int classCnt = j->first;
+    if (verbose)
+    {
+      std::cerr << "Solving linear equation system for class " << classCnt << " ..." << std::endl;
+      std::cerr << "Size of the kernel matrix " << ikm->rows() << std::endl;
+    }
+
+    /** About finding a good initial solution
+     * K~ = K + sigma^2 I
+     *
+     * (0) we have already estimated alpha for a previous hyperparameter, then
+     *     we should use this as an initial estimate. According to my quick
+     *     tests this really helps!
+     * (1) K~ \approx lambda_max v v^T
+     * \lambda_max v v^T * alpha = y     | multiply with v^T from left
+     * => \lambda_max v^T alpha = v^T y
+     * => alpha = y / lambda_max could be a good initial guess
+     * Plugging this back into the first equation yields v = y (up to scaling),
+     * which is a somewhat odd assumption (cf. kernel PCA).
+     * In practice this reduces the number of iterations by roughly 5 to 8.
+     */
+    Vector alpha;
+    
+    if ( (usePreviousAlphas) && (lastAlphas != NULL) )
+    {
+      alpha = (*lastAlphas)[classCnt];
+    }
+    else  
+    {
+      alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );
+    }
+    
+    Vector initialAlpha;
+    if ( verbose )
+      initialAlpha = alpha;
+
+    if ( verbose )
+      cerr << "Using the standard solver ..." << endl;
+
+    t.start();
+    linsolver->solveLin ( *ikm, binaryLabels[classCnt], alpha );
+    t.stop();
+   
+    //TODO This is only important for the incremental learning stuff.
+//     if ( verbose )
+//     {
+//       double initialAlphaNorm ( initialAlpha.normL1() );
+//       //compute the difference
+//       initialAlpha -= alpha;
+//       //take the abs of the differences
+//       initialAlpha.absInplace();
+//       //and compute a final score using a suitable norm
+// //       double difference( initialAlpha.normInf() );
+//       double difference( initialAlpha.normL1() );
+//       std::cerr << "debug -- last entry of new alpha: " << abs(alpha[alpha.size() -1 ]) << std::endl;
+//       std::cerr << "debug -- difference using inf norm: " << difference  << std::endl;
+//       std::cerr << "debug -- relative difference using inf norm: " << difference / initialAlphaNorm  << std::endl;
+//     }
+
+
+    if ( verbose )
+      std::cerr << "Time used for solving (K + sigma^2 I)^{-1} y: " << t.getLast() << std::endl;
+    // this term is no approximation at all
+    double dataterm = binaryLabels[classCnt].scalarProduct(alpha);
+    binaryDataterms[classCnt] = (dataterm);
+
+    alphas[classCnt] = alpha;
+  }
+  
+  // approximation stuff
+  if (verbose)  
+    cerr << "Approximating logdet(K) ..." << endl;
+  t.start();
+  LogDetApproxBaiAndGolub la;
+  la.setVerbose(this->verbose);
+
+  //NOTE: this is already the squared frobenius norm, that we are looking for.
+  double frobNormSquared(0.0);
+  
+  // ------------- LOWER BOUND, THAT IS USED --------------------
+  // frobNormSquared ~ \sum \lambda_i^2 <-- LOWER BOUND
+  for (int idx = 0; idx < rank; idx++)
+  {
+    frobNormSquared += (eigenmax[idx] * eigenmax[idx]);
+  }
+
+                
+  if (verbose)
+    cerr << " frob norm squared: est:" << frobNormSquared << endl;
+  if (verbose)  
+    std::cerr << "trace: " << diagonalElements.Sum() << std::endl;
+  double logdet = la.getLogDetApproximationUpperBound( diagonalElements.Sum(), /* trace = n only for non-transformed features*/
+                             frobNormSquared, /* use a rough approximation of the frobenius norm */
+                             eigenmax[0], /* upper bound for eigen values */
+                             ikm->rows() /* = n */ 
+                          );
+  
+  t.stop();
+  
+  if (verbose)
+    cerr << "Time used for approximating logdet(K): " << t.getLast() << endl;
+
+  // (c) adding the two terms
+  double nlikelihood = nrOfClasses*logdet;
+  double dataterm = binaryDataterms.sum();
+  nlikelihood += dataterm;
+
+  if (verbose)
+    cerr << "OPT: " << xv << " " << nlikelihood << " " << logdet << " " << dataterm << endl;
+
+  if ( nlikelihood < min_nlikelihood )
+  {
+    min_nlikelihood = nlikelihood;
+    ikm->getParameters ( min_parameter );
+    min_alphas = alphas;
+  }
+
+  alreadyVisited.insert ( pair<unsigned long, double> ( hashValue, nlikelihood ) );
+  return nlikelihood;
+}
+
+void GPLikelihoodApprox::setParameterLowerBound(const double & _parameterLowerBound)
+{
+  parameterLowerBound = _parameterLowerBound;
+}
+  
+void GPLikelihoodApprox::setParameterUpperBound(const double & _parameterUpperBound)
+{
+  parameterUpperBound = _parameterUpperBound;
+}
+
+void GPLikelihoodApprox::setLastAlphas(std::map<int, NICE::Vector> * _lastAlphas)
+{
+  lastAlphas = _lastAlphas;
+}
+
+void GPLikelihoodApprox::setBinaryLabels(const std::map<int, Vector> & _binaryLabels)
+{
+  binaryLabels = _binaryLabels;
+}
+
+void GPLikelihoodApprox::setUsePreviousAlphas( const bool & _usePreviousAlphas )
+{
+  this->usePreviousAlphas = _usePreviousAlphas; 
+}
+
+void GPLikelihoodApprox::setVerbose( const bool & _verbose )
+{
+  this->verbose = _verbose;
+}
+
+void GPLikelihoodApprox::setDebug( const bool & _debug )
+{
+  this->debug = _debug;
+}

+ 133 - 0
GPLikelihoodApprox.h

@@ -0,0 +1,133 @@
+/** 
+* @file GPLikelihoodApprox.h
+* @brief GP likelihood approximation as a cost function (Interface)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/09/2012
+
+*/
+#ifndef _NICE_GPLIKELIHOODAPPROXINCLUDE
+#define _NICE_GPLIKELIHOODAPPROXINCLUDE
+
+#include <map>
+
+#include <core/vector/VectorT.h>
+#include <core/basics/Config.h>
+#include <core/algebra/EigValues.h>
+#include <core/algebra/IterativeLinearSolver.h>
+
+#include <core/optimization/blackbox/CostFunction.h>
+
+#include "FastMinKernel.h"
+#include "ImplicitKernelMatrix.h"
+
+#include "gp-hik-core/parameterizedFunctions/ParameterizedFunction.h"
+
+namespace NICE {
+
+ /** 
+ * @class GPLikelihoodApprox
+ * @brief GP likelihood approximation as a cost function
+ * @author Erik Rodner, Alexander Freytag
+ */
+ 
+class GPLikelihoodApprox : public OPTIMIZATION::CostFunction
+{
+
+  protected:
+    
+    /** method computing eigenvalues */
+    EigValues *eig;
+
+    /** method for solving linear equation systems */
+    IterativeLinearSolver *linsolver;
+
+    /** object providing fast calculations */
+    ImplicitKernelMatrix *ikm;
+
+    /** set of binary label vectors */
+    std::map<int, Vector> binaryLabels;
+   
+    /** number of classes */
+    int nrOfClasses;
+    
+    /** Defines how fine the approximation of the squared Frobenius norm will be */
+    int nrOfEigenvaluesToConsider;
+    
+    //! only for debugging purposes, printing some statistics
+    void calculateLikelihood ( double mypara, const FeatureMatrix & f, const std::map< int, NICE::Vector > & yset, double noise, double lambdaMax );
+
+    //! last alpha vectors computed (from previous IL-step)
+    std::map<int, Vector> * lastAlphas;
+    
+    //! alpha vectors of the best solution
+    std::map<int, Vector> min_alphas;
+
+    //! minimal value of the likelihood
+    double min_nlikelihood;
+
+    //! best hyperparameter vector
+    Vector min_parameter;
+
+    //! function value pairs already visited
+    std::map<unsigned long, double> alreadyVisited;
+
+    //! to check whether the current solution of our optimization routine is too small
+    double parameterLowerBound;
+    //! to check whether the current solution of our optimization routine is too large
+    double parameterUpperBound;
+
+    //! Just for debugging to verify whether the likelihood approximation is useful at all
+    bool verifyApproximation;
+    
+    /** verbose flag */
+    bool verbose;    
+    /** debug flag for several outputs useful for debugging*/
+    bool debug;  
+    
+    /** after adding new examples, shall the previous alpha solution be used as an initial guess?*/
+    bool usePreviousAlphas;
+
+  public:
+
+    // ------ constructors and destructors ------
+    /** simple constructor */
+    GPLikelihoodApprox( const std::map<int, Vector> & binaryLabels, 
+                        ImplicitKernelMatrix *ikm,
+                        IterativeLinearSolver *linsolver,
+                        EigValues *eig,
+                        bool verifyApproximation = false,
+                        int _nrOfEigenvaluesToConsider = 1
+                      );
+      
+    /** simple destructor */
+    virtual ~GPLikelihoodApprox();
+     
+    // ------ main methods ------
+    /**
+    * @brief Evaluate the likelihood for given hyperparameters
+    *
+    * @param x vector with specified hyperparameters to evaluate their likelihood
+    *
+    * @return likelihood 
+    */
+    virtual double evaluate(const OPTIMIZATION::matrix_type & x);
+     
+    
+    // ------ get and set methods ------
+    const Vector & getBestParameters () const { return min_parameter; };
+    const std::map<int, Vector> & getBestAlphas () const { return min_alphas; };
+    
+    void setParameterLowerBound(const double & _parameterLowerBound);
+    void setParameterUpperBound(const double & _parameterUpperBound);
+    
+    void setLastAlphas(std::map<int, NICE::Vector> * _lastAlphas);
+    void setBinaryLabels(const std::map<int, Vector> & _binaryLabels);
+    
+    void setUsePreviousAlphas( const bool & _usePreviousAlphas );
+    void setVerbose( const bool & _verbose );
+    void setDebug( const bool & _debug );
+};
+
+}
+
+#endif
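A hedged sketch of driving this cost function for a single hyperparameter value. The default constructor of ILSConjugateGradients and the (rows, cols) constructor of OPTIMIZATION::matrix_type are assumptions; the kernel matrix and the eigenvalue solver are passed in to avoid assuming their constructors.

#include <map>
#include <core/algebra/ILSConjugateGradients.h>
#include "GPLikelihoodApprox.h"

double evaluateOnce ( NICE::ImplicitKernelMatrix * ikm,
                      NICE::EigValues * eig,
                      const std::map<int, NICE::Vector> & binaryLabels,
                      double parameterValue )
{
  // assumption: default-constructible CG solver from core/algebra
  NICE::ILSConjugateGradients linsolver;

  NICE::GPLikelihoodApprox nlikelihood ( binaryLabels, ikm, &linsolver, eig,
                                         false /* verifyApproximation */,
                                         1     /* nrOfEigenvaluesToConsider */ );

  // a 1 x 1 parameter "matrix" holding the single hyperparameter
  OPTIMIZATION::matrix_type x ( 1, 1 );
  x(0,0) = parameterValue;

  return nlikelihood.evaluate ( x );
}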

+ 230 - 0
IKMLinearCombination.cpp

@@ -0,0 +1,230 @@
+/** 
+* @file IKMLinearCombination.cpp
+* @brief Combination of several (implicit) kernel matrices, such as noise matrix and gp-hik kernel matrix (Implementation)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/14/2012
+
+*/
+#include <iostream>
+
+#include "IKMLinearCombination.h"
+
+using namespace NICE;
+using namespace std;
+
+
+IKMLinearCombination::IKMLinearCombination()
+{
+  verbose = false;
+}
+
+IKMLinearCombination::~IKMLinearCombination()
+{
+  if (matrices.size() != 0)
+  {
+    for (uint i = 0; i < matrices.size(); i++)
+      delete matrices[i];
+  }
+}
+
+
+void IKMLinearCombination::getDiagonalElements ( Vector & diagonalElements ) const
+{
+  diagonalElements.resize ( rows() );
+  diagonalElements.set(0.0);
+  
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    Vector diagonalElementsSingle;
+    ikm->getDiagonalElements ( diagonalElementsSingle );
+    diagonalElements += diagonalElementsSingle;
+  }
+}
+
+void IKMLinearCombination::getFirstDiagonalElement ( double & diagonalElement ) const
+{
+  diagonalElement = 0.0;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    double firstElem;
+    ikm->getFirstDiagonalElement(firstElem);
+    diagonalElement += firstElem;
+  }
+}
+
+
+uint IKMLinearCombination::getNumParameters() const
+{
+  if ( matrices.empty() )
+    return 0;
+  return parameterRanges[ parameterRanges.size() - 1 ] + matrices[ parameterRanges.size() - 1 ]->getNumParameters();
+}
+    
+void IKMLinearCombination::getParameters(Vector & parameters) const
+{
+  uint ind = 0;
+  parameters.resize ( getNumParameters() );
+  if (verbose)
+    cerr << "Number of total parameters: " << parameters.size() << endl;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++, ind++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    if (verbose)
+      cerr << "Model " << ind << " has " << ikm->getNumParameters() << " parameters" << endl;
+    if ( ikm->getNumParameters() == 0 ) continue;
+    Vector singleParameterRef = parameters.getRangeRef( parameterRanges[ ind ], parameterRanges[ ind ] + ikm->getNumParameters() - 1 );
+    ikm->getParameters ( singleParameterRef );
+  }
+}
+
+void IKMLinearCombination::setParameters(const Vector & parameters)
+{
+  uint ind = 0;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++, ind++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    if ( ikm->getNumParameters() == 0 ) continue;
+    ikm->setParameters ( parameters.getRange( parameterRanges[ ind ], parameterRanges[ ind ] + ikm->getNumParameters() - 1) );
+  }
+}
+
+void IKMLinearCombination::setVerbose(const bool& _verbose)
+{
+  verbose = _verbose;
+}
+
+bool IKMLinearCombination::outOfBounds(const Vector & parameters) const
+{
+  uint ind = 0;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++, ind++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    if ( ikm->getNumParameters() == 0 ) continue;
+    if ( ikm->outOfBounds( parameters.getRange( parameterRanges[ ind ], parameterRanges[ ind ] + ikm->getNumParameters() - 1) ) )
+      return true;
+  }
+  return false;
+}
+
+Vector IKMLinearCombination::getParameterLowerBounds() const
+{
+  Vector lB;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    if ( ikm->getNumParameters() == 0 ) continue;
+    lB.append( ikm->getParameterLowerBounds() );
+  }
+  return lB;
+}
+
+Vector IKMLinearCombination::getParameterUpperBounds() const
+{
+  Vector uB;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    if ( ikm->getNumParameters() == 0 ) continue;
+    uB.append( ikm->getParameterUpperBounds() );
+  }
+  return uB;
+}
+
+void IKMLinearCombination::updateParameterRanges()
+{
+  if ( matrices.size() == 0 ) {
+    parameterRanges.clear();
+  } else {
+    parameterRanges.resize(matrices.size());
+    parameterRanges[0] = 0;
+
+    if ( matrices.size() == 1 ) return;
+
+    uint ind = 1;
+    vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin();
+    for ( ; ind < parameterRanges.size(); i++, ind++ )
+    {
+      ImplicitKernelMatrix *ikm = *i;
+      if (verbose)
+        cerr << "Parameter range: size is " << parameterRanges.size() << ", index is " << ind << endl;
+      parameterRanges[ind] = parameterRanges[ind-1] + ikm->getNumParameters();
+      if (verbose)
+        cerr << "Value is " << parameterRanges[ind] << endl;
+    }
+  }
+}
+    
+void IKMLinearCombination::addModel ( ImplicitKernelMatrix *ikm )
+{
+  matrices.push_back ( ikm );
+  updateParameterRanges();
+}
+
+void IKMLinearCombination::multiply (NICE::Vector & y, const NICE::Vector & x) const
+{
+  y.resize( rows() );
+  y.set(0.0);
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    Vector ySingle;
+    ikm->multiply ( ySingle, x );
+    y += ySingle;
+  }
+}
+
+uint IKMLinearCombination::rows () const
+{
+  return cols();
+}
+
+uint IKMLinearCombination::cols () const
+{
+  if ( matrices.empty() )
+    fthrow(Exception, "No models stored, cols() and rows() are unavailable");
+  return (* matrices.begin())->cols();
+}
+
+double IKMLinearCombination::approxFrobNorm() const
+{
+  double frobNormApprox(0.0);
+  if (verbose)
+    std::cerr << "IKMLinCom: single approx: " ;
+  for ( vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    frobNormApprox += ikm->approxFrobNorm();
+    if (verbose)
+      std::cerr << ikm->approxFrobNorm() << " ";
+  }
+  if (verbose)
+    std::cerr << std::endl;
+  return frobNormApprox;
+}
+
+void IKMLinearCombination::setApproximationScheme(const int & _approxScheme)
+{
+  for ( std::vector<ImplicitKernelMatrix *>::const_iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    ikm->setApproximationScheme(_approxScheme);
+  }
+}
+
+ImplicitKernelMatrix * IKMLinearCombination::getModel(const uint & idx) const
+{
+  if ( idx < matrices.size() )
+    return matrices[idx];
+  else
+    return NULL;
+}
+
+// ----------------- INCREMENTAL LEARNING METHODS -----------------------
+void IKMLinearCombination::addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels)
+{
+  for ( vector<ImplicitKernelMatrix *>::iterator i = matrices.begin(); i != matrices.end(); i++ )
+  {
+    ImplicitKernelMatrix *ikm = *i;
+    ikm->addExample(x, binLabels);
+  }
+}

+ 81 - 0
IKMLinearCombination.h

@@ -0,0 +1,81 @@
+/** 
+* @file IKMLinearCombination.h
+* @brief Combination of several (implicit) kernel matrices, such as noise matrix and gp-hik kernel matrix (Interface)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/14/2012
+
+*/
+#ifndef _NICE_IKMLINEARCOMBINATIONINCLUDE
+#define _NICE_IKMLINEARCOMBINATIONINCLUDE
+
+#include <vector>
+#include "ImplicitKernelMatrix.h"
+
+namespace NICE {
+
+ /** 
+ * @class IKMLinearCombination
+ * @brief Combination of several (implicit) kernel matrices, such as noise matrix and gp-hik kernel matrix
+ * @author Erik Rodner, Alexander Freytag
+ */
+
+class IKMLinearCombination : public ImplicitKernelMatrix
+{
+
+  protected:
+    std::vector< ImplicitKernelMatrix * > matrices;
+    std::vector<int> parameterRanges;
+    bool verbose;
+
+    void updateParameterRanges();
+  public:
+
+    /** simple constructor */
+    IKMLinearCombination();
+      
+    /** simple destructor */
+    virtual ~IKMLinearCombination();
+
+    virtual void getDiagonalElements ( Vector & diagonalElements ) const;
+    virtual void getFirstDiagonalElement ( double & diagonalElement ) const;
+    virtual uint getNumParameters() const;
+    
+    virtual void getParameters(Vector & parameters) const;
+    virtual void setParameters(const Vector & parameters);
+    virtual bool outOfBounds(const Vector & parameters) const;
+    
+    void setVerbose(const bool & _verbose);
+
+    virtual Vector getParameterLowerBounds() const;
+    virtual Vector getParameterUpperBounds() const;
+
+    void addModel ( ImplicitKernelMatrix *ikm );
+    
+    /** multiply with a vector: A*x = y */
+    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
+
+    /** get the number of rows in A */
+    virtual uint rows () const;
+
+    /** get the number of columns in A */
+    virtual uint cols () const;
+    
+    virtual double approxFrobNorm() const;
+    
+    virtual void setApproximationScheme(const int & _approxScheme);
+    
+    ImplicitKernelMatrix * getModel(const uint & idx) const;
+    inline int getNumberOfModels(){return matrices.size();};
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 ) {};
+    virtual void store ( std::ostream & os, int format = 0 ) const {};  
+    virtual void clear () {};
+    
+    void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels);
+
+};
+
+}
+
+#endif
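A hedged composition sketch, using two IKMNoise parts (declared in IKMNoise.h below) for brevity; in a real setup one part would be the gp-hik kernel matrix. Note that the destructor above deletes all added models, so ownership of the heap-allocated parts transfers to the combination.

#include "IKMLinearCombination.h"
#include "IKMNoise.h"

NICE::Vector combinedMultiply ( const NICE::Vector & x )
{
  // the combination takes ownership of the added models (see the destructor)
  NICE::IKMLinearCombination ikmsum;
  ikmsum.addModel ( new NICE::IKMNoise ( x.size(), 0.1, false ) ); // 0.1 * I
  ikmsum.addModel ( new NICE::IKMNoise ( x.size(), 0.5, false ) ); // 0.5 * I

  // y = (0.1 * I + 0.5 * I) * x, i.e., the multiply() results of all parts are summed
  NICE::Vector y;
  ikmsum.multiply ( y, x );
  return y;
}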

+ 246 - 0
IKMNoise.cpp

@@ -0,0 +1,246 @@
+/** 
+* @file IKMNoise.cpp
+* @author Erik Rodner, Alexander Freytag
+* @brief Noise matrix (for model regularization) as an implicit kernel matrix (Implementation)
+* @date 02/14/2012
+
+*/
+#include <iostream>
+#include <limits>
+
+#include "IKMNoise.h"
+
+using namespace NICE;
+using namespace std;
+
+IKMNoise::IKMNoise()
+{
+  this->size = 0;
+  this->noise = 0.1;
+  this->optimizeNoise = false;
+  this->np = 0;
+  this->nn = 0;
+  this->verbose = false;
+}
+
+IKMNoise::IKMNoise( uint size, double noise, bool optimizeNoise )
+{
+  this->size = size;
+  this->noise = noise;
+  this->optimizeNoise = optimizeNoise;
+  this->np = 0;
+  this->nn = 0;
+  this->verbose = false;
+}
+
+IKMNoise::IKMNoise( const Vector & labels, double noise, bool optimizeNoise )
+{
+  this->size = labels.size();
+  this->noise = noise;
+  this->optimizeNoise = optimizeNoise;
+  this->labels = labels;
+  this->np = 0;
+  this->nn = 0;
+  this->verbose = false;
+  for ( uint i = 0 ; i < labels.size(); i++ )
+    if ( labels[i] == 1 ) 
+      this->np++;
+    else
+      this->nn++;
+    
+  if (verbose)
+  {
+    std::cerr << "IKMNoise np : " << np << " nn: " << nn << std::endl;
+  }
+}
+
+
+IKMNoise::~IKMNoise()
+{
+}
+
+
+void IKMNoise::getDiagonalElements ( Vector & diagonalElements ) const
+{
+  diagonalElements.resize( size );
+  if ( labels.size() == 0 ) {
+    diagonalElements.set( noise );
+  } else {
+    for ( uint i = 0 ; i < labels.size(); i++ )
+      if ( labels[i] == 1 ) {
+        diagonalElements[i] = 2*np*noise/size;
+      } else {
+        diagonalElements[i] = 2*nn*noise/size;
+      }
+  }
+}
+
+void IKMNoise::getFirstDiagonalElement ( double & diagonalElement ) const
+{
+  if ( labels.size() == 0 )
+  {
+    if (verbose)
+    {    
+      std::cerr << "IKMNoise::getFirstDiagonalElement  and labels.size() is zero" << std::endl;
+    }
+    diagonalElement = noise ;
+  }
+  else
+  {
+    if ( labels[0] == 1 )
+    {
+      if (verbose)
+      {          
+        std::cerr << "IKMNoise::getFirstDiagonalElement -- and first entry is +1" << std::endl;
+      }
+      diagonalElement = 2*np*noise/size;
+    } 
+    else
+    {
+      if (verbose)
+      {                
+        std::cerr << "IKMNoise::getFirstDiagonalElement -- and first entry is -1" << std::endl;
+      }
+      diagonalElement = 2*nn*noise/size;
+    }
+  }
+}
+
+
+uint IKMNoise::getNumParameters() const
+{
+  return optimizeNoise ? 1 : 0;
+}
+    
+void IKMNoise::getParameters(Vector & parameters) const
+{
+  if ( optimizeNoise )
+  {
+    parameters.resize(1);
+    parameters[0] = log(noise);
+  }
+}
+
+void IKMNoise::setParameters(const Vector & parameters)
+{
+  if ( optimizeNoise )
+  {
+    noise = exp(parameters[0]);
+  }
+}
+
+bool IKMNoise::outOfBounds(const Vector & parameters) const
+{
+  // we do not have any restrictions
+  return false;
+}
+
+Vector IKMNoise::getParameterLowerBounds() const
+{
+  Vector lB;
+  if ( optimizeNoise ) {
+    lB.resize(1);
+    lB[0] = -std::numeric_limits<double>::max();
+  }
+  return lB;
+}
+
+Vector IKMNoise::getParameterUpperBounds() const
+{
+  Vector uB;
+  if ( optimizeNoise ) {
+    uB.resize(1);
+    uB[0] = std::numeric_limits<double>::max();
+  }
+  return uB;
+}
+
+void IKMNoise::multiply (NICE::Vector & y, const NICE::Vector & x) const
+{
+  y.resize( rows() );
+  
+  if ( labels.size() == 0 )
+  {
+    y = noise * x;
+  } else {
+    for ( uint i = 0 ; i < labels.size(); i++ )
+      if ( labels[i] == 1 ) {
+        y[i] = 2*np*noise/size * x[i];
+      } else {
+        y[i] = 2*nn*noise/size * x[i];
+      }
+  }
+}
+
+uint IKMNoise::rows () const
+{
+  return cols();
+}
+
+uint IKMNoise::cols () const
+{
+  return size;
+}
+
+double IKMNoise::approxFrobNorm() const
+{
+  NICE::Vector diagEl;
+  this->getDiagonalElements ( diagEl);
+  return diagEl.normL2();
+}
+
+// ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
+
+void IKMNoise::restore ( std::istream & is, int format )
+{
+  if (is.good())
+  {
+    is.precision (std::numeric_limits<double>::digits10 + 1); 
+    
+    std::string tmp;
+    is >> tmp; //class name
+    
+    is >> tmp;
+    is >> size;
+    
+    is >> tmp;
+    is >> noise;
+    
+    is >> tmp;
+    is >> optimizeNoise;
+    
+    is >> tmp;
+    is >> np;
+    
+    is >> tmp;
+    is >> nn;
+    
+    is >> tmp;
+    is >> labels;
+  }
+}
+
+void IKMNoise::store ( std::ostream & os, int format ) const
+{
+  os << "IKMNoise" << std::endl;
+  os << "size: " << size << std::endl;
+  os << "noise: " << noise << std::endl;
+  os << "optimizeNoise: " <<  optimizeNoise << std::endl;
+  os << "np: " << np  << std::endl;
+  os << "nn: " << nn << std::endl;
+  os << "labels: " << labels << std::endl;
+}
+
+// ----------------- INCREMENTAL LEARNING METHODS -----------------------
+void IKMNoise::addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels)
+{
+  ++size;
+  if ( (np != 0) && (nn != 0) )
+  {
+    labels = binLabels;
+    if (binLabels[binLabels.size()-1] == 1)
+      ++np;
+    else
+      ++nn;
+  }
+}

+ 84 - 0
IKMNoise.h

@@ -0,0 +1,84 @@
+/** 
+* @file IKMNoise.h
+* @author Erik Rodner, Alexander Freytag
+* @brief Noise matrix (for model regularization) as an implicit kernel matrix (Interface)
+* @date 02/14/2012
+
+*/
+#ifndef _NICE_IKMNOISEINCLUDE
+#define _NICE_IKMNOISEINCLUDE
+
+#include <vector>
+#include "ImplicitKernelMatrix.h"
+
+namespace NICE {
+
+ /** 
+ * @class IKMNoise
+ * @brief Noise matrix (for model regularization) as an implicit kernel matrix
+ * @author Erik Rodner, Alexander Freytag
+ */
+
+class IKMNoise : public ImplicitKernelMatrix
+{
+
+  protected:
+    Vector labels;
+
+    uint size;
+
+    double noise;
+
+    bool optimizeNoise;
+
+    uint np;
+    uint nn;
+    
+    /** give some debug outputs. There is not set function so far... */
+    bool verbose;
+  
+  public:
+
+    IKMNoise();
+    
+    IKMNoise( uint size, double noise, bool optimizeNoise );
+    
+    IKMNoise( const Vector & labels, double noise, bool optimizeNoise );
+      
+    virtual ~IKMNoise();
+
+    virtual void getDiagonalElements ( Vector & diagonalElements ) const;
+    virtual void getFirstDiagonalElement ( double & diagonalElement ) const;
+    virtual uint getNumParameters() const;
+    
+    virtual void getParameters(Vector & parameters) const;
+    virtual void setParameters(const Vector & parameters);
+    virtual bool outOfBounds(const Vector & parameters) const;
+
+    virtual Vector getParameterLowerBounds() const;
+    virtual Vector getParameterUpperBounds() const;
+
+    /** multiply with a vector: A*x = y */
+    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
+
+    /** get the number of rows in A */
+    virtual uint rows () const;
+
+    /** get the number of columns in A */
+    virtual uint cols () const;
+    
+    virtual double approxFrobNorm() const;
+    virtual void setApproximationScheme(const int & _approxScheme) {};
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 );
+    virtual void store ( std::ostream & os, int format = 0 ) const; 
+    virtual void clear () {};
+    
+    void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels);
+
+};
+
+}
+
+#endif
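A hedged numeric sketch of the label-dependent regularization above: with labels (1, -1, -1, -1) and noise 0.1 we get np = 1 and nn = 3, so the positive example receives 2*np*noise/size = 0.05 on the diagonal and each negative one 2*nn*noise/size = 0.15.

#include <core/vector/VectorT.h>
#include "IKMNoise.h"

void diagonalDemo ()
{
  NICE::Vector labels ( 4, -1.0 );
  labels[0] = 1.0;

  NICE::IKMNoise ikmNoise ( labels, 0.1 /* noise */, false /* optimizeNoise */ );

  NICE::Vector diag;
  ikmNoise.getDiagonalElements ( diag );  // yields (0.05, 0.15, 0.15, 0.15)
}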

+ 21 - 0
ImplicitKernelMatrix.cpp

@@ -0,0 +1,21 @@
+/** 
+* @file ImplicitKernelMatrix.cpp
+* @brief An implicit kernel matrix, allowing for fast multiplication with arbitrary vectors (Implementation - abstract)
+* @author Erik Rodner, Alexander Freytag
+* @date 02/14/2012
+*/
+#include <iostream>
+
+#include "ImplicitKernelMatrix.h"
+
+using namespace NICE;
+
+
+ImplicitKernelMatrix::ImplicitKernelMatrix()
+{
+}
+
+ImplicitKernelMatrix::~ImplicitKernelMatrix()
+{
+}
+

+ 66 - 0
ImplicitKernelMatrix.h

@@ -0,0 +1,66 @@
+/** 
+* @file ImplicitKernelMatrix.h
+* @author Erik Rodner, Alexander Freytag
+* @brief An implicit kernel matrix, allowing for fast multiplication with arbitrary vectors (Interface)
+* @date 02/14/2012
+
+*/
+#ifndef _NICE_IMPLICITKERNELMATRIXINCLUDE
+#define _NICE_IMPLICITKERNELMATRIXINCLUDE
+
+#include <iostream>
+
+#include <core/algebra/GenericMatrix.h>
+
+#include "core/basics/Persistent.h"
+
+namespace NICE {
+  
+/** @class ImplicitKernelMatrix
+ * @brief An implicit kernel matrix, allowing for fast multiplication with arbitrary vectors
+ * @author Erik Rodner, Alexander Freytag
+ * @date 02/14/2012
+ */
+
+class ImplicitKernelMatrix : public GenericMatrix, NICE::Persistent
+{
+
+  protected:
+
+  public:
+
+    /** simple constructor */
+    ImplicitKernelMatrix();
+      
+    /** simple destructor */
+    virtual ~ImplicitKernelMatrix();
+
+    //get set methods
+    virtual void getDiagonalElements ( Vector & diagonalElements ) const = 0;
+    virtual void getFirstDiagonalElement ( double & diagonalElement ) const = 0;
+
+    virtual uint getNumParameters() const = 0;
+
+    virtual void getParameters(Vector & parameters) const = 0;
+    virtual void setParameters(const Vector & parameters) = 0;
+    virtual bool outOfBounds(const Vector & parameters) const = 0;
+
+    virtual Vector getParameterLowerBounds() const = 0;
+    virtual Vector getParameterUpperBounds() const = 0;
+    
+    virtual double approxFrobNorm() const = 0;
+    virtual void setApproximationScheme(const int & _approxScheme) = 0;
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 ) = 0;
+    virtual void store ( std::ostream & os, int format = 0 )  const = 0;
+    virtual void clear () = 0;
+    
+    //high order methods
+    virtual void addExample(const NICE::SparseVector & x, const NICE::Vector & binLabels) = 0;
+    virtual void  multiply (NICE::Vector &y, const NICE::Vector &x) const = 0;
+};
+
+}
+
+#endif

+ 165 - 0
License

@@ -0,0 +1,165 @@
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.

+ 8 - 0
Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursively going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The
+# SUBDIR_before and SUBDIR_add helpers are only required so that we can recover
+# the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusively use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 39 - 0
Quantization.cpp

@@ -0,0 +1,39 @@
+/** 
+* @file Quantization.cpp
+* @brief Quantization of one-dimensional signals with a standard range of [0,1] (Implementation)
+* @author Erik Rodner
+* @date 01/09/2012
+
+*/
+#include <iostream>
+
+#include "Quantization.h"
+
+using namespace NICE;
+
+
+Quantization::Quantization( uint numBins )
+{
+  this->numBins = numBins;
+}
+
+Quantization::~Quantization()
+{
+}
+
+uint Quantization::size() const
+{
+  return numBins;
+}
+  
+double Quantization::getPrototype (uint bin) const
+{
+  return bin / (double)(numBins-1);
+}
+  
+uint Quantization::quantize (double value) const
+{
+  if ( value <= 0.0 ) return 0;
+  else if ( value >= 1.0 ) return numBins-1;
+  else return (uint)( value * (numBins-1) + 0.5 );
+}
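A quick hedged check of the binning formula (the main() wrapper is illustrative): with numBins = 5 the prototypes are 0, 0.25, 0.5, 0.75, 1, and the value 0.3 falls into bin (uint)(0.3 * 4 + 0.5) = 1, whose prototype is 0.25.

#include <iostream>
#include "Quantization.h"

int main ()
{
  NICE::Quantization q ( 5 );     // prototypes: 0, 0.25, 0.5, 0.75, 1
  uint bin = q.quantize ( 0.3 );  // (uint)( 0.3 * 4 + 0.5 ) == 1
  std::cout << bin << " -> " << q.getPrototype ( bin ) << std::endl;  // "1 -> 0.25"
  return 0;
}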

+ 69 - 0
Quantization.h

@@ -0,0 +1,69 @@
+/** 
+* @file Quantization.h
+* @brief Quantization of one-dimensional signals with a standard range of [0,1] (Interface)
+* @author Erik Rodner
+* @date 01/09/2012
+*/
+#ifndef _NICE_QUANTIZATIONINCLUDE
+#define _NICE_QUANTIZATIONINCLUDE
+
+#include <core/basics/types.h>
+
+namespace NICE {
+  
+ /** 
+ * @class Quantization
+ * @brief Quantization of one-dimensional signals with a standard range of [0,1]
+ * @author Erik Rodner
+ */
+ 
+class Quantization
+{
+
+  /** TODO
+   * The current implementation only provides uniform quantization. We could extend this
+   * by giving a ParameterizedFunction object to the constructor, which would allow us to inverse-transform function values
+   * before performing the binning.
+   */
+
+  protected:
+
+  uint numBins;
+
+  public:
+
+  /** simple constructor */
+  Quantization( uint numBins );
+    
+  /** simple destructor */
+  virtual ~Quantization();
+
+  /**
+  * @brief get the size of the vocabulary, i.e. the number of bins
+  */
+  virtual uint size() const;
+
+  /**
+  * @brief get specific word or prototype element of the quantization
+  *
+  * @param bin the index of the bin
+  *
+  * @return value of the prototype
+  */
+  virtual double getPrototype (uint bin) const;
+
+  /**
+  * @brief Determine the vocabulary bin for a given signal value. This is not the corresponding prototype,
+  * which has to be requested with getPrototype afterwards
+  *
+  * @param value signal function value
+  *
+  * @return index of the bin entry corresponding to the given signal value
+  */
+  virtual uint quantize (double value) const;
+     
+};
+
+}
+
+#endif

+ 18 - 0
README

@@ -0,0 +1,18 @@
+This code requires the NICE-core library (available on github) and implements the ideas presented in the following papers:
+
+Alexander Freytag and Erik Rodner and Paul Bodesheim and Joachim Denzler
+"Rapid Uncertainty Computation with Gaussian Processes and Histogram Intersection Kernels"
+Asian Conference on Computer Vision (ACCV). 2012.
+
+Erik Rodner and Alexander Freytag and Paul Bodesheim and Joachim Denzler
+"Large-Scale Gaussian Process Classification with Flexible Adaptive Histogram Kernels"
+European Conference on Computer Vision (ECCV). 2012. pages 85--98
+
+If you use this software in your research, you have to cite at least one of the two papers.
+
+Problems, questions, and general feedback should be addressed to:
+
+Alexander.Freytag <at> uni-jena.de
+Erik.Rodner <at> uni-jena.de
+
+Copyright (LGPL) Alexander Freytag and Erik Rodner

+ 666 - 0
SortedVectorSparse.h

@@ -0,0 +1,666 @@
+/**
+* @file SortedVectorSparse.h
+* @brief A sparse vector that is always sorted and keeps index mapping! (Interface and Implementation)
+* @author Alexander Freytag
+* @date 10-01-2012 (dd-mm-yyyy)
+*/
+#ifndef SORTEDVECTORSPARSEINCLUDE
+#define SORTEDVECTORSPARSEINCLUDE
+
+#include <vector>
+#include <cmath>
+#include <map>
+#include <algorithm>
+#include <iostream>
+#include <limits>
+
+#include <core/basics/Exception.h>
+#include <core/vector/VectorT.h>
+#include <core/vector/SparseVectorT.h>
+#include "core/basics/Persistent.h"
+
+namespace NICE {
+
+ /** 
+ * @class SortedVectorSparse
+ * @brief A sparse vector that is always sorted and keeps index mapping!
+ * @author Alexander Freytag
+ */  
+  
+template<class T> class SortedVectorSparse : NICE::Persistent{
+
+  public:
+    //! original index, transformed feature value
+    typedef typename std::pair< int, T > dataelement;
+    typedef typename std::multimap< T, dataelement >::iterator elementpointer;
+    typedef typename std::multimap< T, dataelement >::const_iterator const_elementpointer;
+    typedef typename std::multimap< T, dataelement >::const_reverse_iterator const_reverse_elementpointer;
+
+  protected:
+    T tolerance;
+    int n;
+    
+    //! verbose flag for output after calling the restore-function
+    bool verbose;
+
+    //! mapping of the original feature value to the index and the transformed feature value
+    std::multimap< T, dataelement > nzData;
+
+    //! non zero index mapping, original index -> pointer to the element
+    std::map<int, elementpointer > nonzero_indices;
+
+  public:
+    /**
+    * @brief default constructor
+    * @author Alexander Freytag
+    * @date 10-01-2012 (dd-mm-yyyy)
+    */
+    SortedVectorSparse() {
+      n = 0;
+      tolerance = ( T ) 10e-10;
+      verbose = false;
+    }
+
+    /**
+    * @brief standard constructor
+    * @author Alexander Freytag
+    * @date 10-01-2012 (dd-mm-yyyy)
+    */
+    SortedVectorSparse ( const SortedVectorSparse<T> &v ) : nzData ( v.nzData )
+    {
+      this->tolerance = v.getTolerance();
+      this->n = v.getN();
+      this->nonzero_indices = v.nonzero_indices;
+      this->verbose = v.getVerbose();      
+    }
+
+    SortedVectorSparse ( const std::vector<T> &v, const T & _tolerance )
+    {
+      tolerance = _tolerance;
+      n = 0;
+      insert ( v );
+      verbose = false;
+    }
+
+    /**
+    * @brief standard destructor
+    * @author Alexander Freytag
+    * @date 10-01-2012 (dd-mm-yyyy)
+    */
+    ~SortedVectorSparse() {}
+
+    T getTolerance() const {
+      return tolerance;
+    };
+    int getN() const {
+      return n;
+    };
+    void setTolerance ( const T & _tolerance ) {
+      if ( _tolerance < 0 )
+        this->tolerance = -_tolerance;
+      else
+        this->tolerance = _tolerance;
+    };
+
+
+    void setN ( const int & _n ) {
+      n = _n;
+    };
+    int getZeros() const {
+      //std::cerr << "n in getZeros: " << n << std::endl;
+      return n - nzData.size();
+    };
+    int getNonZeros() const {
+      return nzData.size();
+    };
+
+    /**
+    * @brief add an element to the vector. If the feature number is set, we do not check whether this feature was already present!
+    *
+    * @param newElement element which will be added
+    * @param featureNumber the index of the new element (optional)
+    */
+    void insert ( const T & newElement, const int & featureNumber = -1 )
+    {
+      int newIndex ( featureNumber);
+      if ( featureNumber < 0)
+        newIndex = n;      
+      
+      if ( !checkSparsity ( newElement ) )
+      {
+        // element is not sparse
+        std::pair<T, dataelement > p ( newElement, dataelement ( newIndex, newElement ) );
+        elementpointer it = nzData.insert ( p );
+        nonzero_indices.insert ( std::pair<int, elementpointer> ( newIndex, it ) );
+      }
+      n++;
+    }
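+    // Worked example (hedged): inserting the values 0.0, 0.7, 0.0, 0.2 one by
+    // one with the default tolerance yields n == 4 and getNonZeros() == 2;
+    // ascending iteration over nonzeroElements() visits 0.2 (original index 3)
+    // before 0.7 (original index 1), and access(0) returns 0, since index 0
+    // stayed sparse.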
+  
+    /**
+    * @brief add an element to the vector. If the feature number is set, we do not check whether this feature was already present!
+    *
+    * @param newElement element which will be added
+    * @param newElementTransformed transformed feature value
+    * @param featureNumber the index of the new element (optional)
+    */
+    void insert ( const T & newElement, const T & newElementTransformed, const int & featureNumber = -1 )
+    {
+      int newIndex ( featureNumber);
+      if ( featureNumber < 0)
+        newIndex = n;
+      
+      if ( !checkSparsity ( newElement ) )
+      {
+        // element is not sparse
+        
+        std::pair<T, dataelement > p ( newElement, dataelement ( newIndex, newElementTransformed ) );
+        elementpointer it = nzData.insert ( p );
+        nonzero_indices.insert ( std::pair<int, elementpointer> ( newIndex, it ) );
+      }
+      n++;
+    }
+
+    /**
+    * @brief add a vector of new elements to the vector 
+    *
+    * @param v new element which will be added
+    */
+    void insert ( const std::vector<T> &v )
+    {
+      for ( uint i = 0; i < v.size(); i++ )
+        insert ( v[i] );
+    }
+    /**
+    * @brief add a vector of new elements to the vector. It doesn't make much sense to have such a function, but who knows...
+    *
+    * @param v Vector of new Elements
+    */
+    void insert ( const NICE::SparseVector* v )
+    {
+      for (NICE::SparseVector::const_iterator vIt = v->begin(); vIt != v->end(); vIt++)
+      {
+        insert((T)vIt->second);
+      }
+    }
+    
+    /**
+    * @brief non-efficient access to a specific non-zero element
+    *
+    * @param a index of a non-zero element (not the original index!)
+    *
+    * @return value of the element (not the original value)
+    */
+    T accessNonZero ( int a ) const
+    {
+      const_elementpointer it = nzData.begin();
+      advance ( it, a );
+      dataelement de = it->second;
+
+      return de.second;
+    };
+
+    /**
+    * @brief access the transformed value
+    *
+    * @param a original index of the element
+    *
+    * @return value of the element
+    */
+    inline T access ( int a ) const
+    {
+      typename std::map<int, elementpointer>::const_iterator i = nonzero_indices.find ( a );
+      if ( i != nonzero_indices.end() ) {
+        // accessing a nonzero element
+        const elementpointer & it = i->second;
+        const dataelement & de = it->second;
+        // we access the transformed value here and not the
+        // original one
+        return de.second;
+      } else {
+        // the element is zero
+        return ( T ) 0;
+      }
+    }
+
+    /**
+    * @brief access the original value
+    *
+    * @param a original index of the element
+    *
+    * @return value of the element
+    */
+    inline T accessOriginal ( int a ) const
+    {
+      typename std::map<int, elementpointer>::const_iterator i = nonzero_indices.find ( a );
+      if ( i != nonzero_indices.end() ) {
+        // accessing a nonzero element
+        elementpointer it = i->second;
+        return it->first;
+      } else {
+        // the element is zero
+        return ( T ) 0;
+      }
+    }
+
+    std::multimap< T, dataelement > & nonzeroElements()
+    {
+      return nzData;
+    }
+
+    const std::multimap< T, dataelement > & nonzeroElements() const
+    {
+      return nzData;
+    }
+
+    const std::map< int, elementpointer> & nonzeroIndices() const
+    {
+      return nonzero_indices;
+    }
+
+    /**
+    * @brief check whether the element is treated as sparse (zero) given the tolerance
+    *
+    * @param element value to check
+    *
+    * @return true if the element lies within the tolerance interval around zero
+    */
+    bool checkSparsity ( T element )
+    {
+      if ( element > tolerance )
+        return false;
+      if ( element < -tolerance )
+        return false;
+
+      return true;
+    }
+
+    /**
+    * @brief set a specific element. A boolean flag controls
+    * whether we set the transformed value or the original value. Setting the original
+    * value (default case) is highly inefficient. Setting the transformed value is appropriate
+    * when applying an order preserving transformation.
+    *
+    * @param a proper index
+    * @param newElement element value
+    */
+    void set ( int a, T newElement, bool setTransformedValue = false )
+    {
+      if ( a >= n || a < 0 )
+        fthrow ( Exception, "SortedVectorSparse::set(): out of bounds" );
+
+      typename std::map<int, elementpointer>::iterator i = nonzero_indices.find ( a );
+
+      // check whether the element was previously non-sparse
+      if ( i != nonzero_indices.end() ) {
+        elementpointer it = i->second;
+
+        if ( checkSparsity ( newElement ) ) {
+          // old: non-sparse, new:sparse
+          // delete the element
+          nzData.erase ( it );
+          nonzero_indices.erase ( i );
+        } else {
+          // old: non-sparse, new: non-sparse
+          // The following statement would be nice, but it is not allowed.
+          // This is also the reason why we implemented the transformed feature value ability.
+          // it->first = newElement;
+          if ( setTransformedValue ) {
+            // set the transformed value
+            it->second.second = newElement;
+          } else {
+            // the following is a weird and expensive trick
+            set ( a, 0.0 );
+            //std::cerr << "Element after step 1: " << access(a) << std::endl;
+            set ( a, newElement );
+          }
+          //std::cerr << "Element after step 2: " << access(a) << std::endl;
+        }
+      } else {
+        // the element was previously sparse
+        if ( !checkSparsity ( newElement ) )
+        {
+          //std::cerr << "changing a zero value to a non-zero value " << newElement << std::endl;
+          // old element is not sparse
+          dataelement de ( a, newElement );
+          std::pair<T, dataelement> p ( newElement, de );
+          elementpointer it = nzData.insert ( p );
+          nonzero_indices.insert ( std::pair<int, elementpointer> ( a, it ) );
+        }
+      }
+    }
+
+    SortedVectorSparse<T> & operator= ( const SortedVectorSparse<T> & F )
+    {
+      this->tolerance = F.getTolerance();
+      this->n = F.getN();
+      this->nonzero_indices = F.nonzero_indices;
+      this->nzData = F.nzData;
+
+      return *this;
+    }
+
+    /**
+    * @brief Computes the permutation of the non-zero elements for a proper (ascending) ordering
+    * @author Alexander Freytag
+    * @date 10-01-2012 (dd-mm-yyyy)
+    */
+    std::vector<int> getPermutationNonZero() const
+    {
+      std::vector<int> rv ( nzData.size() );
+      int idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      {
+        rv[idx] = it->second.first;
+      }
+      return rv;
+    };
+
+    /**
+    * @brief Computes the permutation of the non-zero elements for a proper (ascending) ordering
+    * @author Alexander Freytag
+    * @date 23-01-2012 (dd-mm-yyyy)
+    * @return  std::map<int, int>, with the absolute feature numbers as key elements and their permutation as second
+    */
+    std::map<int, int> getPermutationNonZeroReal() const
+    {
+      std::map<int, int> rv;
+//         int idx = 0;
+//         for (typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++)
+//         {
+//           //inserts the real feature number as key
+//           //TODO: do not insert the feature, but its original index, which is stored somewhere else!
+//           rv.insert(std::pair<int,int>(it->second.first,it->second.second));
+//           std::cerr << "inserting: " << it->second.first << " - " << it->second.second << std::endl;
+//           //if we want to use the relative feature number (relative to non-zero elements), use the following
+//           //rv.insert(std::pair<int,int>(idx,it->second.first));
+//         }
+
+      int nrZeros ( this->getZeros() );
+
+      int idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      {
+        //inserts the real feature number as key
+        rv.insert ( std::pair<int, int> ( nrZeros + idx, it->second.first ) );
+      }
+      return rv;
+    };
+
+    /**
+    * @brief Computes the permutation of the non-zero elements for a proper (ascending) ordering
+    * @author Alexander Freytag
+    * @date 23-01-2012 (dd-mm-yyyy)
+    * @return  std::map<int, int>, with the relative feature numbers as key elements (relative to non-zero elements) and their permutation as second
+    */
+    std::map<int, int> getPermutationNonZeroRelative() const
+    {
+      std::map<int, int> rv;
+      int idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      {
+        //inserts the real feature number as key
+        //rv.insert(std::pair<int,int>(it->second.first,it->second.first));
+        //if we want to use the relative feature number (relative to non-zero elements), use the following
+        rv.insert ( std::pair<int, int> ( idx, it->second.first ) );
+      }
+      return rv;
+    };
+
+
+
+    /**
+    * @brief Computes the permutation of the elements for a proper (ascending) ordering
+    */
+    std::vector<int> getPermutation() const
+    {
+      std::vector<int> rv ( n );
+
+      int idx = n - 1;
+      typename std::multimap<T, dataelement>::const_reverse_iterator it ;
+      for ( it = nzData.rbegin(); it != nzData.rend() && ( it->first > tolerance ); it++, idx-- )
+      {
+        rv[ idx ] = it->second.first;
+      }
+
+      for ( int i = n - 1 ; i >= 0 ; i-- )
+        if ( nonzero_indices.find ( i ) == nonzero_indices.end() )
+        {
+          rv[ idx ] = i;
+          idx--;
+        }
+
+      for ( ; it != nzData.rend(); it++, idx-- )
+      {
+        rv[ idx ] = it->second.first;
+      }
+
+      return rv;
+    };
+
+    /**
+    * @brief Orders the elements of the vector in ascending order and stores them in a separate vector
+    * @author Alexander Freytag
+    * @date 10-01-2012 (dd-mm-yyyy)
+    */
+    std::vector<std::pair<int, T> > getOrderInSeparateVector() const
+    {
+      std::vector<std::pair<int, T> > rv;
+      rv.resize ( nzData.size() );
+      uint idx = 0;
+      for ( typename std::multimap<T, dataelement>::const_iterator it = nzData.begin(); it != nzData.end(); it++, idx++ )
+      {
+        rv[idx].first = it->second.first;
+        rv[idx].second = it->second.second;
+      }
+      return rv;
+    };
+
+    /**
+    * @brief get the median of the vector including zero elements
+    *
+    * @return median value
+    */
+    T getMedian() const
+    {
+      if ( n % 2 == 1 ) 
+      {
+        // odd number of training examples
+        int medianPosition = nzData.size() - (int)((n+1)/2);
+        if ( medianPosition < 0 ) 
+          return 0.0;
+        else
+          return accessNonZero(medianPosition); 
+      } else {
+        // even number of training examples
+        int medianA = nzData.size() - (int)(n/2);
+        int medianB = medianA - 1;
+        T a = 0.0;
+        T b = 0.0;
+        if ( medianA >= 0)
+          a = accessNonZero( medianA );
+        if ( medianB >= 0)
+          b = accessNonZero( medianB );
+        return (a + b)/2.0;
+      }
+    }
+    
+    /**
+    * @brief get the maximum of the vector including zero elements
+    *
+    * @return maximum value
+    */
+    T getMax() const
+    {
+      if (nzData.size() > 0)
+        return accessNonZero(nzData.size()-1);
+      return (T) 0.0;
+    }
+    
+    /**
+    * @brief get the minimum of the vector including zero elements
+    *
+    * @return minimum value
+    */
+    T getMin() const
+    {
+      if (nzData.size() < (uint) n)
+        return (T) 0.0;
+      return accessNonZero(0);
+    }
+    
+    
+
+    /**
+    * @brief get median feature values for each class separately; we do not apply averaging when the number
+    * of examples is even
+    *
+    * @param classMedians resulting sparse vector, i.e., classMedians[classno] is the median value 
+    * of all examples of class classno
+    * @param labels vector of labels with the same size n as the current vector
+    * @param elementCounts this vector contains the number of examples for each class, computed from the labels
+    * beforehand for efficiency reasons
+    */
+    void getClassMedians ( SparseVector & classMedians, const Vector & labels, const Vector & elementCounts ) const
+    {
+      if ( labels.size() != n )
+        fthrow(Exception, "Label vector has to have the same size as the SortedVectorSparse structure");
+      Vector c ( elementCounts );
+      for ( uint i = 0 ; i < c.size(); i++ )
+        c[i] = (int)((c[i]+1)/2);
+      // c now holds for every class the rank of its median, counted from the largest value downwards;
+      // integer arithmetic is crucial here, since the comparison against zero below has to be exact
+      typename std::multimap<T, dataelement>::const_reverse_iterator it;
+
+      for ( it = nzData.rbegin(); it != nzData.rend(); it++ )
+      {
+        const dataelement & de = it->second;
+        int origIndex = de.first;
+        double value = de.second;
+        int classno = labels[origIndex];
+        c[ classno ]--;
+        if ( c[classno] == 0 )
+          classMedians[classno] = value;
+      }
+
+      // remaining medians are zero!
+      for ( uint classno = 0 ; classno < c.size(); classno++ )
+        if ( c[classno] > 0 )
+          classMedians[classno] = 0.0;
+    }
+
+    /**
+    * @brief Print the content of the sparse vector
+    * @author Alexander Freytag
+    * @date 12-01-2012 (dd-mm-yyyy)
+    */
+    void print(std::ostream & os) const
+    {
+      typename std::multimap<T, dataelement>::const_iterator it = nzData.begin();
+
+      if (os.good())
+      {
+        for ( ; it != nzData.end() ; it++ )
+        {
+          if ( it->first < ( T ) 0.0 )
+            os << it->first << " ";
+          else
+            break;
+        }
+
+        for ( int i = 0; i < getZeros(); i++ )
+        {
+          os << ( T ) 0.0 << " " ;
+        }
+
+        for ( ; ( it != nzData.end() ); it++ )
+        {
+          os << it->second.second << " ";
+        }
+        os << std::endl;
+      }
+    }
+    
+    /** set verbose flag used for restore-functionality*/
+    void setVerbose( const bool & _verbose) { verbose = _verbose;};
+    bool getVerbose( ) const { return verbose;};
+    
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 )
+    {
+      if (is.good())
+      {
+        is.precision (std::numeric_limits<double>::digits10 + 1);
+        
+        std::string tmp;
+        is >> tmp; //class name
+        
+        is >> tmp;
+        is >> tolerance;
+               
+        is >> tmp;
+        is >> n;
+               
+        is >> tmp;
+        int size;
+        is >> size;
+        
+        is >> tmp;
+        
+        T origValue;
+        int origIndex;
+        T transformedValue;
+        
+        nzData.clear();
+        nonzero_indices.clear();
+        for (int i = 0; i < size; i++)
+        {
+         
+          is >> origValue;
+          is >> origIndex;
+          is >> transformedValue;
+        
+          std::pair<T, dataelement > p ( origValue, dataelement ( origIndex, transformedValue ) );
+          elementpointer it = nzData.insert ( p);
+          nonzero_indices.insert ( std::pair<int, elementpointer> ( origIndex, it ) );
+        }
+        
+        if (verbose)
+        {
+          std::cerr << "SortedVectorSparse::restore" << std::endl;      
+          std::cerr << "tolerance: " << tolerance << std::endl;          
+          std::cerr << "n: " << n << std::endl;          
+          std::cerr << "size: " << size << std::endl;          
+        }
+      }
+      else
+      {
+        std::cerr << "SortedVectorSparse::restore -- InStream not initialized - restoring not possible!" << std::endl;
+      }      
+    };
+    virtual void store ( std::ostream & os, int format = 0 ) const
+    {
+      if (os.good())
+      {
+        os.precision (std::numeric_limits<double>::digits10 + 1);
+        os << "SortedVectorSparse" << std::endl;
+        os << "tolerance: " << tolerance << std::endl;
+        os << "n: " << n << std::endl;
+        os << "nonZeros: " << nzData.size() << std::endl;
+        os << "underlying_data_(sorted)" << std::endl;
+        for (const_elementpointer elP = nzData.begin();  elP != nzData.end(); elP++)
+        {
+          os << elP->first << " " << elP->second.first << " " << elP->second.second << " ";
+        }
+        os << std::endl;
+      }
+      else
+      {
+        std::cerr << "SortedVectorSparse::store -- OutStream not initialized - storing not possible!" << std::endl;
+      }      
+    };    
+    
+    virtual void clear (){};
+};
+
+} // namespace
+
+#endif
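
For illustration, a minimal usage sketch of this class (the include path and the default constructor are assumptions based on this header, not verified against the full library; the printed values assume no feature transformation has been applied):

#include <iostream>
#include <vector>
#include "gp-hik-core/SortedVectorSparse.h" // assumed include path

int main()
{
  NICE::SortedVectorSparse<double> v; // assumed default constructor
  std::vector<double> feats;
  feats.push_back(0.0);
  feats.push_back(0.3);
  feats.push_back(0.0);
  feats.push_back(0.7);
  feats.push_back(0.1);
  v.insert(feats); // zeros are detected by checkSparsity and never stored

  std::cout << v.access(3)   << std::endl; // 0.7 (transformed value; equals the original until a transform is set)
  std::cout << v.getMin()    << std::endl; // 0.0, since zero elements are present
  std::cout << v.getMax()    << std::endl; // 0.7
  std::cout << v.getMedian() << std::endl; // 0.1 (sorted order: 0, 0, 0.1, 0.3, 0.7)

  v.set(1, 0.0); // non-sparse -> sparse: the entry is erased from nzData again
  return 0;
}

Since only non-zero values live in the sorted multimap, an insertion costs O(log m) for m stored non-zeros, and zero entries occupy no memory at all.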

+ 50 - 0
VectorSorter.cpp

@@ -0,0 +1,50 @@
+// /** 
+// * @file VectorSorter.cpp
+// * @brief Obsolete: a std::vector providing several methods for ordering its elements
+// * @author Alexander Freytag
+// * @date 12/07/2011
+// */
+// 
+// #include "VectorSorter.h"
+// 
+// using namespace NICE;
+// 
+// 		/** default constructor*/
+// // 		template<class T>
+// // 		VectorSorter<T>::VectorSorter() : std::vector<T>() {}
+// 		/** standard constructor*/
+// // 		template<class T>
+// // 		VectorSorter<T>::VectorSorter(const VectorSorter<T> &v) : std::vector<T>(v) {}
+// 		/** standard constructor*/
+// // 		template<class T>
+// // 		VectorSorter<T>::VectorSorter(const std::vector<T> &v) : std::vector<T>(v) {}
+// 		/** comparison of elements*/
+// 		template<class T>
+// 		bool VectorSorter<T>::operator()(int a, int b) { return (*this)[a] < (*this)[b];}
+// 		
+// 		template<class T>
+// 		std::vector<int> VectorSorter<T>::getOrderPermutation()
+// 		{
+// 			std::vector<int> rv((*this).size());
+// 			int idx = 0;
+// 			for (std::vector<int>::iterator i = rv.begin(); i != rv.end(); i++)
+// 			{
+// 				*i = idx++;
+// 			}
+// 			std::sort(rv.begin(), rv.end(), *this);
+// 			return rv;
+// 		}
+// 		
+// 		template<class T>
+// 		std::vector<T> VectorSorter<T>::getOrderInSeparateVector()
+// 		{
+// 			std::vector<T> rv((*this));
+// 			std::sort(rv.begin(), rv.end());
+// 			return rv;
+// 		}
+// 		
+// 		template<class T>
+// 		void VectorSorter<T>::getOrder()
+// 		{
+// 			std::sort((*this).begin(), (*this).end());
+// 		}

+ 101 - 0
VectorSorter.h

@@ -0,0 +1,101 @@
+/** 
+* @file VectorSorter.h
+* @brief Obsolete: A std::vector providing several methods for ordering its elements (Interface and Implementation)
+* @author Alexander Freytag
+* @date 12/07/2011
+*/
+#ifndef VECTORSORTERINCLUDE
+#define VECTORSORTERINCLUDE
+
+#include <vector>
+#include <algorithm>
+
+namespace NICE {
+  
+ /** 
+ * @class VectorSorter
+ * @brief Obsolete: A std::vector providing several methods for ordering its elements
+ * @author Alexander Freytag
+ */    
+  
+  template<class T> class VectorSorter : public std::vector<T>{
+    public:
+      /** 
+      * @brief default constructor
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      VectorSorter() : std::vector<T>() {}
+
+      /** 
+      * @brief copy constructor
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      VectorSorter(const VectorSorter<T> &v) : std::vector<T>(v) {}
+      
+      /** 
+      * @brief constructor from a plain std::vector
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      VectorSorter(const std::vector<T> &v) : std::vector<T>(v) {}
+
+      /** 
+      * @brief standard destructor
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      ~VectorSorter() {}
+      
+      /** 
+      * @brief comparison of elements
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      bool operator()(int a, int b) { return (*this)[a] < (*this)[b];};
+      
+      /** 
+      * @brief Computes the permutation of the elements for a proper (ascending) ordering
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      std::vector<int> getOrderPermutation()
+      {
+        std::vector<int> rv((*this).size());
+        int idx = 0;
+        for (std::vector<int>::iterator i = rv.begin(); i != rv.end(); i++)
+        {
+          *i = idx++;
+        }
+        std::sort(rv.begin(), rv.end(), *this);
+        return rv;
+      };
+      
+      /** 
+      * @brief Orders the elements of the vector in ascending order and stores them in a separate vector
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      std::vector<T> getOrderInSeparateVector()
+      {
+        std::vector<T> rv((*this));
+        std::sort(rv.begin(), rv.end());
+        return rv;
+      };
+      
+      /** 
+      * @brief Orders the elements of the vector in ascending order
+      * @author Alexander Freytag
+      * @date 12/07/2011
+      */
+      void getOrder()
+      {
+        std::sort((*this).begin(), (*this).end());
+      };
+
+  };
+  
+} // namespace
+
+#endif
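
A minimal sketch of how this (obsolete) class can be used; only the include path is an assumption:

#include <iostream>
#include <vector>
#include "gp-hik-core/VectorSorter.h" // assumed include path

int main()
{
  std::vector<double> values;
  values.push_back(0.7);
  values.push_back(0.1);
  values.push_back(0.4);

  NICE::VectorSorter<double> vs(values);

  // indices of the elements in ascending order of their values: 1 2 0
  std::vector<int> perm = vs.getOrderPermutation();
  for (size_t i = 0; i < perm.size(); i++)
    std::cout << perm[i] << " ";
  std::cout << std::endl;

  // a sorted copy (0.1 0.4 0.7); vs itself stays untouched
  std::vector<double> sorted = vs.getOrderInSeparateVector();
  return 0;
}

Note that getOrderPermutation() passes the VectorSorter itself as comparator to std::sort, which is why the class overloads operator() on two indices.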

+ 33 - 0
algebra/LogDetApprox.h

@@ -0,0 +1,33 @@
+/** 
+* @file LogDetApprox.h
+* @brief LogDet Approximations (Interface - abstract)
+* @author Alexander Freytag
+* @date 05-01-2012 (dd-mm-yyyy)
+*/
+#ifndef LOGDETAPPROXINCLUDE
+#define LOGDETAPPROXINCLUDE
+
+#include "core/vector/MatrixT.h"
+
+namespace NICE {
+
+ /** 
+ * @class LogDetApprox
+ * @brief LogDet Approximations (abstract interface)
+ * @author Alexander Freytag
+ */
+ 
+  class LogDetApprox 
+  {
+    protected:
+
+    public:
+      LogDetApprox(){};
+      virtual ~LogDetApprox(){};
+
+      virtual double getLogDetApproximation(const NICE::Matrix & A)=0;
+  };
+} //namespace
+
+#endif

+ 168 - 0
algebra/LogDetApproxBaiAndGolub.cpp

@@ -0,0 +1,168 @@
+/** 
+ * @file LogDetApproxBaiAndGolub.cpp
+* @brief LogDet Approximation as stated by Bai and Golub ("Bounds for the Trace of the Inverse and the Determinant of Symmetric Positive Definite Matrices" in Annals of Numerical Mathematics) (Implementation)
+* @author Alexander Freytag
+* @date 05-01-2012 (dd-mm-yyyy)
+*/
+
+#include <limits>
+#include <cmath> 
+
+#include "gp-hik-core/algebra/LogDetApproxBaiAndGolub.h"
+#include "core/vector/VectorT.h"
+
+
+using namespace NICE;
+using namespace std;
+
+LogDetApproxBaiAndGolub::LogDetApproxBaiAndGolub()
+{
+  verbose = false;
+}
+
+LogDetApproxBaiAndGolub::~LogDetApproxBaiAndGolub()
+{
+}
+
+void LogDetApproxBaiAndGolub::setVerbose(const bool & _verbose)
+{
+  verbose = _verbose;
+}
+
+double LogDetApproxBaiAndGolub::getLogDetApproximation(const NICE::Matrix & A)
+{
+  //TODO: compute the lowest and largest eigenvalues here as soon as suitable methods are available!
+
+  double lambdaLowerBound(0.0);
+  NICE::Vector ones(A.rows(), 1.0);
+  NICE::Vector rightMultiplication;
+  rightMultiplication.multiply(A, ones);
+
+  //there is no nice way to multiply two NICE::Vectors and return a scalar :(
+  rightMultiplication *= ones;
+  //TODO For some reason I get a compilation error here: /home/luetz/code/fast-hik/nice/./core/vector/VectorT.tcc:539: undefined reference to `ippGetStatusString(IppStatus)'
+  //Therefore the nasty workaround :(
+  // 	double lambdaUpperBound(rightMultiplication.Sum());
+  double lambdaUpperBound(0);
+  for ( uint i = 0; i < rightMultiplication.size(); i++)
+  {
+    lambdaUpperBound += rightMultiplication[i];
+  }
+
+  //we could simply return getLogDetApproximation(A, lambdaUpperBound, lambdaLowerBound) here, but this would cost a second function call and only save 3 lines of code
+  double logDetLowerBound(getLogDetApproximationLowerBound(A.trace(), A.squaredFrobeniusNorm(), lambdaLowerBound, A.rows()) );
+  double logDetUpperBound(getLogDetApproximationUpperBound(A.trace(), A.squaredFrobeniusNorm(), lambdaUpperBound, A.rows()) );
+
+  return (fabs(logDetLowerBound) + fabs(logDetUpperBound) ) / 2.0;
+}
+
+
+double LogDetApproxBaiAndGolub::getLogDetApproximation(const NICE::Matrix & A, const double & lambdaUpperBound, const double & lambdaLowerBound)
+{
+  double logDetLowerBound(getLogDetApproximationLowerBound(A.trace(), A.squaredFrobeniusNorm(), lambdaLowerBound, A.rows()) );
+  double logDetUpperBound(getLogDetApproximationUpperBound(A.trace(), A.squaredFrobeniusNorm(), lambdaUpperBound, A.rows()) );
+
+  return (fabs(logDetLowerBound) + fabs(logDetUpperBound) ) / 2.0;
+}
+
+double LogDetApproxBaiAndGolub::getLogDetApproximation(const double & mu1, const double & mu2, const double & lambdaUpperBound, const double & lambdaLowerBound, const int & n )
+{
+  double logDetLowerBound(getLogDetApproximationLowerBound(mu1, mu2, lambdaLowerBound, n) );
+  double logDetUpperBound(getLogDetApproximationUpperBound(mu1, mu2, lambdaUpperBound, n) );
+
+  return (logDetLowerBound + logDetUpperBound ) / 2.0;
+}
+
+
+double LogDetApproxBaiAndGolub::getLogDetApproximationUpperBound(const double & mu1, const double & mu2, const double & lambdaUpperBound, const int & n )
+{
+  double tUpper(numeric_limits<double>::max());
+  if (  (lambdaUpperBound*n-mu1) != 0)
+    tUpper = (lambdaUpperBound*mu1 - mu2) / (lambdaUpperBound*n-mu1);
+
+  if ( tUpper < 1e-10 ) {
+    fthrow(Exception, "LogDetApproxBaiAndGolub::getLogDetApproximationUpperBound: tUpper < 1e-10 !! " << mu1 << " " << mu2 << " " << n << " " << lambdaUpperBound );
+  }
+
+  // boundUpper = [log(beta) , log(tUpper)] * ([beta , tUpper; power(beta,2), power(tUpper,2)]^-1 * [mu1;mu2]);
+  //inversion of a 2x2-matrix can be done explicitly: A^{-1} = \frac{1}{ad-bc} \bmatrix{ d & -b \\ -c & a}
+  NICE::Matrix InnerMatrix(2,2);
+  InnerMatrix(0,0) = pow(tUpper,2);
+  InnerMatrix(0,1) = -tUpper;
+  InnerMatrix(1,0) = -pow(lambdaUpperBound,2);
+  InnerMatrix(1,1) = lambdaUpperBound;
+  InnerMatrix *= 1.0/(lambdaUpperBound*pow(tUpper,2) - tUpper*pow(lambdaUpperBound,2));
+
+  NICE::Vector leftSide(2);
+  leftSide(0) = log(lambdaUpperBound);
+  leftSide(1) = log(tUpper);
+
+  if (verbose)
+  {
+    cerr << "Left side: " << leftSide << endl;
+    cerr << InnerMatrix << endl;
+  }
+
+  NICE::Vector rightSide(2);
+  rightSide(0) = mu1;
+  rightSide(1) = mu2;
+
+  NICE::Vector rightMultiplication;
+  rightMultiplication.multiply(InnerMatrix,rightSide);
+
+  //there is no nice way to multiply two NICE::Vectors and return a scalar :(
+  leftSide *= rightMultiplication;
+
+  // 	return leftSide.Sum();
+  //TODO For some reason I get a compilation error here: /home/luetz/code/fast-hik/nice/./core/vector/VectorT.tcc:539: undefined reference to `ippGetStatusString(IppStatus)'
+  //Therefore the nasty workaround :(
+  double result(0.0);
+
+  for ( uint i = 0; i < leftSide.size(); i++)
+  {
+    result += leftSide[i];
+  }
+  return result;
+}
+
+
+double LogDetApproxBaiAndGolub::getLogDetApproximationLowerBound(const double & mu1, const double & mu2, const double & lambdaLowerBound, const int & n )
+{
+  double tLower(numeric_limits<double>::max());
+  if (  (lambdaLowerBound*n-mu1) != 0)
+    tLower = (lambdaLowerBound*mu1 - mu2) / (lambdaLowerBound*n-mu1);
+
+  // boundLower = [log(alpha) , log(tLower)] * ([alpha , tLower; power(alpha,2), power(tLower,2)]\[mu1;mu2]);
+  //inversion of a 2x2-matrix can be done explicitly: A^{-1} = \frac{1}{ad-bc} \bmatrix{ d & -b \\ -c & a}
+  NICE::Matrix InnerMatrix(2,2);
+  InnerMatrix(0,0) = pow(tLower,2);
+  InnerMatrix(0,1) = -tLower;
+  InnerMatrix(1,0) = -pow(lambdaLowerBound,2);
+  InnerMatrix(1,1) = lambdaLowerBound;
+  InnerMatrix *= 1.0/(lambdaLowerBound*pow(tLower,2) - tLower*pow(lambdaLowerBound,2));
+
+  NICE::Vector leftSide(2);
+  leftSide(0) = log(lambdaLowerBound);
+  leftSide(1) = log(tLower);
+
+  NICE::Vector rightSide(2);
+  rightSide(0) = mu1;
+  rightSide(1) = mu2;
+
+  NICE::Vector rightMultiplication;
+  rightMultiplication.multiply(InnerMatrix,rightSide);
+
+  return leftSide.scalarProduct( rightMultiplication );
+  
+//   //there is no nice way for multiplying two NICE::Vectors and returning a scalar so far
+//   leftSide *= rightMultiplication;
+// 
+//   //nasty workaround for leftSide.Sum(), which does not compile properly on all machines
+//   double result(0.0);
+// 
+//   for ( uint i = 0; i < leftSide.size(); i++)
+//   {
+//     result += leftSide[i];
+//   }
+//   return result;
+}

+ 116 - 0
algebra/LogDetApproxBaiAndGolub.h

@@ -0,0 +1,116 @@
+/** 
+* @file LogDetApproxBaiAndGolub.h
+* @brief LogDet Approximation as stated by Bai and Golub ("Bounds for the Trace of the Inverse and the Determinant of Symmetric Positive Definite Matrices", Annals of Numerical Mathematics) (Interface)
+* @author Alexander Freytag
+* @date 05-01-2012 (dd-mm-yyyy)
+*/
+#ifndef LOGDETAPPROXBAIANDGOLUBINCLUDE
+#define LOGDETAPPROXBAIANDGOLUBINCLUDE
+
+#include "gp-hik-core/algebra/LogDetApprox.h"
+
+namespace NICE {
+
+ /** 
+ * @class LogDetApproxBaiAndGolub
+ * @brief LogDet Approximation as stated by Bai and Golub ("Bounds for the Trace of the Inverse and the Determinant of Symmetric Positive Definite Matrices", Annals of Numerical Mathematics)
+ * @author Alexander Freytag
+ */
+ 
+  class LogDetApproxBaiAndGolub : public LogDetApprox
+  {
+
+    protected:
+      bool verbose;
+      
+    public:
+      
+      //------------------------------------------------------
+      // several constructors and destructors
+      //------------------------------------------------------
+      
+      /** 
+      * @brief Default constructor
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      */
+      LogDetApproxBaiAndGolub();
+      
+      /** 
+      * @brief Default destructor
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      */
+      ~LogDetApproxBaiAndGolub();
+      
+      //------------------------------------------------------
+      // get and set methods
+      //------------------------------------------------------      
+      void setVerbose(const bool & _verbose);
+      
+      //------------------------------------------------------
+      // high level methods
+      //------------------------------------------------------
+      
+      /** 
+      * @brief  Compute an approximation for the logDet using Bai and Golub's paper
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      * @pre A has to be symmetric and positive definite
+      * @param A symmetric positive definite matrix
+      * @return approximated logDet of A, computed by taking (LogDetUpperBound+LogDetLowerBound)/2
+      */
+      virtual double getLogDetApproximation(const NICE::Matrix & A);
+      
+      /** 
+      * @brief  Compute an approximation for the logDet using Bai and Golub's paper
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      * @pre A has to be symmetric and positive definite
+      * @param A symmetric positive definite matrix
+      * @param lambdaUpperBound guaranteed upper bound on the eigenvalues of A
+      * @param lambdaLowerBound guaranteed lower bound on the eigenvalues of A
+      * @return approximated logDet of A, computed by taking (LogDetUpperBound+LogDetLowerBound)/2
+      */
+      double getLogDetApproximation(const NICE::Matrix & A, const double & lambdaUpperBound, const double & lambdaLowerBound);
+      
+      /** 
+      * @brief  Compute an approximation for the logDet using Bai and Golub's paper
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      * @param mu1 ideally the trace of matrix A
+      * @param mu2 ideally the squared Frobenius norm of matrix A
+      * @param lambdaUpperBound guaranteed upper bound on the eigenvalues of A
+      * @param lambdaLowerBound guaranteed lower bound on the eigenvalues of A
+      * @param n number of rows in A (equals number of training examples used to compute the matrix, if A is a kernel matrix)
+      * @return approximated logDet of A, computed by taking (LogDetUpperBound+LogDetLowerBound)/2
+      */
+      double getLogDetApproximation(const double & mu1, const double & mu2, const double & lambdaUpperBound, const double & lambdaLowerBound, const int & n );
+      
+      /** 
+      * @brief  Compute an upper bound on the logDet using Bai and Golub's paper
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      * @param mu1 ideally the trace of matrix A
+      * @param mu2 ideally the squared Frobenius norm of matrix A
+      * @param lambdaUpperBound the guaranteed upper bound on the eigenvalues of A
+      * @param n number of rows in A (equals number of training examples used to compute the matrix, if A is a kernel matrix)
+      * @return guaranteed upper bound on the log det of A, if the inputs are correctly computed
+      */
+      double getLogDetApproximationUpperBound(const double & mu1, const double & mu2, const double & lambdaUpperBound, const int & n );
+      
+      /** 
+      * @brief  Compute a lower bound on the logDet using Bai and Golub's paper
+      * @author Alexander Freytag
+      * @date 05-01-2012 (dd-mm-yyyy)
+      * @param mu1 ideally the trace of matrix A
+      * @param mu2 ideally the squared Frobenius norm of matrix A
+      * @param lambdaLowerBound the guaranteed lower bound on the eigenvalues of A
+      * @param n number of rows in A (equals number of training examples used to compute the matrix, if A is a kernel matrix)
+      * @return guaranteed lower bound on the log det of A, if the inputs are correctly computed
+      */
+      double getLogDetApproximationLowerBound(const double & mu1, const double & mu2, const double & lambdaLowerBound, const int & n );
+  };
+} //namespace
+
+#endif
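
A usage sketch with a worked example (include paths assumed): for A = [2 1; 1 2] we have trace mu1 = 4, squared Frobenius norm mu2 = 10, eigenvalues 1 and 3, and det(A) = 3; plugging the exact eigenvalue bounds in, both the lower and the upper bound evaluate to log 3, so the approximation happens to be exact here:

#include <cmath>
#include <iostream>
#include "core/vector/MatrixT.h"
#include "gp-hik-core/algebra/LogDetApproxBaiAndGolub.h" // assumed include paths

int main()
{
  NICE::Matrix A(2, 2);
  A(0, 0) = 2.0; A(0, 1) = 1.0;
  A(1, 0) = 1.0; A(1, 1) = 2.0;

  NICE::LogDetApproxBaiAndGolub logDetApprox;

  // the exact eigenvalue bounds [1, 3] are known for this toy matrix;
  // in general, guaranteed bounds (e.g. from Gershgorin circles) are needed
  double approx = logDetApprox.getLogDetApproximation(A, 3.0, 1.0);

  std::cout << "logDet approximation: " << approx
            << " (exact value: " << std::log(3.0) << ")" << std::endl;
  return 0;
}

The tighter the supplied eigenvalue bounds, the closer the two bounds, and hence their returned average, get to the true log determinant.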

+ 8 - 0
algebra/Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursively going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
algebra/Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exitting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusively use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 148 - 0
configs/AL_predVar_fine.conf

@@ -0,0 +1,148 @@
+[train0]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run0.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test0]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run0.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train1]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run1.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test1]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run1.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train2]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run2.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test2]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run2.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train3]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run3.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test3]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run3.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train4]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run4.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test4]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run4.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train5]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run5.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test5]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run5.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train6]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run6.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test6]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run6.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train7]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run7.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test7]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run7.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train8]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run8.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test8]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run8.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train9]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run9.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test9]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run9.test
+classselection_test = "*"
+examples_test = seq * 50
+
+
+[cache]
+#root = "/home/rodner/3rdparty/imagenetBOF/niceFeatures/"
+root = "/home/luetz/data/feature-storage/15Scenes/niceFeatures/"
+
+[GP_IL]
+trainExPerClass = 1
+num_runs = 10
+do_classification = true
+incrementalAddSize = 3
+nrOfIncrements = 30
+
+[main]
+# extension of all files in the cache
+ext = ".feat"
+queryStrategy = gpPredVar
+
+[GPHIKClassifier]
+noise =  0.0000001
+# no uncertainty for standard classification
+uncertaintyPredictionForClassification = false
+#--define the uncertainty prediction scheme--
+# standard predictive variance
+#uncertaintyPrediction = pred_variance
+# use the heuristic as proposed by Kapoor et al.
+#uncertaintyPrediction = heuristic
+# no classification uncertainty at all?
+#uncertaintyPrediction = none
+
+#--define the computation scheme for the predictive variance, if needed--
+#if we do not need any predictive variance for this experiment
+#varianceApproximation = none
+# predictive variance approximation useful for sparse features - really fast
+#varianceApproximation = approximate_rough 
+# predictive variance approximation with eigenvectors (finer)
+varianceApproximation = approximate_fine
+nrOfEigenvaluesToConsiderForVarApprox = 2
+#exact computation of predictive variance
+#varianceApproximation = exact
+
+#--define the optimization method--
+optimization_method = none
+#optimization_method = downhillsimplex
+parameter_lower_bound = 1.0
+parameter_upper_bound = 1.0 
+
+#--stuff for the IterativeLinearSolver--
+#ils_verbose = true

+ 48 - 0
configs/AwA.conf

@@ -0,0 +1,48 @@
+[traintest]
+#the original images are currently not available
+dataset = /home/luetz/data/Animals_with_Attributes/features/rgsift-hist/
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/phog-hist/
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/cq-hist/
+#classselection_train = "*"
+#classselection_test = "*"
+classselection_train = "pig, giant+panda, seal, raccoon, rat, hippopotamus, leopard, persian+cat, chimpanzee, humpback+whale"
+classselection_test = "pig, giant+panda, seal, raccoon, rat, hippopotamus, leopard, persian+cat, chimpanzee, humpback+whale"
+examples_train = random * 46
+#examples_train = random * 260
+examples_test = random * 46
+#examples_test = random * 40
+#examples_test = random pig 311, random giant+panda 922, random seal 489, random raccoon 613, random rat 283, random hippopotamus 703, random leopard 588, random persian+cat 694, random chimpanzee 
+#681, random humpback+whale 696
+#examples_train = random pig 165, random giant+panda 922, random seal 489, random raccoon 613, random rat 283, random hippopotamus 703, random leopard 588, random persian+cat 694, random chimpanzee
+#681, random humpback+whale 696
+
+
+#[train]
+##the original images are currently not available
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/
+#classselection_train = "*"
+#examples_train = random * 1
+
+#[test]
+##the original images are currently not awailable
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/
+#classselection_test = "*"
+#examples_test = random * 15
+
+[cache]
+root = "/home/luetz/data/Animals_with_Attributes/features/rgsift-hist/"
+#root = "/home/dbv/bilder/Animals_with_Attributes/features/phog-hist/"
+#root = "/home/dbv/bilder/Animals_with_Attributes/features/cq-hist/"
+
+[HIKGP]
+#parameter_lower_bound = 0.5
+#parameter_upper_bound = 2.5
+ils_max_iterations = 50
+ils_method = MINRES
+optimization_method = downhillsimplex
+optimize_noise = true
+
+[main]
+nrRuns = 10
+transform = absexp
+#transform = exp

+ 52 - 0
configs/GP_IL_New_Examples.conf

@@ -0,0 +1,52 @@
+[traintest]
+#dataset = /home/dbv/bilder/15Scenes/imagesScaled/
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = random * 100
+examples_test = random * 50
+
+[cache]
+#root = "/home/rodner/3rdparty/imagenetBOF/niceFeatures/"
+root = "/home/luetz/data/feature-storage/15Scenes/niceFeatures/"
+
+[GP_IL]
+trainExPerClass = 10
+num_runs = 10
+do_classification = true
+incrementalAddSize = 1
+nrOfIncrements = 50
+
+[main]
+# extension of all files in the cache
+ext = ".feat"
+
+[GPHIKClassifier]
+noise =  0.01
+parameter_lower_bound = 0.5
+parameter_upper_bound = 2.0 
+#--define the uncertainty prediction scheme--
+# standard predictive variance
+#uncertaintyPrediction = pred_variance
+# use the heuristic as proposed by Kapoor et al.
+uncertaintyPrediction = heuristic
+# no classification uncertainty at all?
+#uncertaintyPrediction = none
+
+#--define the computation scheme for the predictive variance, if needed--
+#if we do not need any predictive variance for this experiment
+#varianceApproximation = none
+# predictive variance approximation useful for sparse features - really fast
+varianceApproximation = approximate_rough 
+# predictive variance approximation with eigenvectors (finer)
+#varianceApproximation = approximate_fine
+#nrOfEigenvaluesToConsiderForVarApprox = 2
+#exact computation of predictive variance
+#varianceApproximation = exact
+
+#--define the optimization method--
+#optimization_method = none
+optimization_method = downhillsimplex
+
+#--stuff for the IterativeLinearSolver--
+#ils_verbose = true

+ 41 - 0
configs/ImagenetBinaryGP.conf

@@ -0,0 +1,41 @@
+#[HIKGP]
+[GPHIKClassifier]
+
+#optimization_method = "downhillsimplex"
+optimization_method = "none"
+parameter_upper_bound = 5.0
+ils_max_iterations = 500
+ils_verbose = true
+noise = 10.0
+verbose = true
+ils_min_residual = 1e-2
+learn_balanced = true
+
+[main]
+positive_class = 1
+
+# whether to use eriks folder (only works on dionysos)
+imageNetLocal = false
+
+# standard setting with one negative example for each category
+nneg = 50
+
+
+# with 20 iterations
+# This standard config should lead to ...  classification performance
+# With quantization we get: 0.891481 (with only 100 bins :)
+
+# Additional quantization
+
+#[HIKGP]
+[GPHIKClassifier]
+use_quantization = true
+num_bins = 100
+
+[RegGaussianProcess]
+noise = 10.0
+optimize_parameters = false
+
+[Kernel]
+robust_cholesky = "static"
+rchol_noise_variance = 10.0

+ 6 - 0
configs/computeNormHistFeat.conf

@@ -0,0 +1,6 @@
+[main]
+nrOfExamplesPerClass = 500
+nrOfDimensions = 50
+nrOfClasses = 3
+destination = /home/luetz/tmp/features.data
+saveLastDimension = false

+ 19 - 0
configs/createSIFTFeatures.conf

@@ -0,0 +1,19 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = seq * 100
+examples_test = all *
+
+[HSG]
+sample_scaling = 8
+localfeature_type = NICE_SIFT
+
+[main]
+destForFeat = /home/dbv/bilder/15Scenes/features/
+percentageOfPatchesForKMeans = 0.05
+nrOfClusters = 200
+verbose = false
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 18 - 0
configs/createSIFTFeaturesHSG.conf

@@ -0,0 +1,18 @@
+[all]
+dataset = /home/dbv/bilder/15Scenes/
+classselection = "*"
+examples = all *
+
+[HSG]
+sample_scaling = 8
+localfeature_type = NICE_SIFT
+
+[main]
+destForFeat = /home/dbv/bilder/15Scenes/features/
+verbose = false
+
+[cache]
+#root = "/home/dbv/bilder/15Scenes/features/featuresRaw/"
+root = "/home/luetz/tmp/lf/"
+descriptor_format = "binary_double"
+

+ 9 - 0
configs/scenes.reclassification.conf

@@ -0,0 +1,9 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = seq * 100
+examples_test = reclassification
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 9 - 0
configs/scenes.smalltest.conf

@@ -0,0 +1,9 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = random * 10
+examples_test = all *
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 9 - 0
configs/scenes.std.conf

@@ -0,0 +1,9 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/imagesScaled/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = seq * 100
+examples_test = all *
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 1 - 0
doxy/CODING

@@ -0,0 +1 @@
+indentation: 2 spaces

+ 10 - 0
doxy/coding.doxy

@@ -0,0 +1,10 @@
+/** \page coding Coding Guidelines
+
+
+NICE basically follows the <a href="http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml">Google style guide</a>;
+
+in addition, there are some further regulations:
+
+\verbinclude CODING
+
+ */

BIN
doxy/logoV1.png


+ 101 - 0
doxy/logoV1.svg

@@ -0,0 +1,101 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="744.09448819"
+   height="1052.3622047"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.1 r9760"
+   sodipodi:docname="logoV1.svg">
+  <defs
+     id="defs4" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="5.6568542"
+     inkscape:cx="194.28172"
+     inkscape:cy="858.0379"
+     inkscape:document-units="px"
+     inkscape:current-layer="g4051"
+     showgrid="false"
+     inkscape:window-width="1920"
+     inkscape:window-height="972"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <g
+       id="g4051">
+      <flowRoot
+         style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         id="flowRoot2985"
+         xml:space="preserve"><flowRegion
+           id="flowRegion2987"><rect
+             y="156.35687"
+             x="159.6041"
+             height="70.710678"
+             width="160.61426"
+             id="rect2989" /></flowRegion><flowPara
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:TeX Gyre Schola;-inkscape-font-specification:TeX Gyre Schola"
+           id="flowPara2991">CV</flowPara></flowRoot>      <flowRoot
+         transform="matrix(1.6881042,0,0,2.6839504,-198.72748,-269.84001)"
+         style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         id="flowRoot2993"
+         xml:space="preserve"><flowRegion
+           id="flowRegion2995"><rect
+             y="148.07646"
+             x="237.85715"
+             height="74.285713"
+             width="50.714287"
+             id="rect2997" /></flowRegion><flowPara
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:TeX Gyre Schola;-inkscape-font-specification:TeX Gyre Schola"
+           id="flowPara2999">J</flowPara></flowRoot>      <rect
+         ry="1.8145518"
+         y="154.19693"
+         x="177.67902"
+         height="3.6291037"
+         width="63.757946"
+         id="rect3001"
+         style="fill:#00000a;fill-opacity:1;stroke:#000000;stroke-width:2.60832787;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+      <text
+         xml:space="preserve"
+         style="font-size:10px;font-style:italic;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:East Syriac Ctesiphon;-inkscape-font-specification:East Syriac Ctesiphon Bold Italic"
+         x="203.82353"
+         y="151.50813"
+         id="text2992"
+         sodipodi:linespacing="125%"><tspan
+           sodipodi:role="line"
+           id="tspan2994"
+           x="203.82353"
+           y="151.50813"
+           style="font-size:14px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:TeX Gyre Schola;-inkscape-font-specification:TeX Gyre Schola Bold">NICE</tspan></text>
+    </g>
+  </g>
+</svg>

+ 35 - 0
doxy/mainpage.doxy

@@ -0,0 +1,35 @@
+/** \mainpage GP-HIK-CORE Documentation
+ * \authors
+ * gp-hik-core: Alexander Freytag, Erik Rodner \n
+ * Makefiles: Erik Rodner, Olaf Kaehler
+ *
+ * \section intro Introduction
+ * This is the documentation of the module gp-hik-core, which is part of the main library NICE (New Image C++ Extension library)
+ *
+ * <hr>
+ * \subpage install
+ * <hr>
+ * \subpage coding
+ * <hr>
+ * \section license_sec License
+ *
+ * The following license covers <b>most</b> of this library.
+ * Some individual files have a different license.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ * You can find the complete license at http://www.gnu.org/licenses/lgpl.txt
+ */
+

+ 7 - 0
doxy/readme.doxy

@@ -0,0 +1,7 @@
+/** \page install Installation Guide
+
+  \verbinclude README
+  
+  recommended packages:
+  \verbinclude PACKAGES
+*/

+ 1685 - 0
doxyfile_gp_hik.txt

@@ -0,0 +1,1685 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = GP HIK Core
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER         = 1.0 (beta version)
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "Provides methods and data structures needed to efficiently evaluate the Histogram Intersection Kernel with Gaussian processes, enabling efficient large-scale learning, hyperparameter optimization, uncertainty prediction, and incremental learning."
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO           =  doxy/logoV1.png
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = ./doc/
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF       =
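+
+# A hedged sketch (not active here; class name illustrative): to have a brief
+# like "The FastMinKernel class provides fast HIK evaluations" listed simply
+# as "fast HIK evaluations", one could set
+#   ABBREVIATE_BRIEF = "The $name class" "provides" "a" "an" "the"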
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF      = NO
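+
+# Illustrative sketch of the difference (hypothetical C++ comment): with
+# JAVADOC_AUTOBRIEF = YES, in
+#   /** Computes the kernel sums. The details are given below. */
+# the text up to the first dot becomes the brief description. With the NO
+# set here, a brief has to be requested explicitly:
+#   /** @brief Computes the kernel sums. */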
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                =
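+
+# A minimal sketch of a custom alias (hypothetical, not active here):
+#   ALIASES = "complexity=\par Runtime complexity:\n"
+# would allow C++ documentation such as
+#   /** @brief Evaluates the kernel. @complexity O(n log n) */
+# to render a "Runtime complexity:" paragraph.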
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
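+
+# A hedged sketch: FILE_PATTERNS below includes *.tcc template implementation
+# files, which have no built-in mapping. Should they ever be parsed wrongly,
+# an explicit mapping such as
+#   EXTENSION_MAPPING = tcc=C++
+# would force the C++ parser for them (illustrative; left empty here).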
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string) vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE      = 0
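+
+# Worked example of the formula above: the default 0 gives 2^(16+0) = 65536
+# cached symbols, while e.g. SYMBOL_CACHE_SIZE = 3 would give
+# 2^(16+3) = 524288 symbols at roughly eight times the memory usage.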
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do
+# proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function
+# even if there is only one candidate or it is obvious which candidate to
+# choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING
+# doxygen will still accept a match between prototype and implementation in
+# such cases.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       =
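+
+# Illustrative sketch (hypothetical section name): documentation wrapped as
+#   /** \if devnotes Implementation detail for maintainers. \endif */
+# stays hidden unless this file sets
+#   ENABLED_SECTIONS = devnotes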
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES       = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE            =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT            = "$file:$line: $text"
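+
+# With this format a warning is emitted as, for instance (hypothetical
+# message text):
+#   FastMinKernel.h:42: Warning: parameter 'noise' is not documented
+# which most editors can parse to jump to the offending line.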
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT                  =
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS          = *.doxy \
+*.cpp \
+*.h \
+*.tcc \
+*.m
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                = doxygen
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       = */.git/*
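+
+# Since patterns match against absolute paths, excluding the unit tests of
+# this library as well would, for instance, take (illustrative, not active):
+#   EXCLUDE_PATTERNS = */.git/* */tests/*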
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH           = README \
+			 PACKAGES \
+			 doxy/CODING
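+
+# Files listed above can then be pulled verbatim into documentation pages,
+# e.g. (hypothetical usage inside a .doxy page):
+#   \include README
+# inserts the README file as an example listing.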
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = test*.cpp
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
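+
+# A hedged sketch of per-pattern filtering (hypothetical filter script):
+#   FILTER_PATTERNS = *.m=./scripts/matlab2doxy.sh
+# would pipe every MATLAB file through the given script before parsing,
+# while all other inputs stay unfiltered.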
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET        =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8. The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS     = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET        = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE               =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING     =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project /
+# Custom Filters:
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. For more information please see Qt Help
+# Project / Filter Attributes:
+# http://doc.trolltech.com/qthelpproject.html#filter-attributes
+
+QHP_SECT_FILTER_ATTRS  =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which, together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [0,1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from
+# appearing in the overview section.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES       = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the
+# mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH        = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE      = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA             =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD                =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful if
+# you want to understand what is going on. On the other hand, if this tag is
+# set to NO the size of the Perl module output will be much smaller and Perl
+# will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED             =
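+
+# Illustrative sketch (hypothetical macro): to document code guarded by a
+# compile-time switch without defining it in the build, one could set
+#   PREDEFINED = USE_OPENMP=1
+# so the preprocessor treats the guarded blocks as enabled.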
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS        = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, gif or svg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP            = YES

+ 122 - 0
kernels/GeneralizedIntersectionKernelFunction.h

@@ -0,0 +1,122 @@
+/** 
+* @file GeneralizedIntersectionKernelFunction.h
+* @brief The generalized intersection kernel function as distance measure between two histograms interpreted as vectors (Interface)
+* @author Alexander Freytag
+* @date 23-12-2011 (dd-mm-yyyy)
+*/
+#ifndef _NICE_GENERALIZEDINTERSECTIONKERNELFUNCTION
+#define _NICE_GENERALIZEDINTERSECTIONKERNELFUNCTION
+
+#include <iostream>
+
+#include <core/vector/MatrixT.h>
+
+#include <gp-hik-core/FeatureMatrixT.h>
+
+
+namespace NICE {
+
+  /** 
+ * @class GeneralizedIntersectionKernelFunction
+ * @brief The generalized intersection kernel function as distance measure between two histograms interpreted as vectors
+ * @author Alexander Freytag
+ */
+
+template<class T> class GeneralizedIntersectionKernelFunction
+{
+
+    protected:
+    //TODO: one could also use a separate function here, such as pow(.,a) - this would be more general, but would only apply the power to the inputs.
+      double exponent;
+
+    public:
+  
+    /** 
+    * @brief Default constructor
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    GeneralizedIntersectionKernelFunction();
+    
+    /** 
+    * @brief Recommended constructor
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    GeneralizedIntersectionKernelFunction(const double & _exponent);
+      
+    /** 
+    * @brief Default destructor
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    ~GeneralizedIntersectionKernelFunction();
+    
+    /** 
+    * @brief Set exponent to specified value
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    void set_exponent(const double & _exponent);
+    
+    /** 
+    * @brief Return currently used exponent
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    double get_exponent();
+
+    /** 
+    * @brief Measures the distance between two vectors using the generalized histogram intersection distance
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    double measureDistance ( const std::vector<T> & a, const std::vector<T> & b  );
+  
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the g-HIK distance
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const std::vector<std::vector<T> > & X  );
+    
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the g-HIK distance and adds a given amount of noise on the main diagonal
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const std::vector<std::vector<T> > & X , const double & noise);
+    
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the g-HIK distance and adds a given amount of noise on the main diagonal
+    * @author Alexander Freytag
+    * @date 03-02-2012 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const NICE::FeatureMatrixT<T>  & X , const double & noise);
+    
+    /** 
+    * @brief Computes the similarity between the data and a new vector using the g-HIK distance
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    std::vector<double> computeKernelVector ( const std::vector<std::vector<T> > & X , const std::vector<T> & xstar);
+    
+    /** 
+    * @brief Simply print the name of the class
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    void sayYourName();
+
+};
+
+  //! default definition for a GeneralizedIntersectionKernelFunction
+  typedef GeneralizedIntersectionKernelFunction<double> GeneralizedIntersectionKernelFunctionDouble;
+
+}
+
+#ifdef __GNUC__
+#include "gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.tcc"
+#endif
+
+#endif

+ 147 - 0
kernels/GeneralizedIntersectionKernelFunction.tcc

@@ -0,0 +1,147 @@
+/** 
+* @file GeneralizedIntersectionKernelFunction.tcc
+* @brief The generalized intersection kernel function as distance measure between two histograms interpreted as vectors (Implementation)
+* @author Alexander Freytag
+* @date 08-12-2011 (dd-mm-yyyy)
+*/
+
+#include <gp-hik-core/SortedVectorSparse.h>
+
+#include "GeneralizedIntersectionKernelFunction.h"
+#include <math.h>
+
+using namespace NICE;
+
+template <typename T>
+GeneralizedIntersectionKernelFunction<T>::GeneralizedIntersectionKernelFunction()
+{
+  exponent = 1.0;
+}
+
+template <typename T>
+GeneralizedIntersectionKernelFunction<T>::GeneralizedIntersectionKernelFunction(const double & _exponent)
+{
+  exponent = _exponent;
+}
+
+template <typename T>
+GeneralizedIntersectionKernelFunction<T>::~GeneralizedIntersectionKernelFunction()
+{
+}
+
+template <typename T>
+void GeneralizedIntersectionKernelFunction<T>::set_exponent(const double & _exponent)
+{
+  exponent = _exponent;
+}
+
+template <typename T>
+double GeneralizedIntersectionKernelFunction<T>::get_exponent()
+{
+  return exponent;
+}
+
+template <typename T>
+double GeneralizedIntersectionKernelFunction<T>::measureDistance ( const std::vector<T> & a, const std::vector<T> & b  )
+{
+  int size( (int) a.size());
+  if ((int) b.size() < size)
+    size = (int) b.size();
+
+  double distance(0.0);
+
+  for (int i = 0; i < size; i++)
+  {
+    if ( a[i] < b[i])
+      distance += pow((double) a[i],exponent);
+    else
+      distance += pow((double) b[i],exponent);
+  }
+  return distance;
+}
+
+template <typename T>
+NICE::Matrix GeneralizedIntersectionKernelFunction<T>::computeKernelMatrix ( const std::vector<std::vector<T> > & X  )
+{
+  NICE::Matrix K;
+
+  K.resize((int) X.size(), (int) X.size());
+
+  for (int i = 0; i < (int) X.size(); i++)
+  {
+    for (int j = i; j < (int) X.size(); j++)
+    {
+      //the kernel matrix has to be symmetric, so compute each distance only once
+      double valTmp( measureDistance(X[i],X[j]) );
+      K(i,j) = valTmp;
+      K(j,i) = valTmp;
+    }
+  }
+
+  return K;
+}
+
+template <typename T>
+NICE::Matrix GeneralizedIntersectionKernelFunction<T>::computeKernelMatrix ( const std::vector<std::vector<T> > & X , const double & noise)
+{
+  NICE::Matrix K(computeKernelMatrix(X));
+  for (int i = 0; i < (int) X.size(); i++)
+    K(i,i) += noise;
+  return K;
+}
+
+template <typename T>
+NICE::Matrix GeneralizedIntersectionKernelFunction<T>::computeKernelMatrix ( const NICE::FeatureMatrixT<T>  & X , const double & noise)
+{
+  NICE::Matrix K;  
+  K.resize(X.get_n(), X.get_n());
+  //initialize all entries with zero, since the min-values are accumulated with += below
+  K.set(0.0);
+  
+  //run over every dimension and add the corresponding min-values to the entries in the kernel matrix
+  for (int dim = 0; dim < X.get_d(); dim++)
+  {
+   const std::multimap< double, typename SortedVectorSparse<double>::dataelement> & nonzeroElements = X.getFeatureValues(dim).nonzeroElements();
+    
+    //compute the min-values (similarities) between every pair in this dimension, zero elements do not influence this
+    SortedVectorSparse<double>::const_elementpointer it1 = nonzeroElements.begin();  
+    for (; it1 != nonzeroElements.end(); it1++)
+    {
+      int i(it1->second.first);
+      SortedVectorSparse<double>::const_elementpointer it2 = it1;  
+      for (; it2 != nonzeroElements.end(); it2++)
+      {  
+        int j(it2->second.first);
+        double val(pow(std::min(it1->second.second, it2->second.second),exponent));
+        K(i,j) += val;
+        //kernel-matrix has to be symmetric, but avoid adding twice the value to the main-diagonal
+        if ( i != j)
+          K(j,i) += val;
+      } // for-j-loop
+    } // for-i-loop
+    
+  }//dim-loop  
+  
+  //add noise on the main diagonal
+  for (int i = 0; i < (int) X.get_n(); i++)
+    K(i,i) += noise;
+  return K;
+}
+
+template <typename T>
+std::vector<double> GeneralizedIntersectionKernelFunction<T>::computeKernelVector ( const std::vector<std::vector<T> > & X , const std::vector<T> & xstar)
+{
+  std::vector<double> kstar;
+
+  kstar.resize((int) X.size());
+
+  for (int i = 0; i < (int) X.size(); i++)
+  {
+    kstar[i] = measureDistance(X[i], xstar);
+  }
+
+  return kstar;
+}
+
+template <typename T>
+void GeneralizedIntersectionKernelFunction<T>::sayYourName() 
+{
+  std::cerr << "I'm the Generalized Intersection Kernel." << std::endl;
+}
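
A brief usage sketch for the two files above (an editorial illustration, not
part of the commit; the toy histograms and the noise value are made up):

    #include <iostream>
    #include <vector>

    #include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>

    int main()
    {
      // two toy histograms with three bins each
      std::vector<std::vector<double> > X;
      X.push_back(std::vector<double>(3, 0.5));
      X.push_back(std::vector<double>(3, 0.2));

      // g-HIK with exponent 1.2, i.e. k(a,b) = sum_i min(a_i, b_i)^1.2
      NICE::GeneralizedIntersectionKernelFunctionDouble kernel(1.2);

      // 2x2 kernel matrix with regularization noise 0.1 on the main diagonal
      NICE::Matrix K(kernel.computeKernelMatrix(X, 0.1));

      std::cout << K(0,1) << std::endl; // prints 3 * pow(0.2, 1.2)
      return 0;
    }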

+ 45 - 0
kernels/GenericKernelFunction.h

@@ -0,0 +1,45 @@
+/** 
+* @file GenericKernelFunction.h
+* @author Alexander Freytag
+* @brief Abstract class for all kernels (Interface - abstract)
+* @date 12/08/2011
+*/
+#ifndef _NICE_GENERICKERNELFUNCTION
+#define _NICE_GENERICKERNELFUNCTION
+
+#include <vector>
+#include <core/vector/VectorT.h>
+
+namespace NICE {
+  
+ /** 
+ * @class GenericKernelFunction
+ * @brief Abstract class for all kernels
+ * @author Alexander Freytag
+ */
+
+template<class T> class GenericKernelFunction 
+{
+
+    protected:
+
+    public:
+  
+    /** simple constructor */
+    GenericKernelFunction(){};
+      
+    /** simple destructor (virtual, since this is an abstract base class) */
+    virtual ~GenericKernelFunction(){};
+
+
+    virtual double measureDistance ( const std::vector<T> & a, const std::vector<T> & b  )=0;
+    virtual NICE::Matrix computeKernelMatrix ( const std::vector<std::vector<T> > & X )=0;
+    virtual NICE::Matrix computeKernelMatrix ( const std::vector<std::vector<T> > & X , const double & noise)=0;
+    virtual std::vector<double> computeKernelVector ( const std::vector<std::vector<T> > & X , const NICE::Vector & xstar)=0;
+    virtual void sayYourName()=0;
+
+};
+
+}
+
+#endif

+ 79 - 0
kernels/IntersectionKernelFunction.cpp

@@ -0,0 +1,79 @@
+// /** 
+// * @file IntersectionKernelFunction.cpp
+// * @brief Implementation for the intersection kernel function as distance measure between two histograms interpreted as vectors 
+// * @author Alexander Freytag
+// * @date 08-12-2011 (dd-mm-yyyy)
+// */
+// 
+// #include "IntersectionKernelFunction.h"
+// 
+// using namespace NICE;
+// 
+// template <typename T>
+// IntersectionKernelFunction<T>::IntersectionKernelFunction()
+// {
+// }
+// 
+// template <typename T>
+// IntersectionKernelFunction<T>::~IntersectionKernelFunction()
+// {
+// }
+// 
+// template <typename T>
+// double IntersectionKernelFunction<T>::measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b  )
+// {
+//   double distance(0.0);
+//   
+//   for (NICE::SparseVector::const_iterator itA = a.begin(); itA != a.end(); itA++)
+//   {
+//     NICE::SparseVector::const_iterator itB = b.find(itA->first);
+//     if (itB != b.end())
+//       distance += std::min(itA->second , itB->second);
+//   }
+//   
+//   return distance;  
+// }
+// 
+// template <typename T>
+// NICE::Matrix IntersectionKernelFunction<T>::computeKernelMatrix ( const std::vector<NICE::SparseVector > & X , const double & noise)
+// {
+//   NICE::Matrix K;
+//   K.resize(X.size(), X.size());
+//   K.set(0.0);
+//   
+//   for (int i = 0; i < X.size(); i++)
+//   {
+//     for (int j = i; j < X.size(); j++)
+//     {
+//       K(i,j) = measureDistance(X[i],X[j]);
+//       if (i!=j)
+//        K(j,i) = K(i,j);
+//     }
+//   }
+//   
+//   //add noise on the main diagonal
+//   for (int i = 0; i < (int) X.size(); i++)
+//     K(i,i) += noise;
+//   return K;
+// }
+// 
+// template <typename T>
+// std::vector<double> IntersectionKernelFunction<T>::computeKernelVector ( const std::vector<NICE::SparseVector> & X , const NICE::SparseVector & xstar)
+// {
+//   std::vector<double> kstar;
+// 
+//   kstar.resize((int) X.size());  
+//   
+//   for (int i = 0; i < (int) X.size(); i++)
+//   {
+//     kstar[i] = measureDistance(X[i], xstar);
+//   }
+// 
+//   return kstar;
+// }
+// 
+// template <typename T>
+// void IntersectionKernelFunction<T>::sayYourName() 
+// {
+//   std::cerr << "I'm the Intersection Kernel." << std::endl;
+// }

+ 113 - 0
kernels/IntersectionKernelFunction.h

@@ -0,0 +1,113 @@
+/** 
+* @file IntersectionKernelFunction.h
+* @brief The intersection kernel function as distance measure between two histograms interpreted as vectors (Interface)
+* @author Alexander Freytag
+* @date 08-12-2011 (dd-mm-yyyy)
+*/
+#ifndef _NICE_INTERSECTIONKERNELFUNCTION
+#define _NICE_INTERSECTIONKERNELFUNCTION
+
+#include <iostream>
+
+#include <gp-hik-core/FeatureMatrixT.h>
+#include <core/vector/SparseVectorT.h>
+
+
+//TODO functions are not allowed to be virtual anymore due to the template usage - any idea how to treat this?
+//maybe using type erasure: http://www.artima.com/cppsource/type_erasure2.html ?
+
+
+namespace NICE {
+  
+ /** 
+ * @class IntersectionKernelFunction
+ * @brief The intersection kernel function as distance measure between two histograms interpreted as vectors
+ * @author Alexander Freytag
+ */
+ 
+template<class T> class IntersectionKernelFunction 
+{
+
+    protected:
+
+    public:
+      
+    /** 
+    * @brief Default constructor
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    IntersectionKernelFunction();
+      
+    /** 
+    * @brief Default destructor
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    ~IntersectionKernelFunction();
+
+    /** 
+    * @brief Measures the distance between two vectors using the histogram intersection distance
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    double measureDistance ( const std::vector<T> & a, const std::vector<T> & b  );
+    
+    double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b  );
+    
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the HIK distance
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const std::vector<std::vector<T> > & X  );
+    
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the HIK distance and adds a given amount of noise on the main diagonal
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const std::vector<std::vector<T> > & X , const double & noise);
+    
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the HIK distance and adds a given amount of noise on the main diagonal
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const std::vector<NICE::SparseVector > & X , const double & noise);
+
+    /** 
+    * @brief Computes the symmetric and positive semi-definite kernel matrix K based on the given examples using the HIK distance and adds a given amount of noise on the main diagonal
+    * @author Alexander Freytag
+    * @date 03-02-2012 (dd-mm-yyyy)
+    */
+    NICE::Matrix computeKernelMatrix ( const NICE::FeatureMatrixT<T> & X , const double & noise);
+    
+    /** 
+    * @brief Computes the similarity between the data and a new vector using the HIK distance
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    std::vector<double> computeKernelVector ( const std::vector<std::vector<T> > & X , const std::vector<T> & xstar);
+    
+    NICE::Vector computeKernelVector ( const std::vector<NICE::SparseVector> & X , const NICE::SparseVector & xstar);
+    
+    /** 
+    * @brief Simply print the name of the class
+    * @author Alexander Freytag
+    * @date 23-12-2011 (dd-mm-yyyy)
+    */
+    void sayYourName();
+
+};
+
+  //! default definition for a IntersectionKernelFunction
+  typedef IntersectionKernelFunction<double> IntersectionKernelFunctionDouble;
+
+}
+
+#ifdef __GNUC__
+#include "gp-hik-core/kernels/IntersectionKernelFunction.tcc"
+#endif
+
+#endif

+ 198 - 0
kernels/IntersectionKernelFunction.tcc

@@ -0,0 +1,198 @@
+/** 
+* @file IntersectionKernelFunction.tcc
+* @brief The intersection kernel function as distance measure between two histograms interpreted as vectors (Implementation)
+* @author Alexander Freytag
+* @date 08-12-2011 (dd-mm-yyyy)
+*/
+
+#include "IntersectionKernelFunction.h"
+
+#include <gp-hik-core/SortedVectorSparse.h>
+
+using namespace NICE;
+
+template <typename T>
+double IntersectionKernelFunction<T>::measureDistance ( const std::vector<T> & a, const std::vector<T> & b  )
+{
+  int size( (int) a.size());
+  if ((int) b.size() < size)
+    size = (int) b.size();
+
+  double distance(0.0);
+
+  for (int i = 0; i < size; i++)
+  {
+    if ( a[i] < b[i])
+      distance += (double) a[i];
+    else
+      distance += (double) b[i];
+  }
+  return distance;
+}
+
+template <typename T>
+NICE::Matrix IntersectionKernelFunction<T>::computeKernelMatrix ( const std::vector<std::vector<T> > & X  )
+{
+  NICE::Matrix K;
+  
+  K.resize((int) X.size(), (int) X.size());
+  
+  double valTmp;
+  for (int i = 0; i < (int) X.size(); i++)
+  {
+    for (int j = i; j < (int) X.size(); j++)
+    {
+      valTmp = measureDistance(X[i],X[j]);
+      //Kernel matrix has to be symmetric
+      K(i,j) = valTmp;
+      //the kernel matrix is symmetric; avoid the redundant assignment on the main diagonal
+      if ( i != j)
+        K(j,i) = valTmp;
+    }
+  }
+
+  return K;
+}
+
+template <typename T>
+NICE::Matrix IntersectionKernelFunction<T>::computeKernelMatrix ( const std::vector<std::vector<T> > & X , const double & noise)
+{
+  NICE::Matrix K(computeKernelMatrix(X));
+  for (int i = 0; i < (int) X.size(); i++)
+    K(i,i) += noise;
+  return K;
+}
+
+template <typename T>
+NICE::Matrix IntersectionKernelFunction<T>::computeKernelMatrix ( const NICE::FeatureMatrixT<T>  & X , const double & noise)
+{
+  NICE::Matrix K;  
+  K.resize(X.get_n(), X.get_n());
+  K.set(0.0);
+  
+  //run over every dimension and add the corresponding min-values to the entries in the kernel matrix
+  for (int dim = 0; dim < X.get_d(); dim++)
+  {
+   const std::multimap< double, typename SortedVectorSparse<double>::dataelement> & nonzeroElements = X.getFeatureValues(dim).nonzeroElements();
+    
+    //compute the min-values (similarities) between every pair in this dimension, zero elements do not influence this
+    SortedVectorSparse<double>::const_elementpointer it1 = nonzeroElements.begin();  
+    
+    for (; it1 != nonzeroElements.end(); it1++)
+    {
+      int i(it1->second.first);
+      SortedVectorSparse<double>::const_elementpointer it2 = it1;  
+      for (; it2 != nonzeroElements.end(); it2++)
+      {  
+        int j(it2->second.first);
+        double val(std::min(it1->second.second, it2->second.second));
+        K(i,j) += val;
+        //kernel-matrix has to be symmetric, but avoid adding twice the value to the main-diagonal
+        if ( i != j)
+          K(j,i) += val;
+      } // for-j-loop
+    } // for-i-loop
+    
+  }//dim-loop  
+  
+  //add noise on the main diagonal
+  for (int i = 0; i < (int) X.get_n(); i++)
+    K(i,i) += noise;
+  return K;
+}
+
+template <typename T>
+std::vector<double> IntersectionKernelFunction<T>::computeKernelVector ( const std::vector<std::vector<T> > & X , const std::vector<T> & xstar)
+{ 
+  std::vector<double> kstar;
+
+  kstar.resize((int) X.size());
+
+  for (int i = 0; i < (int) X.size(); i++)
+  {
+    kstar[i] = measureDistance(X[i], xstar);
+  }
+
+  return kstar;
+}
+
+template <typename T>
+IntersectionKernelFunction<T>::IntersectionKernelFunction()
+{
+}
+
+template <typename T>
+IntersectionKernelFunction<T>::~IntersectionKernelFunction()
+{
+}
+
+template <typename T>
+double IntersectionKernelFunction<T>::measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b  )
+{
+  double distance(0.0);
+  
+  for (NICE::SparseVector::const_iterator itA = a.begin(); itA != a.end(); itA++)
+  {
+    NICE::SparseVector::const_iterator itB = b.find(itA->first);
+    if (itB != b.end())
+      distance += std::min(itA->second , itB->second);
+  }
+  
+  return distance;  
+}
+
+template <typename T>
+NICE::Matrix IntersectionKernelFunction<T>::computeKernelMatrix ( const std::vector<NICE::SparseVector > & X , const double & noise)
+{
+  
+  std::cerr << "compute Kernel Matrix with vector of sparse vectors called " << std::endl;
+  NICE::Matrix K;
+  std::cerr << "NICE::Matrix initialized" << std::endl;
+  std::cerr << "now perform the resize to: "<< X.size() << std::endl;
+  K.resize(X.size(), X.size());
+  std::cerr << "NICE::matrix set to size : " << X.size() << std::endl;
+  K.set(0.0);
+  std::cerr << "set entries to zero" << std::endl; 
+  
+  std::cerr << "compute Kernel Matrix" << std::endl;
+  for (int i = 0; i < X.size(); i++)
+  {
+    std::cerr << i << " / " << X.size() << std::endl;
+    for (int j = i; j < X.size(); j++)
+    {
+      K(i,j) = measureDistance(X[i],X[j]);
+      if (i!=j)
+       K(j,i) = K(i,j);
+    }
+  }
+  std::cerr << "compute kernel matrix done" << std::endl;
+  
+  //add noise on the main diagonal
+  for (int i = 0; i < (int) X.size(); i++)
+    K(i,i) += noise;
+  return K;
+}
+
+template <typename T>
+NICE::Vector IntersectionKernelFunction<T>::computeKernelVector ( const std::vector<NICE::SparseVector> & X , const NICE::SparseVector & xstar)
+{
+  NICE::Vector kstar;
+
+  kstar.resize((int) X.size());  
+  
+  if (X.size() > 0)
+  {
+    for (int i = 0; i < (int) X.size(); i++)
+    {
+      kstar[i] = measureDistance(X[i], xstar);
+    }  
+  }
+  
+  return kstar;
+}
+
+template <typename T>
+void IntersectionKernelFunction<T>::sayYourName() 
+{
+  std::cerr << "I'm the Intersection Kernel." << std::endl;
+}
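
A matching usage sketch for the sparse-vector interface above (an editorial
illustration, not part of the commit; it assumes NICE::SparseVector supports
std::map-style insertion, which the find()/iterator usage in the code above
suggests):

    #include <iostream>
    #include <utility>
    #include <vector>

    #include <gp-hik-core/kernels/IntersectionKernelFunction.h>

    int main()
    {
      // two toy sparse histograms
      NICE::SparseVector a;
      a.insert(std::make_pair(0, 0.5));
      a.insert(std::make_pair(7, 0.3));

      NICE::SparseVector b;
      b.insert(std::make_pair(0, 0.2));

      std::vector<NICE::SparseVector> X;
      X.push_back(a);
      X.push_back(b);

      NICE::IntersectionKernelFunctionDouble hik;

      // kernel matrix with noise on the main diagonal, and the kernel
      // values between the training data and a new example
      NICE::Matrix K(hik.computeKernelMatrix(X, 0.1));
      NICE::Vector kstar(hik.computeKernelVector(X, b));

      // both are min(0.5, 0.2) = 0.2, since only bin 0 is shared
      std::cout << K(0,1) << " " << kstar[0] << std::endl;
      return 0;
    }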

+ 8 - 0
kernels/Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursively going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
kernels/Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusively use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 1 - 0
libdepend.inc

@@ -0,0 +1 @@
+$(call PKG_DEPEND_INT,core/)

+ 8 - 0
parameterizedFunctions/Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursively going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
parameterizedFunctions/Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusively use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 93 - 0
parameterizedFunctions/PFAbsExp.h

@@ -0,0 +1,93 @@
+/** 
+* @file PFAbsExp.h
+* @author Erik Rodner
+* @brief Parameterized Function: absolute value and exponential operation -- pow(fabs(x), exponent) (Interface + Implementation)
+* @date 01/04/2012
+*/
+#ifndef _NICE_PFABSEXPINCLUDE
+#define _NICE_PFABSEXPINCLUDE
+
+#include <math.h>
+#include "ParameterizedFunction.h"
+
+namespace NICE {
+  
+ /** 
+ * @class PFAbsExp
+ * @brief Parameterized Function: absolute value and exponential operation -- pow(fabs(x), exponent)
+ * @author Erik Rodner
+ */
+ 
+class PFAbsExp : public ParameterizedFunction
+{
+  protected:
+    double upperBound;
+    double lowerBound;
+
+  public:
+
+  /** simple constructor, we only have one parameter */
+  PFAbsExp( double exponent = 1.0, 
+            double lB = -std::numeric_limits<double>::max(), 
+            double uB = std::numeric_limits<double>::max() ) : 
+            ParameterizedFunction(1) 
+  { 
+    m_parameters[0] = exponent; 
+    upperBound = uB;
+    lowerBound = lB;
+  };
+  
+  ~PFAbsExp(){};
+    
+  double f ( uint index, double x ) const { 
+/*        std::cerr << "upperBound: " << upperBound << std::endl;
+    std::cerr << "lowerBound: " << lowerBound << std::endl;
+    std::cerr << "m_parameters: " << m_parameters << std::endl;   */ 
+    return pow(fabs(x),m_parameters[0]); 
+  }
+
+  bool isOrderPreserving() const { return true; };
+
+  Vector getParameterUpperBounds() const { return Vector(1, upperBound); };
+  Vector getParameterLowerBounds() const { return Vector(1, lowerBound); };
+  
+  void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
+  void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
+  
+  /** Persistent interface */
+  virtual void restore ( std::istream & is, int format = 0 )
+  {
+    if (is.good())
+    {
+      is.precision (std::numeric_limits<double>::digits10 + 1);
+      
+      std::string tmp;
+      is >> tmp;
+      is >> upperBound;
+
+      is >> tmp;
+      is >> lowerBound;      
+    }
+    ParameterizedFunction::restore(is);
+    
+  };
+  virtual void store ( std::ostream & os, int format = 0 ) const
+  {
+    if (os.good())
+    {
+      os.precision (std::numeric_limits<double>::digits10 + 1); 
+      os << "upperBound: " << std::endl <<  upperBound << std::endl;
+      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+    }
+    ParameterizedFunction::store(os);
+  };
+  
+  virtual void clear () {};
+  
+  virtual std::string sayYourName() const {return "absexp";};
+     
+};
+
+}
+
+#endif
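
A short usage sketch for PFAbsExp (an editorial illustration, not part of the
commit; the data values are made up). PFAbsExp ignores the index argument of
f, so the exact matrix layout handed to applyFunctionToDataMatrix does not
matter here:

    #include <iostream>
    #include <vector>

    #include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>

    int main()
    {
      // toy data: 2 rows with 3 entries each, all set to -0.5
      std::vector<std::vector<double> > data(2, std::vector<double>(3, -0.5));

      // f(x) = pow(fabs(x), 2.0), applied to every entry in place
      NICE::PFAbsExp pf(2.0);
      pf.applyFunctionToDataMatrix(data);

      std::cout << data[0][0] << std::endl; // prints 0.25
      return 0;
    }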

+ 85 - 0
parameterizedFunctions/PFExp.h

@@ -0,0 +1,85 @@
+/** 
+* @file PFExp.h
+* @author Erik Rodner
+* @brief Parameterized Function: normalized exponential operation -- (exp(fabs(x)*a) - 1) / (exp(a) - 1) (Interface + Implementation)
+*/
+#ifndef _NICE_PFEXPINCLUDE
+#define _NICE_PFEXPINCLUDE
+
+#include <math.h>
+#include "ParameterizedFunction.h"
+
+namespace NICE {
+  
+ /** 
+ * @class PFExp
+ * @brief Parameterized Function: normalized exponential operation -- (exp(fabs(x)*a) - 1) / (exp(a) - 1)
+ * @author Erik Rodner
+ */
+ 
+class PFExp : public ParameterizedFunction
+{
+  protected:
+    double upperBound;
+    double lowerBound;
+
+  public:
+
+  /** simple constructor, we only have one parameter */
+  PFExp( double exponent = 1.0, 
+            double lB = -std::numeric_limits<double>::max(), 
+            double uB = std::numeric_limits<double>::max() ) : 
+            ParameterizedFunction(1) 
+  { 
+    m_parameters[0] = exponent; 
+    upperBound = uB;
+    lowerBound = lB;
+  };
+  
+  ~PFExp(){};
+
+  double f ( uint index, double x ) const { return (exp(fabs(x) * m_parameters[0]) - 1.0) / (exp(m_parameters[0]) - 1.0); }
+
+  bool isOrderPreserving() const { return true; };
+
+  Vector getParameterUpperBounds() const { return Vector(1, upperBound); };
+  Vector getParameterLowerBounds() const { return Vector(1, lowerBound); };
+  
+  void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
+  void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
+
+  /** Persistent interface */
+  virtual void restore ( std::istream & is, int format = 0 )
+  {
+    if (is.good())
+    {
+      is.precision (std::numeric_limits<double>::digits10 + 1);
+      
+      std::string tmp;
+      is >> tmp;
+      is >> upperBound;
+
+      is >> tmp;
+      is >> lowerBound;      
+    }
+    ParameterizedFunction::restore(is);
+  };
+  virtual void store ( std::ostream & os, int format = 0 ) const
+  {
+    if (os.good())
+    {
+      os.precision (std::numeric_limits<double>::digits10 + 1); 
+      os << "upperBound: " << std::endl <<  upperBound << std::endl;
+      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+    }
+    ParameterizedFunction::store(os);
+  };
+  virtual void clear () {};
+  
+  virtual std::string sayYourName() const {return "exp";};
+  
+};
+
+}
+
+#endif

+ 99 - 0
parameterizedFunctions/PFMKL.h

@@ -0,0 +1,99 @@
+/** 
+* @file PFMKL.h
+* @brief Parameterized Function: weights for Multiple Kernel Learning approach (Interface + Implementation)
+* @author Alexander Freytag
+
+*/
+#ifndef _NICE_PFMULTIPLEKERNELLEARNINGINCLUDE
+#define _NICE_PFMULTIPLEKERNELLEARNINGINCLUDE
+
+#include <math.h>
+#include "ParameterizedFunction.h"
+
+namespace NICE {
+  
+ /** 
+ * @class PFMKL
+ * @brief Parameterized Function: weights for Multiple Kernel Learning approach
+ * @author Alexander Freytag
+ */
+ 
+class PFMKL : public ParameterizedFunction
+{
+  protected:
+
+    double upperBound;
+    double lowerBound;
+    std::set<int> steps;
+
+  public:
+
+  PFMKL(    const std::set<int> & _steps,
+            double lB = -std::numeric_limits<double>::max(), 
+            double uB = std::numeric_limits<double>::max() ) : 
+            ParameterizedFunction(_steps.size()+1) 
+  { 
+    upperBound = uB;
+    lowerBound = std::max( lB, 0.0 );
+    if ( uB < 1.0 )
+      m_parameters.set(uB);
+    else
+      m_parameters.set(1.0);
+    steps = _steps;
+  };
+  
+  ~PFMKL(){};
+    
+  double f ( uint index, double x ) const
+  { 
+    int dummyCnt ( 0 );
+    for (std::set<int>::const_iterator it = steps.begin(); it != steps.end(); it++, dummyCnt++)
+    {
+      if ( index < *it)
+        return x * m_parameters[dummyCnt];
+    }
+    //the index lies beyond the last step, so apply the last of the
+    //steps.size()+1 weights (otherwise f would end without returning a value)
+    return x * m_parameters[steps.size()];
+  }
+
+  bool isOrderPreserving() const { return true; };
+
+  Vector getParameterUpperBounds() const { return Vector(m_parameters.size(), upperBound); };
+  Vector getParameterLowerBounds() const { return Vector(m_parameters.size(), lowerBound); };
+  
+  void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
+  void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
+  
+  /** Persistent interface */
+  virtual void restore ( std::istream & is, int format = 0 )
+  {
+    if (is.good())
+    {
+      is.precision (std::numeric_limits<double>::digits10 + 1);
+      
+      std::string tmp;
+      is >> tmp;
+      is >> upperBound;
+
+      is >> tmp;
+      is >> lowerBound;   
+    }
+    ParameterizedFunction::restore(is);
+  };  
+  virtual void store ( std::ostream & os, int format = 0 ) const
+  {
+    if (os.good())
+    {
+      os.precision (std::numeric_limits<double>::digits10 + 1); 
+      os << "upperBound: " << std::endl <<  upperBound << std::endl;
+      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+    }
+    ParameterizedFunction::store(os);
+  };  
+  virtual void clear () {};
+  
+  virtual std::string sayYourName() const {return "MKL weighting";};
+  
+};
+
+}
+
+#endif
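
A small sketch of how the steps set partitions the feature dimensions (an
editorial illustration, not part of the commit; the boundaries 100 and 250
are made up):

    #include <iostream>
    #include <set>

    #include <gp-hik-core/parameterizedFunctions/PFMKL.h>

    int main()
    {
      // dimensions [0,100) get weight m_parameters[0], [100,250) get
      // m_parameters[1], and all remaining dimensions the last weight
      std::set<int> steps;
      steps.insert(100);
      steps.insert(250);

      NICE::PFMKL pf(steps); // steps.size()+1 = 3 weights, initialized to 1.0
      std::cout << pf.f(120, 2.0) << std::endl; // 2.0 * m_parameters[1] = 2.0
      return 0;
    }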

+ 95 - 0
parameterizedFunctions/PFWeightedDim.h

@@ -0,0 +1,95 @@
+/** 
+* @file PFWeightedDim.h
+* @brief Parameterized Function: weights for each dimension (Interface + Implementation)
+* @author Erik Rodner
+
+*/
+#ifndef _NICE_PFWEIGHTEDDIMINCLUDE
+#define _NICE_PFWEIGHTEDDIMINCLUDE
+
+#include <math.h>
+#include "ParameterizedFunction.h"
+
+namespace NICE {
+  
+ /** 
+ * @class PFWeightedDim
+ * @brief Parameterized Function: weights for each dimension
+ * @author Erik Rodner
+ */
+ 
+class PFWeightedDim : public ParameterizedFunction
+{
+  protected:
+
+    double upperBound;
+    double lowerBound;
+    uint dimension;
+
+  public:
+
+  PFWeightedDim( uint dimension, 
+            double lB = -std::numeric_limits<double>::max(), 
+            double uB = std::numeric_limits<double>::max() ) : 
+            ParameterizedFunction(dimension) 
+  { 
+    this->dimension = dimension;
+    upperBound = uB;
+    lowerBound = lB;
+    if ( uB < 1.0 )
+      m_parameters.set(uB);
+    else
+      m_parameters.set(1.0);
+  };
+  
+  ~PFWeightedDim(){};
+    
+  double f ( uint index, double x ) const { return m_parameters[index] * m_parameters[index] * x; }
+
+  bool isOrderPreserving() const { return true; };
+
+  Vector getParameterUpperBounds() const { return Vector(m_parameters.size(), upperBound); };
+  Vector getParameterLowerBounds() const { return Vector(m_parameters.size(), lowerBound); };
+  
+  void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) { if (_newLowerBounds.size() > 0) lowerBound = _newLowerBounds(0);};
+  void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) { if (_newUpperBounds.size() > 0) upperBound = _newUpperBounds(0);};
+  
+  /** Persistent interface */
+  virtual void restore ( std::istream & is, int format = 0 )
+  {
+    if (is.good())
+    {
+      is.precision (std::numeric_limits<double>::digits10 + 1);
+      
+      std::string tmp;
+      is >> tmp;
+      is >> upperBound;
+
+      is >> tmp;
+      is >> lowerBound;   
+      
+      is >> tmp;
+      is >> dimension;
+    }
+    ParameterizedFunction::restore(is);
+  };  
+  virtual void store ( std::ostream & os, int format = 0 ) const
+  {
+    if (os.good())
+    {
+      os.precision (std::numeric_limits<double>::digits10 + 1); 
+      os << "upperBound: " << std::endl <<  upperBound << std::endl;
+      os << "lowerBound: " << std::endl <<  lowerBound << std::endl;
+      os << "dimension: " << std::endl << dimension << std::endl;
+    }
+    ParameterizedFunction::store(os);
+  };  
+  virtual void clear () {};
+  
+  virtual std::string sayYourName() const {return "weightedDim";};
+  
+};
+
+}
+
+#endif

+ 54 - 0
parameterizedFunctions/ParameterizedFunction.cpp

@@ -0,0 +1,54 @@
+/** 
+* @file ParameterizedFunction.cpp
+* @brief Simple parameterized multi-dimensional function (Implementation)
+* @author Erik Rodner
+* @date 01/04/2012
+
+*/
+#include <iostream>
+
+#include "ParameterizedFunction.h"
+
+using namespace NICE;
+using namespace std;
+
+
+ParameterizedFunction::ParameterizedFunction( uint dimension )
+{
+  m_parameters.resize(dimension);
+}
+      
+void ParameterizedFunction::applyFunctionToDataMatrix ( std::vector< std::vector< double > > & dataMatrix ) const
+{
+  // REMARK: might be inefficient due to virtual calls
+  int iCnt(0);
+  for ( vector< vector<double> >::iterator i = dataMatrix.begin() ; i != dataMatrix.end(); i++, iCnt++ )
+  {
+    uint index = 0;
+    for ( vector<double>::iterator j = i->begin(); j != i->end(); j++, index++ )
+    {
+      *j = f ( iCnt, *j );
+    }
+  }
+}
+
+void ParameterizedFunction::restore ( std::istream & is, int format )
+{
+  if (is.good())
+  {
+    is.precision (numeric_limits<double>::digits10 + 1);
+    
+    string tmp;
+    is >> tmp;
+    is >> m_parameters;
+  }
+}
+
+void ParameterizedFunction::store ( std::ostream & os, int format ) const
+{
+  if (os.good())
+  {
+    os.precision (numeric_limits<double>::digits10 + 1); 
+    os << "m_parameters: " << std::endl << m_parameters << std::endl;
+  }
+};

+ 131 - 0
parameterizedFunctions/ParameterizedFunction.h

@@ -0,0 +1,131 @@
+/** 
+* @file ParameterizedFunction.h
+* @brief Simple parameterized multi-dimensional function (Interface)
+* @author Erik Rodner
+* @date 01/04/2012
+*/
+#ifndef _NICE_PARAMETERIZEDFUNCTIONINCLUDE
+#define _NICE_PARAMETERIZEDFUNCTIONINCLUDE
+
+#include <vector>
+#include <limits>
+
+#include "core/basics/Persistent.h"
+#include <core/vector/VectorT.h>
+#include <core/vector/SparseVectorT.h>
+
+namespace NICE {
+  
+/** @class ParameterizedFunction
+ * @brief
+ * simple parameterized multi-dimensional function 
+ * 
+ * @description
+ * current requirements: 
+ * (1) f(0) = 0
+ * (2) f is monotonically increasing
+ *
+ * @author Erik Rodner
+ */
+class ParameterizedFunction : NICE::Persistent
+{
+
+  protected:
+
+    /** parameters of the function */
+    Vector m_parameters;
+
+
+  public:
+
+    /**
+    * @brief constructor taking the dimension of the parameter vector, initializes
+    * the member variable
+    *
+    * @param dimension dimension of the parameter vector
+    */
+    ParameterizedFunction ( uint dimension );
+
+    /** empty destructor */
+    virtual ~ParameterizedFunction () {};
+
+    /**
+    * @brief Function evaluation
+    *
+    * @param index component of the vector
+    * @param x function argument
+    *
+    * @return function value, which depends on the stored parameters
+    */
+    virtual double f ( uint index, double x ) const = 0;
+    
+    /**
+    * @brief Tell whether this function is order-preserving in the sense that
+    * the ordering of the function values does not depend on the parameter value.
+    * Therefore, the function is either monotonically increasing or decreasing.
+    *
+    * @return boolean flag =true iff the function is order preserving
+    */
+    virtual bool isOrderPreserving () const = 0;
+
+    /**
+    * @brief get the lower bound for each parameter
+    *
+    * @return vector with lower bounds
+    */
+    virtual NICE::Vector getParameterLowerBounds() const = 0;
+    
+    /**
+    * @brief set the lower bounds for each parameter
+    *
+    * @param _newLowerBounds vector with the new lower bounds
+    */
+    virtual void setParameterLowerBounds(const NICE::Vector & _newLowerBounds) = 0;
+
+
+    /**
+    * @brief get the upper bounds for each parameter
+    *
+    * @return vector with upper bounds
+    */
+    virtual NICE::Vector getParameterUpperBounds() const = 0;
+    
+    /**
+    * @brief set the upper bounds for each parameter
+    *
+    * @param _newUpperBounds vector with the new upper bounds
+    */
+    virtual void setParameterUpperBounds(const NICE::Vector & _newUpperBounds) = 0;
+
+    /**
+    * @brief read-only access of the parameters
+    *
+    * @return const-reference to the stored parameter vector
+    */
+    const NICE::Vector & parameters() const { return m_parameters; }
+
+    /**
+    * @brief read-and-write access to the parameters
+    *
+    * @return reference to the stored parameter vector
+    */
+    Vector & parameters() { return m_parameters; }
+   
+    /**
+    * @brief apply function to a data matrix ( feature vectors stored in rows )
+    *
+    * @param dataMatrix input matrix, e.g. featureMatrix[0][1..d] is the d-dimensional feature vector of example 0
+    */
+    void applyFunctionToDataMatrix ( std::vector< std::vector< double > > & dataMatrix ) const;
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 );
+    virtual void store ( std::ostream & os, int format = 0 ) const;
+    virtual void clear () {};
+    
+    virtual std::string sayYourName() const = 0;
+};
+
+}
+
+#endif

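To make the interface contract concrete, here is a minimal sketch of a derived class; it is not part of this commit, the name PFLinearExample and its single scaling parameter are purely illustrative, and it assumes NICE::VectorT's (size, value) constructor. The function satisfies the two requirements stated above, f(0) = 0 and monotonicity, and inherits the Persistent store/restore of m_parameters from ParameterizedFunction.cpp.

    #include <limits>
    #include "parameterizedFunctions/ParameterizedFunction.h"

    namespace NICE {

    // hypothetical example: f(index, x) = a * x with a single parameter a >= 0
    class PFLinearExample : public ParameterizedFunction
    {
      public:
        PFLinearExample ( double a = 1.0 ) : ParameterizedFunction ( 1 )
        {
          m_parameters[0] = a;
        }

        // the same scaling is applied in every dimension
        double f ( uint index, double x ) const { return m_parameters[0] * x; }

        // scaling by a non-negative constant never changes the ordering of values
        bool isOrderPreserving () const { return true; }

        Vector getParameterLowerBounds () const { return Vector ( 1, 0.0 ); }
        Vector getParameterUpperBounds () const { return Vector ( 1, std::numeric_limits<double>::max() ); }

        // the bounds of this toy function are fixed, so the setters do nothing
        void setParameterLowerBounds ( const Vector & _newLowerBounds ) {}
        void setParameterUpperBounds ( const Vector & _newUpperBounds ) {}

        std::string sayYourName () const { return "linear example"; }
    };

    }

With such a class, PFLinearExample pf ( 2.0 ); pf.store ( std::cout ); writes the parameter vector in exactly the format produced by ParameterizedFunction::store above.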
+ 88 - 0
progs/Makefile.inc

@@ -0,0 +1,88 @@
+# BINARY-DIRECTORY-MAKEFILE
+# conventions:
+# - there are no subdirectories, they are ignored!
+# - all ".C", ".cpp" and ".c" files in the current directory are considered
+#   independent binaries, and linked as such.
+# - the binaries depend on the library of the parent directory
+# - the binary names are created with $(BINNAME), i.e. it will be more or less
+#   the name of the .o file
+# - all binaries will be added to the default build list ALL_BINARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. the auxiliary
+# variables (SUBDIR_before, SUBDIR_add, ...) are only required so that we can
+# recover the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also drop the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+#SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+#include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# include the libdepend.inc file, which gives additional dependencies for the
+# libraries and binaries. additionally, an automatic dependency on the library
+# of the parent directory is added (see the PKG_DEPEND_INT call below).
+
+-include $(SUBDIR)libdepend.inc
+
+PARENTDIR:=$(patsubst %/,%,$(dir $(patsubst %/,%,$(SUBDIR))))
+$(eval $(call PKG_DEPEND_INT,$(PARENTDIR)))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory, however,
+# to update $(ALL_OBJS) such that it contains the path and name of
+# all objects. otherwise we cannot include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+      $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+BINARIES:=$(patsubst %.o,$(BINDIR)%,$(filter-out moc_%,$(notdir $(OBJS))))
+ALL_BINARIES+=$(BINARIES)
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. each binary depends on the corresponding .o file and
+# on the libraries specified by the INTLIBS/EXTLIBS. these dependencies can be
+# specified manually or they are automatically stored in a .bd file.
+
+$(foreach head,$(wildcard $(SUBDIR)*.h),$(eval $(shell grep -q Q_OBJECT $(head) && echo $(head) | sed -e's@^@/@;s@.*/\(.*\)\.h$$@$(BINDIR)\1:$(OBJDIR)moc_\1.o@')))
+-include $(OBJS:%.o=%.bd)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 274 - 0
progs/completeEvaluationFastMinkernel.cpp

@@ -0,0 +1,274 @@
+/** 
+* @file completeEvaluationFastMinkernel.cpp
+* @brief Demo-Program to show how to call some methods of the FastMinKernel class
+* @author Alexander Freytag
+* @date 12/08/2011
+*/
+
+#include <vector>
+#include <iostream>
+#include <cstdlib>
+#include <cmath>    // fabs
+#include <ctime>
+#include <unistd.h> // getopt
+
+#include "core/vector/MatrixT.h"
+#include "core/vector/VectorT.h"
+#include "gp-hik-core/FastMinKernel.h"
+#include "gp-hik-core/tools.h"
+#include "gp-hik-core/VectorSorter.h"
+#include "gp-hik-core/FeatureMatrixT.h"
+#include "gp-hik-core/kernels/IntersectionKernelFunction.h"
+
+
+using namespace std;
+using namespace NICE;
+
+/**
+ * @brief Printing main menu.
+ * @author Alexander Freytag
+ * @date 12/06/2011
+ * 
+ * @return void
+ **/
+void print_main_menu()
+{
+  std::cerr << "===============================================================================================================" << std::endl;
+  std::cerr << "|| Welcome to the evaluation program for our FastMinKernel class                                             ||" << std::endl;
+  std::cerr << "||                                                                                                           ||" << std::endl;
+  std::cerr << "|| We will run some tests to evaluate the efficiency of our fast computations compared to the baseline ones. ||" << std::endl;
+  std::cerr << "|| Note that the benefit is larger for higher numbers of dimensions.                                         ||" << std::endl;
+  std::cerr << "|| Note further that we randomly sample features, so the results might differ from run to run.               ||" << std::endl;
+  std::cerr << "|| Finally, note that in practical applications the speed-up is larger due to sparse features.               ||" << std::endl;
+  std::cerr << "===============================================================================================================" << std::endl;
+  
+  
+  std::cout << std::endl << "Input options:" << std::endl;
+  std::cout << "   -n <number>  number of examples to generate (optional)"<< std::endl;
+  std::cout << "   -d <number>  number of dimensions for each example"<< std::endl;
+  std::cout << "   -v 1/0  verbose mode (optional)"<< std::endl;
+  std::cout << "   -h  print this help"<< std::endl;
+  return;
+}
+
+int main (int argc, char* argv[])
+{
+  std::cout.precision(15);
+  std::cerr.precision(15);
+  
+  int nEx (5);
+  int d (10);
+  bool verbose(false);
+  bool nGiven (false);
+  
+  int rc;
+  if (argc<2)
+  {
+    print_main_menu();
+    return -1;
+  }
+  
+  while ((rc=getopt(argc,argv,"n:d:v:h"))>=0)
+  {
+    switch(rc)
+    {
+      case 'n': 
+      {
+        nEx = atoi(optarg); 
+        nGiven = true;
+        break;
+      }
+      case 'd': d = atoi(optarg); break;
+      case 'v': verbose = atoi(optarg); break;
+      default: print_main_menu();
+    }
+  }
+  
+  srand ( time(NULL) );
+
+  std::vector<int> trainingSizes;
+  std::vector<int> dataDimensions;
+  std::vector<float> timePreparationEfficiently;
+  std::vector<float> timeMultiplicationEfficiently;
+  std::vector<float> timePreparationSlowly;
+  std::vector<float> timeMultiplicationSlowly;
+  std::vector<float> timeKSumEfficiently;
+  std::vector<float> timeKSumSlowly;
+  std::vector<double> errorsMultiplication;
+  std::vector<double> errorsKSum;
+  
+  int lower (1000);
+  int upper(10000);
+  int stepSize(1000);
+  if (nGiven)
+  {
+    lower = nEx;
+    upper = nEx;
+  }
+  for (int n = lower; n <= upper; n+=stepSize)
+  {
+    if (verbose)
+      std::cerr << "================================" << std::endl;
+
+    std::cerr << "n: " << n << std::endl;
+    trainingSizes.push_back(n);
+    dataDimensions.push_back(d);
+    
+    //generate random data with specified dimensions and number of examples
+    std::vector<std::vector<double> > rand_feat;
+    generateRandomFeatures(d,n,rand_feat);
+    
+    //transpose the data structure so that it fits our FastMinKernel class
+    std::vector<std::vector<double> > rand_feat_transposed (rand_feat);
+    transposeVectorOfVectors(rand_feat_transposed);
+
+    //generate random alpha vectors
+    Vector alphas;
+    generateRandomFeatures(n, alphas);
+
+    //for these experiments, the noise does not matter
+    double noise (0.0);
+    
+    //---------------- EVALUATE THE RUNTIME needed for initializing both methods (fast-hik vs baseline) ---------------------------
+    
+    time_t  hik_efficient_preparation_start = clock();
+    FastMinKernel fastHIK ( rand_feat, noise );
+    
+    NICE::VVector A; 
+    NICE::VVector B; 
+    
+    fastHIK.hik_prepare_alpha_multiplications(alphas, A, B);
+
+    float time_hik_efficient_preparation = (float) (clock() - hik_efficient_preparation_start);
+    if (verbose)
+    {
+      std::cerr << "Time for HIK efficient preparation: " << time_hik_efficient_preparation/CLOCKS_PER_SEC << std::endl;
+    }
+    
+    timePreparationEfficiently.push_back(time_hik_efficient_preparation/CLOCKS_PER_SEC);
+    
+    //---------------- EVALUATE THE ERROR AND RUNTIME FOR MULTIPLY K \alpha (aka kernel_multiply) ---------------------------
+    
+    Vector beta;
+    //tic
+    time_t  hik_multiply_start = clock();
+    fastHIK.hik_kernel_multiply(A, B, alphas, beta);
+    //toc
+    float time_hik_multiply = (float) (clock() - hik_multiply_start); 
+    if (verbose)
+    {
+      std::cerr << "Time for HIK multiplication: " << time_hik_multiply/CLOCKS_PER_SEC << std::endl;
+    }
+    
+    timeMultiplicationEfficiently.push_back(time_hik_multiply/CLOCKS_PER_SEC);
+
+    NICE::IntersectionKernelFunction<double> hikSlow;
+    //tic
+    time_t  hik_slow_prepare_start = clock();
+    NICE::Matrix K (hikSlow.computeKernelMatrix(rand_feat_transposed, noise));
+    //toc
+    float time_hik_slow_prepare = (float) (clock() - hik_slow_prepare_start); 
+    if (verbose)
+    {
+      std::cerr << "Time for HIK slow preparation of Kernel Matrix: " << time_hik_slow_prepare/CLOCKS_PER_SEC << std::endl;
+    }
+    timePreparationSlowly.push_back(time_hik_slow_prepare/CLOCKS_PER_SEC);
+    
+    time_t  hik_slow_multiply_start = clock();
+    Vector betaSlow = K*alphas;
+    
+    //toc
+    float time_hik_slow_multiply = (float) (clock() - hik_slow_multiply_start); 
+    if (verbose)
+    {
+      std::cerr << "Time for HIK slow multiply: " << time_hik_slow_multiply/CLOCKS_PER_SEC << std::endl;
+    }
+    timeMultiplicationSlowly.push_back(time_hik_slow_multiply/CLOCKS_PER_SEC);
+
+    Vector diff = beta - betaSlow;
+    double error = diff.normL2();
+    
+    errorsMultiplication.push_back(error);
+
+    if (verbose)
+    {
+      std::cerr << "error: " << error << std::endl;
+    }
+    
+    //---------------- EVALUATE THE ERROR AND RUNTIME FOR COMPUTING k_* \alpha (aka kernel_sum) ---------------------------
+    
+    Vector xstar;
+    generateRandomFeatures(d, xstar);
+
+    double kSum;    
+    //tic
+    time_t  hik_ksum_start = clock();
+    fastHIK.hik_kernel_sum(A, B, xstar, kSum);
+    //toc
+    float time_hik_ksum = (float) (clock() - hik_ksum_start); 
+    if (verbose)
+      std::cerr << "Time for HIK efficient kSum: " << time_hik_ksum/CLOCKS_PER_SEC << std::endl;
+    timeKSumEfficiently.push_back(time_hik_ksum/CLOCKS_PER_SEC);
+    
+    if (verbose)
+    {
+      std::cerr << "kSum efficiently: " << kSum << std::endl;
+    }
+    
+    //tic
+    time_t  hik_ksum_slowly_start = clock();
+    std::vector<double> xstar_stl (d, 0.0);
+    for (int i = 0; i < d; i++)
+    {
+      xstar_stl[i] = xstar[i];
+    }
+    
+    Vector kstarSlow ( hikSlow.computeKernelVector(rand_feat_transposed, xstar_stl));
+    double kSumSlowly = alphas.scalarProduct(kstarSlow);
+    
+    //toc
+    float time_hik_slowly_ksum = (float) (clock() - hik_ksum_slowly_start); 
+    if (verbose)
+      std::cerr << "Time for HIK slowly kSum: " << time_hik_slowly_ksum/CLOCKS_PER_SEC << std::endl;
+    timeKSumSlowly.push_back(time_hik_slowly_ksum/CLOCKS_PER_SEC);
+    
+    if (verbose)
+    {
+      std::cerr << "kSumSlowly: " << kSumSlowly << std::endl;
+    }
+    
+    
+    double kSumError( fabs(kSumSlowly - kSum)); 
+    errorsKSum.push_back(kSumError);
+    
+    if (verbose)
+      std::cerr << "kSumError: " << kSumError << std::endl;
+  }
+
+  //---------------- FINAL OUTPUT ---------------------------
+  std::cerr << std::endl <<  "n - d - timePreparationEfficiently - timeMultiplicationEfficiently - timePreparationSlowly - timeMultiplicationSlowly - timeKSumEfficiently - timeKSumSlowly" << std::endl;
+  for (int i = 0; i < (int) trainingSizes.size(); i++)
+  {
+    std::cerr << trainingSizes[i] << " ";
+    std::cerr << dataDimensions[i] << " ";
+    std::cerr << timePreparationEfficiently[i] << " ";
+    std::cerr << timeMultiplicationEfficiently[i] << " ";
+    std::cerr << timePreparationSlowly[i] << " ";
+    std::cerr << timeMultiplicationSlowly[i] << " ";
+    std::cerr << timeKSumEfficiently[i] << " ";
+    std::cerr << timeKSumSlowly[i] << " ";
+    std::cerr << std::endl;
+  }
+  
+  std::cerr << std::endl << "n - d - errorsMultiplication - errorsKSum" << std::endl;
+  for (int i = 0; i < (int) trainingSizes.size(); i++)
+  {
+    std::cerr << trainingSizes[i] << " ";
+    std::cerr << dataDimensions[i] << " ";
+    std::cerr << errorsMultiplication[i] << " ";
+    std::cerr << errorsKSum[i] << " ";
+    std::cerr << std::endl;
+  }
+  
+  return 0;
+}

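For reference, the two comparisons this benchmark makes, written out in the notation of the code above: with the histogram intersection (min) kernel

\[
k(x, x') = \sum_{d=1}^{D} \min(x_d, x'_d), \qquad
\beta = K \alpha \;\text{ with }\; K_{ij} = k(x_i, x_j), \qquad
\mathrm{kSum} = k_*^\top \alpha \;\text{ with }\; (k_*)_i = k(x_*, x_i),
\]

the fast path (hik_prepare_alpha_multiplications, hik_kernel_multiply, hik_kernel_sum) and the baseline path (explicit kernel matrix and kernel vector) have to agree up to numerical error; the program reports \( \lVert \beta_{\mathrm{fast}} - \beta_{\mathrm{slow}} \rVert_2 \) and \( \lvert \mathrm{kSum}_{\mathrm{fast}} - \mathrm{kSum}_{\mathrm{slow}} \rvert \). Assuming the binary is built by the progs Makefile above, a run with explicit parameters looks like completeEvaluationFastMinkernel -n 5000 -d 20 -v 1; without -n, the loop sweeps n from 1000 to 10000 in steps of 1000.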
+ 122 - 0
progs/toyExample.cpp

@@ -0,0 +1,122 @@
+/** 
+* @file toyExample.cpp
+* @brief Demo-Program to show how to call some methods of the GPHIKClassifier class
+* @author Alexander Freytag
+* @date 19-10-2012
+*/
+
+#include <iostream>
+#include <vector>
+
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+#include <core/vector/MatrixT.h>
+#include <core/vector/VectorT.h>
+
+#include "gp-hik-core/GPHIKClassifier.h"
+
+using namespace std;  // C++ standard library
+using namespace NICE;  // nice-core
+
+int main (int argc, char* argv[])
+{  
+  
+  Config conf ( argc, argv );
+  std::string trainData = conf.gS( "main", "trainData", "progs/toyExampleSmallScaleTrain.data" );
+
+  
+  //------------- read the training data --------------
+  
+  NICE::Matrix dataTrain;
+  NICE::Vector yBinTrain;
+  NICE::Vector yMultiTrain;  
+
+  std::ifstream ifsTrain ( trainData.c_str() , ios::in );
+
+  if (ifsTrain.good() )
+  {
+    ifsTrain >> dataTrain;
+    ifsTrain >> yBinTrain;
+    ifsTrain >> yMultiTrain;
+    ifsTrain.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read training data, aborting." << std::endl;
+    return -1;
+  }
+  
+  //----------------- convert data to sparse data structures ---------
+  std::vector< NICE::SparseVector *> examplesTrain;
+  examplesTrain.resize( dataTrain.rows() );
+  
+  std::vector< NICE::SparseVector *>::iterator exTrainIt = examplesTrain.begin();
+  for (int i = 0; i < (int)dataTrain.rows(); i++, exTrainIt++)
+  {
+    *exTrainIt =  new NICE::SparseVector( dataTrain.getRow(i) );
+  }
+  
+  //----------------- train our classifier -------------
+  conf.sB("GPHIKClassifier", "verbose", false);
+  GPHIKClassifier * classifier  = new GPHIKClassifier ( &conf );  
+  classifier->train ( examplesTrain , yMultiTrain );
+  
+  // ------------------------------------------
+  // ------------- CLASSIFICATION --------------
+  // ------------------------------------------
+  
+  
+  //------------- read the test data --------------
+  
+  NICE::Matrix dataTest;
+  NICE::Vector yBinTest;
+  NICE::Vector yMultiTest;  
+  
+  std::string testData = conf.gS( "main", "testData", "progs/toyExampleTest.data" );  
+  std::ifstream ifsTest ( testData.c_str(), ios::in );
+  if (ifsTest.good() )
+  {
+    ifsTest >> dataTest;
+    ifsTest >> yBinTest;
+    ifsTest >> yMultiTest;
+    ifsTest.close();  
+  }
+  else 
+  {
+    std::cerr << "Unable to read test data, aborting." << std::endl;
+    return -1;
+  }
+  
+  //TODO adapt this to the actual number of classes
+  NICE::Matrix confusionMatrix(3, 3, 0.0);
+  
+  NICE::Timer t;
+  double testTime (0.0);
+  
+  for (int i = 0; i < (int)dataTest.rows(); i++)
+  {
+    //----------------- convert data to sparse data structures ---------
+    NICE::SparseVector * example =  new NICE::SparseVector( dataTest.getRow(i) );
+       
+    int result;
+    NICE::SparseVector scores;
+   
+    // and classify
+    t.start();
+    classifier->classify( example, result, scores );
+    t.stop();
+    testTime += t.getLast();
+    
+    confusionMatrix(result, yMultiTest[i]) += 1.0;
+    
+    // free the sparse test example allocated above
+    delete example;
+  }
+  
+  std::cerr << "Time for testing: " << testTime << std::endl;
+  
+  confusionMatrix.normalizeColumnsL1();
+  std::cerr << confusionMatrix << std::endl;
+
+  std::cerr << "average recognition rate: " << confusionMatrix.trace()/confusionMatrix.rows() << std::endl;
+  
+  // free the classifier allocated above
+  delete classifier;
+  
+  return 0;
+}

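Regarding the TODO before the confusion-matrix allocation above, a minimal sketch of deriving the class count from the labels instead of hard-coding 3; the helper name countClasses is hypothetical, and it assumes the labels are the consecutive integers 0, 1, ..., C-1, as in the toy data below:

    #include <algorithm>
    
    #include <core/vector/VectorT.h>

    // hypothetical helper: number of classes for labels 0, 1, ..., C-1
    int countClasses ( const NICE::Vector & labels )
    {
      int numClasses = 0;
      for ( int i = 0; i < (int)labels.size(); i++ )
        numClasses = std::max ( numClasses, (int)labels[i] + 1 );
      return numClasses;
    }

The allocation then becomes NICE::Matrix confusionMatrix ( countClasses(yMultiTrain), countClasses(yMultiTrain), 0.0 );.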
+ 64 - 0
progs/toyExampleSmallScaleTrain.data

@@ -0,0 +1,64 @@
+60 x 49
+0.0259695 0.0105403 0.0110604 0.0160947 0.00950027 0.0188586 0.0118317 0.0126629 0.00929102 0.0130691 0.00773724 0.0117242 0.0186207 0.00753209 0.0112492 0.00827115 0.00863989 0.014142 0.00777097 0.0128102 0.0114344 0.00784775 0.0160248 0.0107051 0.00827794 0.00769294 0.00802757 0.00909618 0.0119556 0.0140857 0.0100281 0.00829676 0.01237 0.00871181 0.01287 0.0101447 0.00948488 0.00805543 0.00947402 0.00882249 0.00796149 0.0172038 0.0114972 0.0125575 0.00895035 0.0116704 0.0145875 0.00764121 0.0151904 
+0.0271022 0.011318 0.00996974 0.0110196 0.00925289 0.0109691 0.0108103 0.0108613 0.0116696 0.00787904 0.0109383 0.00769676 0.00829481 0.00969134 0.00929602 0.0130977 0.0177721 0.0186529 0.00898793 0.00846386 0.00851917 0.0142691 0.00784852 0.0179733 0.00924451 0.0112334 0.0105803 0.013269 0.01111 0.0107221 0.0134643 0.00755481 0.0162522 0.00919093 0.0112732 0.0108264 0.00968506 0.0159471 0.0106169 0.017662 0.00839274 0.0103854 0.00808056 0.011202 0.0145698 0.0101001 0.0143089 0.0134676 0.0107477 
+0.0230633 0.0114753 0.00784154 0.00929423 0.00889608 0.0116286 0.0150744 0.0123752 0.0122833 0.010329 0.0124179 0.0140873 0.0126277 0.0111987 0.0103069 0.00949489 0.0146096 0.00890901 0.00783397 0.0112901 0.013117 0.00811653 0.00767978 0.0122787 0.00962363 0.0200206 0.011309 0.0105445 0.00752052 0.0122045 0.00849636 0.0113249 0.00950693 0.00775034 0.0129373 0.010784 0.0114393 0.0119846 0.0088356 0.00808703 0.0103027 0.0111316 0.0077251 0.0119095 0.00794289 0.0154611 0.0132691 0.0132304 0.0142578 
+0.0239072 0.00923091 0.0108958 0.0105975 0.0148438 0.0115835 0.0139692 0.00877158 0.00861952 0.016659 0.0123196 0.0199786 0.0134272 0.00917523 0.00795431 0.019383 0.0159615 0.0100525 0.00801319 0.0117417 0.00912759 0.0101336 0.00795618 0.0111197 0.0095205 0.0110377 0.0137116 0.0111116 0.0120717 0.0139439 0.0166161 0.0142446 0.00807274 0.00767242 0.0149445 0.0119278 0.013909 0.00800654 0.0104336 0.0131392 0.00808004 0.013861 0.00981833 0.0118265 0.0126384 0.0096132 0.0134302 0.0162862 0.0119054 
+0.0223962 0.0127152 0.00925128 0.0129104 0.00813518 0.0136416 0.0122769 0.0114984 0.0128947 0.00802646 0.00793451 0.00908589 0.0137168 0.00981374 0.0124787 0.0109228 0.0106603 0.0149199 0.00805851 0.00907345 0.011635 0.0144065 0.0131782 0.00814107 0.00788798 0.0169886 0.0138701 0.0125473 0.0075731 0.0134445 0.0179868 0.0129148 0.0148743 0.00902368 0.0182203 0.00754253 0.0160189 0.0109286 0.00852182 0.0159449 0.00850632 0.00917183 0.0176302 0.0150493 0.0106994 0.0141455 0.011174 0.00956637 0.0159048 
+0.0304368 0.00985075 0.00924785 0.00869394 0.0140252 0.0100286 0.0161972 0.0158769 0.0108712 0.00941128 0.0100974 0.0138416 0.00780139 0.00798227 0.0115011 0.0118319 0.0096153 0.00843833 0.0123238 0.00848155 0.0117371 0.01002 0.00968988 0.00976564 0.0102043 0.00838277 0.0199357 0.0101832 0.00840926 0.00891564 0.00995065 0.0111052 0.0101932 0.0129618 0.0135528 0.0115114 0.00822212 0.0105505 0.0133001 0.0105868 0.0126539 0.0100525 0.00810846 0.00946776 0.0081359 0.008567 0.00906365 0.0202011 0.0115662 
+0.0241134 0.0143089 0.00763809 0.0103048 0.0147389 0.0116759 0.0143365 0.00955372 0.0136892 0.0112737 0.0131253 0.0128577 0.0136494 0.00796086 0.0117908 0.0114703 0.0119028 0.00813583 0.0144375 0.00849329 0.0126432 0.0141733 0.0078145 0.0144172 0.00884723 0.0151062 0.008119 0.00825625 0.0101327 0.00828455 0.00826487 0.0114902 0.0122419 0.00858807 0.0128025 0.0150127 0.01324 0.00885035 0.0110488 0.011903 0.00945571 0.017715 0.00826096 0.00909538 0.0103109 0.0098135 0.00834678 0.016477 0.0128185 
+0.0256172 0.00805043 0.0158787 0.0123214 0.0124122 0.013729 0.0137244 0.00867987 0.0135993 0.00762924 0.00890781 0.0144716 0.00900865 0.00791389 0.0147116 0.0107582 0.0110171 0.012628 0.00907872 0.00827167 0.0120265 0.00826881 0.0151552 0.0113008 0.0116872 0.00987471 0.0148357 0.00969207 0.0142194 0.0111191 0.00852744 0.0129664 0.0123777 0.00889003 0.00941833 0.00866721 0.0112646 0.0138308 0.010994 0.00828151 0.0119669 0.0113105 0.0198909 0.0111647 0.0151214 0.00941323 0.0157938 0.00781977 0.0104112 
+0.0234519 0.0128553 0.00967367 0.00816595 0.0104776 0.00946706 0.00879412 0.0133896 0.0180495 0.0107266 0.0163932 0.00757559 0.0105604 0.0166635 0.0135827 0.0134133 0.0101368 0.00822928 0.00839668 0.0129781 0.0084781 0.0108501 0.00910018 0.0083439 0.00794161 0.00977823 0.0155439 0.0195073 0.0100602 0.0105728 0.0139483 0.0114243 0.0191313 0.00922986 0.00896528 0.0127137 0.0100254 0.00847564 0.00751717 0.0118792 0.0112046 0.0140585 0.0108731 0.0103888 0.00972049 0.0172753 0.00852407 0.00983208 0.0120798 
+0.0244513 0.00916373 0.0129416 0.00941483 0.0161396 0.00909459 0.00879067 0.00823309 0.0186154 0.00810004 0.00980972 0.0139805 0.0110224 0.0152112 0.00918473 0.00759198 0.0121576 0.00774559 0.0131761 0.0162813 0.0100207 0.0136941 0.00990834 0.0128233 0.0139276 0.00916473 0.0158321 0.00826871 0.0150633 0.0148289 0.0160133 0.0119603 0.0115088 0.0119952 0.0185482 0.0159011 0.012829 0.00832603 0.013801 0.00838511 0.0113083 0.0130678 0.00842253 0.00917392 0.00795815 0.0149192 0.0113192 0.00929634 0.0113226 
+0.0270594 0.00807159 0.00991026 0.0105547 0.0148077 0.0128059 0.0122153 0.0143011 0.00932909 0.0151779 0.0127106 0.00992389 0.0183903 0.0156485 0.0157064 0.0164128 0.0116402 0.00943215 0.0105756 0.00813119 0.0112977 0.0131093 0.0140546 0.0183521 0.010916 0.0138169 0.0102092 0.013503 0.00970905 0.00830962 0.0181664 0.00841476 0.0101295 0.0111494 0.00986364 0.0178961 0.0174996 0.0137928 0.0163185 0.0157177 0.017789 0.0110789 0.0117625 0.0113533 0.0087363 0.0103995 0.0153326 0.0196703 0.0113684 
+0.024149 0.0105725 0.00887366 0.00989522 0.0153129 0.0112372 0.0173714 0.0107147 0.0101773 0.0167279 0.0090978 0.0079373 0.0103145 0.0152955 0.012309 0.0210111 0.0080108 0.00998822 0.0157671 0.00933217 0.00820574 0.0187734 0.0177674 0.009834 0.00918752 0.0179541 0.0186268 0.00756802 0.0109617 0.00983035 0.0112832 0.00861512 0.0245526 0.00975396 0.0123681 0.0132452 0.00788526 0.0131673 0.018516 0.0118614 0.0147822 0.0079077 0.0167211 0.0108169 0.00820817 0.0117829 0.00824693 0.0122202 0.0116028 
+0.0207545 0.0125084 0.0108505 0.0104894 0.0128691 0.00994967 0.00832289 0.0130178 0.0150846 0.0160362 0.00750938 0.0108468 0.0160085 0.0127219 0.0144578 0.0124182 0.0105225 0.0101767 0.0107503 0.0144781 0.00883719 0.0102866 0.00984082 0.00817868 0.0163922 0.00778454 0.00821095 0.00961263 0.0127874 0.013317 0.0104098 0.0120479 0.0175316 0.00782725 0.00831699 0.0125904 0.015201 0.0105327 0.00952129 0.0111621 0.00871013 0.0141928 0.00771442 0.00870064 0.00921121 0.00832019 0.0095926 0.0152257 0.00842376 
+0.0313984 0.007982 0.00995331 0.00981045 0.00876915 0.0126173 0.00999689 0.00851341 0.00934417 0.00899455 0.0100468 0.0108085 0.00850033 0.0086989 0.0091643 0.00956867 0.0089853 0.00985235 0.0190657 0.010688 0.00827333 0.0099693 0.0134224 0.0174105 0.0152557 0.00929907 0.0161777 0.0150555 0.00806505 0.00850974 0.0096831 0.0104143 0.0118908 0.00930829 0.0125519 0.011415 0.00957852 0.00792941 0.00892285 0.0101932 0.0121507 0.0139159 0.0122315 0.00979685 0.0107431 0.00871917 0.0141024 0.00864975 0.0106822 
+0.0223997 0.0080833 0.0119063 0.0123641 0.00763316 0.0119364 0.00935 0.0110724 0.0114145 0.0095334 0.0119481 0.009164 0.0193016 0.0196916 0.00848646 0.00863559 0.0168786 0.0128225 0.0113463 0.00856433 0.00803072 0.0111494 0.012586 0.0175893 0.0178392 0.0103538 0.00967134 0.00806565 0.0128146 0.0199558 0.0162451 0.0099261 0.0121075 0.0158896 0.00812237 0.00779004 0.00800657 0.0109748 0.00877823 0.00833482 0.015287 0.00981958 0.012355 0.00902026 0.0118177 0.00912929 0.0143999 0.0107564 0.0103279 
+0.0227656 0.00873123 0.00783844 0.0155652 0.00943663 0.0100323 0.00815993 0.0101672 0.0132211 0.0153565 0.0105628 0.0158833 0.0152073 0.00801318 0.0109822 0.00822322 0.0110543 0.0120444 0.00895155 0.0150476 0.012667 0.00974858 0.0103141 0.00822587 0.00752527 0.0115945 0.00912847 0.0111587 0.00892064 0.0105066 0.0152925 0.00898104 0.0084449 0.0135059 0.0115401 0.00794416 0.00754046 0.00796978 0.0101548 0.00950692 0.0128038 0.0151233 0.00849278 0.00925549 0.00973895 0.0134272 0.0143091 0.00965296 0.00872116 
+0.0220867 0.00952122 0.0142567 0.00857123 0.0112302 0.00998641 0.0104499 0.0134628 0.00992557 0.0138055 0.0172484 0.00923038 0.00773323 0.0141641 0.0107527 0.0104726 0.0129019 0.00930548 0.00855496 0.0115993 0.0166085 0.0102857 0.00924597 0.0110356 0.0116808 0.00765342 0.00769771 0.00903775 0.0141177 0.00807407 0.0120771 0.00976994 0.0188901 0.0127345 0.00857711 0.0182242 0.0117 0.0091719 0.0172657 0.00978154 0.00936656 0.0093512 0.0121914 0.0165407 0.00926855 0.0129943 0.00847156 0.00906975 0.0107144 
+0.0219103 0.017343 0.0196978 0.0104879 0.0105515 0.00999576 0.00928981 0.0149162 0.0090834 0.0113828 0.0109014 0.0153987 0.00770247 0.00823608 0.0128081 0.00914814 0.00751693 0.0107139 0.0125536 0.0102118 0.0116546 0.0123145 0.00896101 0.0118348 0.0104389 0.0133669 0.0111383 0.0153073 0.00830141 0.00823326 0.0108762 0.00976553 0.00819337 0.00757586 0.0104232 0.0150566 0.0112633 0.00918437 0.0088266 0.0131901 0.0148984 0.0114526 0.0173006 0.0126395 0.0134126 0.00977013 0.00799374 0.0209565 0.0158383 
+0.024387 0.0100473 0.0085098 0.0110007 0.0102743 0.0145895 0.0142096 0.00932722 0.0115369 0.00849483 0.00933373 0.00822497 0.0101023 0.0129909 0.0124775 0.0171622 0.00770938 0.0136457 0.00845559 0.00977008 0.00957626 0.00809545 0.0189998 0.00803223 0.00904886 0.00753141 0.00798637 0.0117644 0.00897732 0.00867011 0.0091308 0.0118075 0.00975002 0.0132517 0.0159158 0.0167903 0.0115717 0.00884695 0.016522 0.00820418 0.0075003 0.0130556 0.00946251 0.00784477 0.00826967 0.013128 0.0123578 0.0170744 0.0188824 
+0.0288534 0.0134006 0.0110453 0.0140292 0.015017 0.0128366 0.00788706 0.0178822 0.0127398 0.00967744 0.00767562 0.00849709 0.0133257 0.00838418 0.0171635 0.0132189 0.0105086 0.0139051 0.00804162 0.0101824 0.0154833 0.0156085 0.00862076 0.0130732 0.014416 0.00788632 0.0104821 0.0128714 0.00923762 0.010608 0.0136589 0.0134713 0.0138101 0.00986637 0.00790978 0.0112465 0.0159947 0.00901536 0.0101493 0.0134912 0.00978309 0.015527 0.00969498 0.0126513 0.0111776 0.00755319 0.0184152 0.00789488 0.00930304 
+0.00880662 0.0216464 0.0210872 0.0105126 0.0127658 0.0112209 0.0115129 0.0113154 0.011329 0.0134951 0.00901304 0.00848824 0.0177882 0.0190767 0.0136747 0.010352 0.0152489 0.0109627 0.010929 0.0127208 0.0107025 0.0114791 0.00890777 0.0104932 0.0129995 0.0156713 0.00762406 0.0129343 0.0108742 0.0127172 0.0102928 0.00827948 0.0136815 0.0113224 0.00843217 0.0152795 0.0149782 0.007825 0.0109507 0.0137877 0.0127452 0.0136955 0.00991781 0.00827777 0.0179353 0.00886824 0.0141212 0.0168107 0.0173965 
+0.00900718 0.0217041 0.0139392 0.00853973 0.0096694 0.0162534 0.012773 0.00981645 0.00805063 0.00937836 0.0138217 0.0112746 0.0086497 0.0152205 0.00913559 0.0166785 0.0145056 0.00797746 0.00843824 0.00793002 0.0169129 0.00869163 0.0107405 0.00847737 0.01042 0.00827518 0.0117657 0.00869265 0.0109001 0.0162319 0.0165439 0.0132549 0.0102501 0.00996245 0.0114955 0.0101729 0.0117175 0.0174851 0.0104437 0.00795506 0.0126651 0.0108139 0.0143188 0.00871343 0.012538 0.0223205 0.00845874 0.00775098 0.0148045 
+0.0128452 0.0206778 0.00942699 0.0162227 0.0084621 0.00894997 0.00974841 0.0148166 0.00800937 0.0123893 0.00793666 0.0109363 0.0129667 0.011252 0.0132105 0.018058 0.0100507 0.00875572 0.010091 0.0192821 0.00799599 0.0140755 0.0137605 0.00786973 0.0154267 0.0131201 0.0200065 0.0200133 0.00802706 0.0111778 0.0172287 0.0111324 0.00835332 0.00857922 0.00900446 0.0134356 0.0128486 0.00765603 0.0153792 0.0136076 0.00936792 0.0111302 0.00957391 0.00810223 0.0118117 0.0133464 0.0127174 0.0105692 0.00892939 
+0.0113189 0.025128 0.00901293 0.0131072 0.0129681 0.0157613 0.00831601 0.0111685 0.0130465 0.00889493 0.012909 0.0119596 0.00750153 0.0109797 0.0112245 0.0117992 0.014789 0.0134853 0.0105948 0.00783141 0.0103964 0.0106642 0.00827252 0.0112292 0.00756793 0.0107174 0.0139279 0.0112388 0.00919734 0.00932427 0.00860366 0.0131025 0.0195901 0.00827442 0.0112736 0.00896313 0.0156302 0.0116154 0.00805878 0.014369 0.0132608 0.0105323 0.00876208 0.0118269 0.00961405 0.0105055 0.0126735 0.0082246 0.00777274 
+0.0103316 0.031193 0.0100607 0.0125212 0.0160683 0.00981616 0.00925346 0.0075162 0.00980768 0.00900646 0.00890371 0.0136854 0.0145326 0.00856758 0.0126471 0.0137377 0.00961645 0.0078712 0.0150319 0.0114971 0.0110445 0.0115416 0.0126913 0.0116137 0.00759611 0.00829835 0.00931262 0.0165359 0.00807646 0.0112452 0.0129125 0.0138121 0.0144461 0.009263 0.00773189 0.0118874 0.0141273 0.0141659 0.00968382 0.0129926 0.0109789 0.0101879 0.0102743 0.0165389 0.0144569 0.00819232 0.0153246 0.00796191 0.00828517 
+0.0130705 0.0233932 0.010609 0.00792477 0.0102463 0.0121859 0.0156888 0.0116376 0.0167157 0.0187747 0.00783081 0.0121942 0.00987135 0.0127848 0.00864133 0.00867839 0.0102234 0.0201533 0.00969555 0.00895597 0.00927638 0.0105126 0.0103961 0.0104856 0.0136381 0.00759534 0.0106049 0.0110775 0.00906157 0.0190068 0.00763674 0.00796651 0.00811015 0.00867397 0.00900911 0.00931233 0.00754657 0.00751041 0.00936925 0.0128884 0.0142454 0.0183237 0.00776128 0.00834111 0.0163471 0.0145873 0.0105058 0.0106947 0.0119503 
+0.00775893 0.020603 0.0108698 0.00797924 0.00989985 0.00925665 0.0116527 0.0134322 0.0113285 0.0132066 0.0110814 0.016983 0.0160277 0.0102325 0.0121382 0.010984 0.00756652 0.0125121 0.0107692 0.00966968 0.00933033 0.0133006 0.0103347 0.00842852 0.008004 0.00865679 0.0131454 0.00917677 0.0194663 0.0112063 0.00876808 0.00773606 0.0201326 0.0100063 0.00910977 0.0113968 0.00985285 0.00829201 0.0115724 0.00918228 0.00913257 0.016697 0.0110031 0.011874 0.0143604 0.0129367 0.0122788 0.0141067 0.0165299 
+0.0106273 0.0244861 0.00907672 0.0159438 0.0174978 0.0170136 0.0141612 0.0139259 0.0126844 0.0096883 0.0111207 0.00927742 0.00780215 0.0112174 0.00753353 0.00820472 0.00798994 0.0111971 0.0129177 0.0120068 0.0165062 0.00856146 0.00820934 0.0128443 0.0101435 0.0123611 0.00848645 0.0159466 0.0117611 0.00778523 0.0108223 0.00792226 0.0146178 0.0167422 0.00888029 0.0084917 0.0103163 0.0108963 0.0131455 0.009296 0.00776834 0.0101146 0.0101946 0.0158065 0.0104041 0.0162822 0.0133552 0.01034 0.00806414 
+0.00939357 0.0231166 0.00847506 0.0122524 0.012433 0.0093421 0.0143454 0.0101233 0.0107128 0.0135012 0.00833962 0.0104653 0.00775288 0.0140836 0.0107977 0.00846482 0.0120958 0.0160775 0.0106818 0.00906504 0.00925368 0.0121868 0.0145626 0.0136249 0.00756399 0.0135822 0.0108359 0.00928053 0.0115864 0.0130171 0.00915042 0.00795784 0.00875577 0.00889915 0.00949089 0.0081355 0.00818921 0.0152822 0.0107207 0.0125019 0.00785935 0.00807379 0.0133005 0.0153737 0.00844602 0.008706 0.00832958 0.0176599 0.0113929 
+0.00937036 0.0244766 0.00882136 0.0115904 0.0101322 0.0149058 0.0100778 0.019607 0.0131377 0.0130118 0.0124715 0.0103685 0.00807073 0.00928566 0.0108924 0.0103663 0.0151538 0.0140399 0.0135388 0.0116879 0.00862708 0.0103798 0.0149153 0.0200176 0.00864548 0.0102865 0.0123699 0.0148262 0.0103001 0.0126099 0.0152885 0.0123521 0.0193287 0.00835033 0.00942435 0.0101484 0.014357 0.0137524 0.0175744 0.00881443 0.0139454 0.00923441 0.0154462 0.00888159 0.0111433 0.0163142 0.00983176 0.010001 0.00882299 
+0.00899205 0.0217599 0.00892213 0.0160531 0.0110302 0.0103801 0.0153188 0.012857 0.0122882 0.0084038 0.00827598 0.00821238 0.0100706 0.00916037 0.0145212 0.00997158 0.00996835 0.00804101 0.00832772 0.0085908 0.0136335 0.0131631 0.0118268 0.0083565 0.0106843 0.0181602 0.0116033 0.0126854 0.0160203 0.00970096 0.00926267 0.00816163 0.00921396 0.0150989 0.0111025 0.00957662 0.0173483 0.0118917 0.00918161 0.014099 0.0153688 0.0134076 0.0103182 0.013726 0.0180079 0.00983955 0.0120475 0.00939958 0.0153206 
+0.0105588 0.0234418 0.00939131 0.0113621 0.0130088 0.0153581 0.00841515 0.00865124 0.0135816 0.0162395 0.0203847 0.0127909 0.00829511 0.00881781 0.0163614 0.00820557 0.0124668 0.0123785 0.00910737 0.0111985 0.00886406 0.0112144 0.00894312 0.0152993 0.0124651 0.0104476 0.0100528 0.0118992 0.00857994 0.0135462 0.0162855 0.00787341 0.00978844 0.00891056 0.00849655 0.0124034 0.011003 0.00954397 0.00756585 0.00871334 0.0111837 0.0148349 0.0131524 0.016365 0.00985213 0.0115147 0.00986463 0.00915865 0.00835631 
+0.0089894 0.0268708 0.0120246 0.0114489 0.011564 0.0173305 0.00881312 0.0141614 0.013083 0.00862102 0.00989734 0.0151296 0.0138561 0.00752894 0.0108879 0.0181947 0.00986264 0.0106516 0.00949566 0.0129874 0.00763186 0.0100316 0.0109182 0.0146944 0.0102073 0.00973839 0.0161131 0.0085112 0.015397 0.00965817 0.012005 0.00856396 0.0127106 0.018741 0.0150085 0.0141986 0.0113409 0.00952792 0.00785132 0.00985255 0.00927583 0.0122464 0.00818961 0.00821668 0.0136789 0.0129419 0.0121544 0.0104701 0.0158305 
+0.00777133 0.0338468 0.00751626 0.0160865 0.0101277 0.00850899 0.00752594 0.0095737 0.0106809 0.00867979 0.0113247 0.012077 0.00994838 0.00854107 0.0155612 0.0108833 0.00920965 0.00920464 0.010517 0.00900608 0.0148061 0.00965102 0.0105772 0.008064 0.0109489 0.0120811 0.0133097 0.0132957 0.0141527 0.01249 0.0112634 0.00813683 0.0110562 0.0108294 0.00796001 0.0124461 0.0132342 0.010618 0.0143029 0.00839652 0.0109242 0.00987172 0.00908014 0.0119396 0.01279 0.014592 0.00924242 0.00931531 0.00795973 
+0.00822071 0.026167 0.0104337 0.00948422 0.0117414 0.0210517 0.0137955 0.0129675 0.0138387 0.00893837 0.0151963 0.0171065 0.00775741 0.0101589 0.0141498 0.0187153 0.00947764 0.00774628 0.0154054 0.00989548 0.0152872 0.009876 0.01436 0.0128548 0.0133433 0.0126873 0.0165809 0.00761219 0.00873266 0.0176218 0.00890786 0.007627 0.00928098 0.0118897 0.0171019 0.0098913 0.013492 0.0121169 0.011258 0.0122963 0.0108707 0.011473 0.0107655 0.0148202 0.00982258 0.00957453 0.0111155 0.0100285 0.00813544 
+0.0120856 0.026171 0.00989043 0.00808503 0.00865407 0.0113607 0.00909992 0.013825 0.0130143 0.0134441 0.0126256 0.0119627 0.0108432 0.0130967 0.00952968 0.0164724 0.0111507 0.0160875 0.00754196 0.00870893 0.0187831 0.00819476 0.0120628 0.00911874 0.00942726 0.0179704 0.0110552 0.00994693 0.0129673 0.00918318 0.00912005 0.00789547 0.0103583 0.00988415 0.00999427 0.0121406 0.0157666 0.00870823 0.0119195 0.013 0.00982782 0.008185 0.0125848 0.0125986 0.00854327 0.00809625 0.0127336 0.0165125 0.0135715 
+0.0131534 0.0251287 0.011663 0.0103692 0.0140887 0.0106316 0.0132454 0.0104939 0.00926397 0.0120671 0.0103348 0.00877237 0.0225838 0.0107394 0.0126268 0.00957499 0.00950101 0.0147754 0.0101303 0.00940025 0.01413 0.0137589 0.00832324 0.0161982 0.00979315 0.00869996 0.00832121 0.0110396 0.0102091 0.00861289 0.011158 0.00943551 0.00805955 0.0097125 0.013317 0.0124797 0.0093203 0.0134614 0.00954158 0.0125645 0.0152999 0.0152032 0.0124181 0.0149455 0.00942742 0.0148204 0.0132616 0.0178735 0.0104298 
+0.0128681 0.0239653 0.0105898 0.013532 0.0162459 0.0142268 0.0115755 0.0158183 0.0118954 0.0168663 0.0120186 0.0114937 0.00980749 0.0129433 0.00975411 0.0131276 0.010905 0.012303 0.0130469 0.00818084 0.0160419 0.0112622 0.019154 0.010586 0.0114233 0.017696 0.010955 0.0108811 0.00757016 0.00925705 0.0138948 0.0151963 0.0101173 0.01329 0.00948939 0.0121616 0.0129249 0.013639 0.00887597 0.00781413 0.00790878 0.00990957 0.017771 0.00860841 0.0075033 0.0110234 0.0158632 0.0129937 0.0116417 
+0.0111109 0.0294645 0.0120537 0.0111116 0.0117274 0.00853298 0.00787973 0.00964887 0.0130147 0.00973637 0.0131743 0.00921314 0.0122116 0.0117603 0.0154865 0.010949 0.00978931 0.0098883 0.0172717 0.00869153 0.00793727 0.0112716 0.00787567 0.00865546 0.010282 0.0108673 0.0143578 0.0116831 0.0118659 0.00807395 0.00845995 0.0114815 0.00858862 0.00772967 0.00801831 0.0101241 0.0131679 0.00841491 0.0190119 0.0119131 0.00985021 0.00776973 0.0103715 0.0124589 0.0159059 0.00987744 0.0106877 0.00904324 0.0103904 
+0.0207783 0.0260479 0.00997658 0.013466 0.00918262 0.0102103 0.0102057 0.012457 0.00807424 0.0108472 0.00922633 0.0146097 0.0119745 0.011336 0.00792642 0.012814 0.0102497 0.00972321 0.00987833 0.0110496 0.0144665 0.00823416 0.0110999 0.0122386 0.0127554 0.0113965 0.0152782 0.0100133 0.00860568 0.00944747 0.0137201 0.00764315 0.018137 0.0117797 0.00834321 0.0131745 0.00909519 0.0186882 0.0136019 0.00959667 0.0138963 0.0240943 0.00922153 0.00795525 0.0123477 0.00760298 0.00935222 0.0116062 0.0117592 
+0.00820278 0.00823624 0.0260592 0.00903996 0.00971909 0.00966642 0.00893984 0.0117034 0.0099061 0.0177389 0.0132994 0.012128 0.0102907 0.00821357 0.0130841 0.0103626 0.00939324 0.0128043 0.0114336 0.0183887 0.0122977 0.0136871 0.00766289 0.00772549 0.0091006 0.00988621 0.0140285 0.0132475 0.0149558 0.0140575 0.00941845 0.0135572 0.0127171 0.0101749 0.0133492 0.0123623 0.0157588 0.0132736 0.00873428 0.0232314 0.0144387 0.0107137 0.0114545 0.00904013 0.0133339 0.0114084 0.00760248 0.0116109 0.0140074 
+0.009646 0.0177584 0.0211102 0.00960976 0.0100134 0.0109611 0.0153114 0.010387 0.0165983 0.0175927 0.0101028 0.0138415 0.0113752 0.00790347 0.00757291 0.00788049 0.0113011 0.00869304 0.0144222 0.00752977 0.0112919 0.0142053 0.00801023 0.0135512 0.0189808 0.0138823 0.0103193 0.0148885 0.00916007 0.0111217 0.0113887 0.00874071 0.0126873 0.00977365 0.0158879 0.0135432 0.008609 0.0177525 0.00934098 0.00775321 0.0200725 0.0138581 0.00929476 0.0103565 0.0155505 0.0114794 0.0086308 0.00882947 0.0145632 
+0.0121874 0.0119632 0.0205059 0.00978061 0.0117281 0.0104614 0.0106371 0.0119173 0.00801365 0.00928938 0.00869799 0.0130824 0.00826153 0.0113958 0.00902546 0.0158337 0.0127575 0.0108629 0.0108841 0.0122359 0.011006 0.0104098 0.0118249 0.00972292 0.00757526 0.00852472 0.0108496 0.0122731 0.00866414 0.0102172 0.0118904 0.00838573 0.00840182 0.0145641 0.0107593 0.0102488 0.00931913 0.00907936 0.00976757 0.0106548 0.00773216 0.00831631 0.0087845 0.00974962 0.0102509 0.0077432 0.011432 0.00869996 0.00920229 
+0.0106396 0.010933 0.0255497 0.0130523 0.00784461 0.00802316 0.00839757 0.0145396 0.00936429 0.0104733 0.0104434 0.0117082 0.0108046 0.0143165 0.00999919 0.0131738 0.00875868 0.00904078 0.0159884 0.0159185 0.0100566 0.0169482 0.011197 0.00924774 0.0083769 0.00853495 0.00776002 0.0107559 0.0076253 0.0110167 0.0175894 0.00870061 0.0108202 0.015549 0.00798324 0.018877 0.0218267 0.0131679 0.0118787 0.0100219 0.00780855 0.00804774 0.00867327 0.0110219 0.0084428 0.00776008 0.00766939 0.0120955 0.00801992 
+0.00904685 0.00940322 0.0283131 0.0106521 0.00834238 0.0105714 0.0137723 0.0160339 0.0106749 0.0106381 0.0129054 0.0109451 0.0141554 0.0139853 0.00848258 0.0147618 0.00882912 0.0115394 0.0144487 0.0175726 0.00958771 0.0140424 0.01117 0.00765008 0.0151508 0.0153911 0.00774375 0.0158878 0.00870456 0.00851632 0.00798942 0.0155605 0.00763791 0.0151582 0.00907071 0.00860455 0.00927132 0.00840893 0.00814229 0.0173009 0.00972256 0.0114171 0.00879717 0.00768555 0.0100898 0.0108291 0.0117871 0.00967495 0.00888629 
+0.0128385 0.00900563 0.0300644 0.00903157 0.00825902 0.009528 0.0084389 0.00793054 0.0111465 0.0118638 0.00973942 0.0148717 0.0139167 0.00888057 0.010006 0.00958233 0.0114284 0.01154 0.00976147 0.0105533 0.0123792 0.0127316 0.0113913 0.00938501 0.0108933 0.0132494 0.0100134 0.00793871 0.0119074 0.0138443 0.00850183 0.0106248 0.00916611 0.0142109 0.0174247 0.00920319 0.00830646 0.00963313 0.00905203 0.0171472 0.0109715 0.0116792 0.00925263 0.0113661 0.00752427 0.00986478 0.0174423 0.00845846 0.00833033 
+0.00957112 0.0180033 0.0214033 0.00887345 0.0127341 0.0113676 0.00913553 0.0121502 0.00777487 0.00912217 0.00850362 0.0154226 0.0155775 0.00892688 0.0114641 0.00942511 0.0156517 0.015633 0.0160293 0.00924149 0.0129434 0.0100237 0.011184 0.0153932 0.0100437 0.0124477 0.0126745 0.0119709 0.0140577 0.0135944 0.0188757 0.0106933 0.010631 0.0137078 0.0129499 0.0105144 0.0119992 0.0131591 0.0118469 0.0149973 0.0176692 0.0125257 0.00758253 0.0138059 0.0134798 0.0110038 0.0132354 0.0107177 0.0134322 
+0.00812629 0.0102932 0.0261431 0.00963761 0.00906311 0.00865032 0.0109847 0.00928407 0.0077928 0.0135659 0.0130834 0.00843539 0.0185632 0.0078 0.0152633 0.00880949 0.0132717 0.0168182 0.0103695 0.0089427 0.0106294 0.0107249 0.0081818 0.0196271 0.00827723 0.0136525 0.00876158 0.00939363 0.0133215 0.0150743 0.0109293 0.011322 0.0127186 0.00856855 0.0225302 0.0100105 0.00911908 0.0105362 0.0105037 0.00855259 0.0135456 0.00982338 0.0107018 0.00759774 0.0152149 0.00881609 0.0143926 0.00817332 0.00882965 
+0.00933851 0.00938584 0.0231235 0.00996461 0.0157442 0.00896774 0.0112177 0.0102015 0.011268 0.0148865 0.0130086 0.00867837 0.0146227 0.00816429 0.00796441 0.00974237 0.00811556 0.00877084 0.00792424 0.00825762 0.00984428 0.00905027 0.0113013 0.0116487 0.0117235 0.01317 0.015996 0.0129839 0.0081974 0.00819008 0.00809835 0.00812309 0.0135022 0.0105967 0.0129249 0.00907971 0.011666 0.0102498 0.0102823 0.0127665 0.0177145 0.00864585 0.0109531 0.00915439 0.0163722 0.0169408 0.00839886 0.00808527 0.0132475 
+0.0222342 0.0123037 0.0228699 0.0133582 0.0122959 0.00961472 0.0170842 0.0097461 0.00928027 0.0188664 0.0103514 0.0090499 0.0162294 0.0104919 0.0103414 0.0151058 0.0105229 0.00968669 0.0112248 0.00925596 0.0118245 0.0125294 0.00951293 0.0091115 0.0100605 0.0151374 0.0113915 0.0181758 0.0174017 0.010752 0.013117 0.0109705 0.0178813 0.00835953 0.0152218 0.0105508 0.0112765 0.0148581 0.0117385 0.0142863 0.0142671 0.0102961 0.00755827 0.00936552 0.0149442 0.00892664 0.010704 0.00807923 0.0103055 
+0.0129276 0.0104311 0.0252761 0.00811758 0.0125915 0.0113035 0.0140784 0.00822778 0.011812 0.0077716 0.00817863 0.0158648 0.0134115 0.0114055 0.00879775 0.0143018 0.0127297 0.00820451 0.0140901 0.0148652 0.0105412 0.00750298 0.00978503 0.00777211 0.0106125 0.0137089 0.0100961 0.0106955 0.015458 0.0135725 0.00926069 0.00887185 0.00930858 0.00948317 0.0118273 0.0104596 0.0148373 0.0122002 0.0121313 0.00896792 0.00780052 0.0142384 0.0162418 0.0120706 0.0108821 0.00821499 0.00899263 0.0152771 0.00929117 
+0.00852234 0.00940389 0.0225402 0.0101909 0.0154755 0.0132661 0.00775918 0.00946033 0.00824469 0.0076171 0.0168582 0.0149323 0.012522 0.0119527 0.0135063 0.0140917 0.0138473 0.0136181 0.00928085 0.0120208 0.00863997 0.0101098 0.00867349 0.0127716 0.00905366 0.0104012 0.018715 0.0120801 0.0123961 0.00815842 0.0104416 0.00964878 0.0117088 0.00801172 0.0142401 0.0154078 0.0139241 0.0141611 0.00867801 0.0141621 0.0188441 0.00857864 0.00867845 0.0108114 0.00963661 0.0177781 0.0158615 0.00982639 0.0121654 
+0.0121402 0.0154205 0.0252128 0.012044 0.0151337 0.0107913 0.0132634 0.0144782 0.00976324 0.0160855 0.011114 0.0173437 0.0184064 0.00914569 0.00893225 0.00906138 0.0172909 0.0156876 0.0155012 0.0121186 0.00848431 0.0100921 0.00914746 0.0138674 0.00830907 0.00889417 0.00908697 0.0131617 0.00816596 0.00967272 0.0136794 0.0079061 0.0101782 0.0077368 0.0116617 0.00841962 0.0100697 0.0114808 0.0134463 0.0173815 0.00865972 0.0153556 0.0134127 0.00793805 0.0107085 0.00977577 0.0196041 0.0113622 0.0141046 
+0.0107332 0.0143686 0.0226478 0.0120877 0.0212239 0.0123182 0.0106044 0.0117571 0.00806122 0.0081122 0.0101717 0.0117068 0.00929421 0.012269 0.00839687 0.016317 0.00808212 0.0116174 0.010069 0.00762633 0.0118785 0.0170918 0.00906191 0.0167674 0.00790757 0.0109736 0.0122299 0.00811963 0.00960001 0.00925493 0.0123415 0.01325 0.00753833 0.0123176 0.0145587 0.0118021 0.00766919 0.0124873 0.00973067 0.00847831 0.012676 0.0123287 0.0097535 0.0106664 0.0107547 0.0138246 0.0195624 0.0100525 0.00962325 
+0.0119281 0.0122241 0.0242841 0.0101683 0.0130994 0.00879826 0.00823088 0.0114712 0.0161398 0.0100226 0.0100097 0.0125042 0.0138629 0.0117501 0.00756436 0.0144745 0.0136194 0.0124086 0.0114991 0.0143748 0.0181917 0.0136612 0.00962511 0.0103765 0.0101686 0.0113172 0.00870255 0.019323 0.00995193 0.0118664 0.00851607 0.00946212 0.00907452 0.00818943 0.0131628 0.00996086 0.0141944 0.0150947 0.0111328 0.0130214 0.00983337 0.0136919 0.0113602 0.00871084 0.0157483 0.010904 0.0236183 0.0143695 0.011746 
+0.016832 0.0125288 0.0228028 0.0150509 0.00825244 0.0089719 0.012338 0.0111107 0.00807729 0.0130397 0.0134324 0.0135269 0.0137549 0.00965155 0.0143415 0.0147855 0.00917189 0.00983137 0.0147469 0.00907731 0.0109262 0.0137849 0.00978295 0.0082266 0.00806215 0.00863035 0.00929611 0.0115255 0.0100205 0.0107482 0.0104747 0.0106799 0.0162054 0.00826127 0.0137276 0.00833625 0.0130827 0.00891462 0.010127 0.0143162 0.00873058 0.0115233 0.013493 0.0151634 0.00956458 0.00948767 0.0135435 0.0170953 0.00955095 
+0.008911 0.0106027 0.0220302 0.0087342 0.00759654 0.00953132 0.019344 0.0164507 0.00760068 0.0114743 0.010967 0.0113929 0.00926275 0.00968558 0.0181604 0.0111836 0.010156 0.0166397 0.00962995 0.00851972 0.012249 0.00780138 0.0104243 0.0111163 0.0123718 0.00754142 0.00995157 0.00903349 0.0116911 0.0102157 0.00941913 0.0126701 0.0079727 0.0127494 0.016391 0.00818587 0.0112246 0.0139862 0.0083602 0.00781615 0.0123179 0.0201769 0.0151085 0.0138697 0.0108868 0.0123237 0.00820885 0.00938777 0.00885651 
+0.0149063 0.00834672 0.0258672 0.020298 0.00900085 0.0138103 0.00818875 0.0148981 0.0153341 0.00844291 0.00993773 0.0079899 0.0112637 0.0084996 0.00997534 0.0199401 0.0107206 0.0139009 0.00872373 0.00833983 0.0142801 0.0138285 0.0164602 0.0131497 0.00819378 0.00781263 0.0113703 0.00913357 0.0109295 0.0120916 0.0112749 0.0122972 0.0135997 0.0127951 0.0207119 0.0090782 0.0173306 0.0127178 0.0111936 0.00840975 0.0121481 0.0086159 0.00999495 0.00944532 0.0110877 0.0114007 0.00954282 0.00847484 0.0157283 
+0.0136951 0.00947508 0.0253767 0.00785013 0.00987724 0.0116808 0.00820833 0.00857517 0.0110849 0.0106047 0.00791872 0.0129586 0.0109218 0.0113774 0.0118702 0.0124808 0.0104141 0.0133277 0.0157505 0.0120121 0.00832354 0.0150398 0.0145849 0.0106571 0.00781995 0.00948235 0.00791023 0.0130278 0.0177706 0.00934947 0.00917786 0.00976387 0.011106 0.00872163 0.0125572 0.00872901 0.00833814 0.0119969 0.0114674 0.00899175 0.00761545 0.0152811 0.0113137 0.0112011 0.0155478 0.0090728 0.00907604 0.00936913 0.00997295 
+0.00950312 0.0137425 0.0227265 0.0127662 0.0137664 0.00990442 0.00792798 0.0123088 0.00768171 0.0101829 0.0113718 0.0184991 0.0102974 0.0118027 0.00956921 0.0102662 0.0093895 0.0101815 0.00963158 0.00755334 0.0108035 0.0190889 0.0148255 0.0130755 0.0107963 0.0128566 0.018184 0.00967175 0.0103734 0.0109859 0.0101677 0.0172851 0.0137611 0.00897412 0.0156099 0.00973854 0.00879859 0.0107113 0.0115928 0.0120723 0.00812917 0.0140335 0.0103966 0.00821714 0.00988885 0.00879647 0.00862479 0.0178016 0.0108844 
+
+60 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+60 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 >

+ 154 - 0
progs/toyExampleTest.data

@@ -0,0 +1,154 @@
+150 x 49
+0.0259695 0.0105403 0.0110604 0.0160947 0.00950027 0.0188586 0.0118317 0.0126629 0.00929102 0.0130691 0.00773724 0.0117242 0.0186207 0.00753209 0.0112492 0.00827115 0.00863989 0.014142 0.00777097 0.0128102 0.0114344 0.00784775 0.0160248 0.0107051 0.00827794 0.00769294 0.00802757 0.00909618 0.0119556 0.0140857 0.0100281 0.00829676 0.01237 0.00871181 0.01287 0.0101447 0.00948488 0.00805543 0.00947402 0.00882249 0.00796149 0.0172038 0.0114972 0.0125575 0.00895035 0.0116704 0.0145875 0.00764121 0.0151904 
+0.0271022 0.011318 0.00996974 0.0110196 0.00925289 0.0109691 0.0108103 0.0108613 0.0116696 0.00787904 0.0109383 0.00769676 0.00829481 0.00969134 0.00929602 0.0130977 0.0177721 0.0186529 0.00898793 0.00846386 0.00851917 0.0142691 0.00784852 0.0179733 0.00924451 0.0112334 0.0105803 0.013269 0.01111 0.0107221 0.0134643 0.00755481 0.0162522 0.00919093 0.0112732 0.0108264 0.00968506 0.0159471 0.0106169 0.017662 0.00839274 0.0103854 0.00808056 0.011202 0.0145698 0.0101001 0.0143089 0.0134676 0.0107477 
+0.0230633 0.0114753 0.00784154 0.00929423 0.00889608 0.0116286 0.0150744 0.0123752 0.0122833 0.010329 0.0124179 0.0140873 0.0126277 0.0111987 0.0103069 0.00949489 0.0146096 0.00890901 0.00783397 0.0112901 0.013117 0.00811653 0.00767978 0.0122787 0.00962363 0.0200206 0.011309 0.0105445 0.00752052 0.0122045 0.00849636 0.0113249 0.00950693 0.00775034 0.0129373 0.010784 0.0114393 0.0119846 0.0088356 0.00808703 0.0103027 0.0111316 0.0077251 0.0119095 0.00794289 0.0154611 0.0132691 0.0132304 0.0142578 
+0.0239072 0.00923091 0.0108958 0.0105975 0.0148438 0.0115835 0.0139692 0.00877158 0.00861952 0.016659 0.0123196 0.0199786 0.0134272 0.00917523 0.00795431 0.019383 0.0159615 0.0100525 0.00801319 0.0117417 0.00912759 0.0101336 0.00795618 0.0111197 0.0095205 0.0110377 0.0137116 0.0111116 0.0120717 0.0139439 0.0166161 0.0142446 0.00807274 0.00767242 0.0149445 0.0119278 0.013909 0.00800654 0.0104336 0.0131392 0.00808004 0.013861 0.00981833 0.0118265 0.0126384 0.0096132 0.0134302 0.0162862 0.0119054 
+0.0223962 0.0127152 0.00925128 0.0129104 0.00813518 0.0136416 0.0122769 0.0114984 0.0128947 0.00802646 0.00793451 0.00908589 0.0137168 0.00981374 0.0124787 0.0109228 0.0106603 0.0149199 0.00805851 0.00907345 0.011635 0.0144065 0.0131782 0.00814107 0.00788798 0.0169886 0.0138701 0.0125473 0.0075731 0.0134445 0.0179868 0.0129148 0.0148743 0.00902368 0.0182203 0.00754253 0.0160189 0.0109286 0.00852182 0.0159449 0.00850632 0.00917183 0.0176302 0.0150493 0.0106994 0.0141455 0.011174 0.00956637 0.0159048 
+0.0304368 0.00985075 0.00924785 0.00869394 0.0140252 0.0100286 0.0161972 0.0158769 0.0108712 0.00941128 0.0100974 0.0138416 0.00780139 0.00798227 0.0115011 0.0118319 0.0096153 0.00843833 0.0123238 0.00848155 0.0117371 0.01002 0.00968988 0.00976564 0.0102043 0.00838277 0.0199357 0.0101832 0.00840926 0.00891564 0.00995065 0.0111052 0.0101932 0.0129618 0.0135528 0.0115114 0.00822212 0.0105505 0.0133001 0.0105868 0.0126539 0.0100525 0.00810846 0.00946776 0.0081359 0.008567 0.00906365 0.0202011 0.0115662 
+0.0241134 0.0143089 0.00763809 0.0103048 0.0147389 0.0116759 0.0143365 0.00955372 0.0136892 0.0112737 0.0131253 0.0128577 0.0136494 0.00796086 0.0117908 0.0114703 0.0119028 0.00813583 0.0144375 0.00849329 0.0126432 0.0141733 0.0078145 0.0144172 0.00884723 0.0151062 0.008119 0.00825625 0.0101327 0.00828455 0.00826487 0.0114902 0.0122419 0.00858807 0.0128025 0.0150127 0.01324 0.00885035 0.0110488 0.011903 0.00945571 0.017715 0.00826096 0.00909538 0.0103109 0.0098135 0.00834678 0.016477 0.0128185 
+0.0256172 0.00805043 0.0158787 0.0123214 0.0124122 0.013729 0.0137244 0.00867987 0.0135993 0.00762924 0.00890781 0.0144716 0.00900865 0.00791389 0.0147116 0.0107582 0.0110171 0.012628 0.00907872 0.00827167 0.0120265 0.00826881 0.0151552 0.0113008 0.0116872 0.00987471 0.0148357 0.00969207 0.0142194 0.0111191 0.00852744 0.0129664 0.0123777 0.00889003 0.00941833 0.00866721 0.0112646 0.0138308 0.010994 0.00828151 0.0119669 0.0113105 0.0198909 0.0111647 0.0151214 0.00941323 0.0157938 0.00781977 0.0104112 
+0.0234519 0.0128553 0.00967367 0.00816595 0.0104776 0.00946706 0.00879412 0.0133896 0.0180495 0.0107266 0.0163932 0.00757559 0.0105604 0.0166635 0.0135827 0.0134133 0.0101368 0.00822928 0.00839668 0.0129781 0.0084781 0.0108501 0.00910018 0.0083439 0.00794161 0.00977823 0.0155439 0.0195073 0.0100602 0.0105728 0.0139483 0.0114243 0.0191313 0.00922986 0.00896528 0.0127137 0.0100254 0.00847564 0.00751717 0.0118792 0.0112046 0.0140585 0.0108731 0.0103888 0.00972049 0.0172753 0.00852407 0.00983208 0.0120798 
+0.0244513 0.00916373 0.0129416 0.00941483 0.0161396 0.00909459 0.00879067 0.00823309 0.0186154 0.00810004 0.00980972 0.0139805 0.0110224 0.0152112 0.00918473 0.00759198 0.0121576 0.00774559 0.0131761 0.0162813 0.0100207 0.0136941 0.00990834 0.0128233 0.0139276 0.00916473 0.0158321 0.00826871 0.0150633 0.0148289 0.0160133 0.0119603 0.0115088 0.0119952 0.0185482 0.0159011 0.012829 0.00832603 0.013801 0.00838511 0.0113083 0.0130678 0.00842253 0.00917392 0.00795815 0.0149192 0.0113192 0.00929634 0.0113226 
+0.0270594 0.00807159 0.00991026 0.0105547 0.0148077 0.0128059 0.0122153 0.0143011 0.00932909 0.0151779 0.0127106 0.00992389 0.0183903 0.0156485 0.0157064 0.0164128 0.0116402 0.00943215 0.0105756 0.00813119 0.0112977 0.0131093 0.0140546 0.0183521 0.010916 0.0138169 0.0102092 0.013503 0.00970905 0.00830962 0.0181664 0.00841476 0.0101295 0.0111494 0.00986364 0.0178961 0.0174996 0.0137928 0.0163185 0.0157177 0.017789 0.0110789 0.0117625 0.0113533 0.0087363 0.0103995 0.0153326 0.0196703 0.0113684 
+0.024149 0.0105725 0.00887366 0.00989522 0.0153129 0.0112372 0.0173714 0.0107147 0.0101773 0.0167279 0.0090978 0.0079373 0.0103145 0.0152955 0.012309 0.0210111 0.0080108 0.00998822 0.0157671 0.00933217 0.00820574 0.0187734 0.0177674 0.009834 0.00918752 0.0179541 0.0186268 0.00756802 0.0109617 0.00983035 0.0112832 0.00861512 0.0245526 0.00975396 0.0123681 0.0132452 0.00788526 0.0131673 0.018516 0.0118614 0.0147822 0.0079077 0.0167211 0.0108169 0.00820817 0.0117829 0.00824693 0.0122202 0.0116028 
+0.0207545 0.0125084 0.0108505 0.0104894 0.0128691 0.00994967 0.00832289 0.0130178 0.0150846 0.0160362 0.00750938 0.0108468 0.0160085 0.0127219 0.0144578 0.0124182 0.0105225 0.0101767 0.0107503 0.0144781 0.00883719 0.0102866 0.00984082 0.00817868 0.0163922 0.00778454 0.00821095 0.00961263 0.0127874 0.013317 0.0104098 0.0120479 0.0175316 0.00782725 0.00831699 0.0125904 0.015201 0.0105327 0.00952129 0.0111621 0.00871013 0.0141928 0.00771442 0.00870064 0.00921121 0.00832019 0.0095926 0.0152257 0.00842376 
+0.0313984 0.007982 0.00995331 0.00981045 0.00876915 0.0126173 0.00999689 0.00851341 0.00934417 0.00899455 0.0100468 0.0108085 0.00850033 0.0086989 0.0091643 0.00956867 0.0089853 0.00985235 0.0190657 0.010688 0.00827333 0.0099693 0.0134224 0.0174105 0.0152557 0.00929907 0.0161777 0.0150555 0.00806505 0.00850974 0.0096831 0.0104143 0.0118908 0.00930829 0.0125519 0.011415 0.00957852 0.00792941 0.00892285 0.0101932 0.0121507 0.0139159 0.0122315 0.00979685 0.0107431 0.00871917 0.0141024 0.00864975 0.0106822 
+0.0223997 0.0080833 0.0119063 0.0123641 0.00763316 0.0119364 0.00935 0.0110724 0.0114145 0.0095334 0.0119481 0.009164 0.0193016 0.0196916 0.00848646 0.00863559 0.0168786 0.0128225 0.0113463 0.00856433 0.00803072 0.0111494 0.012586 0.0175893 0.0178392 0.0103538 0.00967134 0.00806565 0.0128146 0.0199558 0.0162451 0.0099261 0.0121075 0.0158896 0.00812237 0.00779004 0.00800657 0.0109748 0.00877823 0.00833482 0.015287 0.00981958 0.012355 0.00902026 0.0118177 0.00912929 0.0143999 0.0107564 0.0103279 
+0.0227656 0.00873123 0.00783844 0.0155652 0.00943663 0.0100323 0.00815993 0.0101672 0.0132211 0.0153565 0.0105628 0.0158833 0.0152073 0.00801318 0.0109822 0.00822322 0.0110543 0.0120444 0.00895155 0.0150476 0.012667 0.00974858 0.0103141 0.00822587 0.00752527 0.0115945 0.00912847 0.0111587 0.00892064 0.0105066 0.0152925 0.00898104 0.0084449 0.0135059 0.0115401 0.00794416 0.00754046 0.00796978 0.0101548 0.00950692 0.0128038 0.0151233 0.00849278 0.00925549 0.00973895 0.0134272 0.0143091 0.00965296 0.00872116 
+0.0220867 0.00952122 0.0142567 0.00857123 0.0112302 0.00998641 0.0104499 0.0134628 0.00992557 0.0138055 0.0172484 0.00923038 0.00773323 0.0141641 0.0107527 0.0104726 0.0129019 0.00930548 0.00855496 0.0115993 0.0166085 0.0102857 0.00924597 0.0110356 0.0116808 0.00765342 0.00769771 0.00903775 0.0141177 0.00807407 0.0120771 0.00976994 0.0188901 0.0127345 0.00857711 0.0182242 0.0117 0.0091719 0.0172657 0.00978154 0.00936656 0.0093512 0.0121914 0.0165407 0.00926855 0.0129943 0.00847156 0.00906975 0.0107144 
+0.0219103 0.017343 0.0196978 0.0104879 0.0105515 0.00999576 0.00928981 0.0149162 0.0090834 0.0113828 0.0109014 0.0153987 0.00770247 0.00823608 0.0128081 0.00914814 0.00751693 0.0107139 0.0125536 0.0102118 0.0116546 0.0123145 0.00896101 0.0118348 0.0104389 0.0133669 0.0111383 0.0153073 0.00830141 0.00823326 0.0108762 0.00976553 0.00819337 0.00757586 0.0104232 0.0150566 0.0112633 0.00918437 0.0088266 0.0131901 0.0148984 0.0114526 0.0173006 0.0126395 0.0134126 0.00977013 0.00799374 0.0209565 0.0158383 
+0.024387 0.0100473 0.0085098 0.0110007 0.0102743 0.0145895 0.0142096 0.00932722 0.0115369 0.00849483 0.00933373 0.00822497 0.0101023 0.0129909 0.0124775 0.0171622 0.00770938 0.0136457 0.00845559 0.00977008 0.00957626 0.00809545 0.0189998 0.00803223 0.00904886 0.00753141 0.00798637 0.0117644 0.00897732 0.00867011 0.0091308 0.0118075 0.00975002 0.0132517 0.0159158 0.0167903 0.0115717 0.00884695 0.016522 0.00820418 0.0075003 0.0130556 0.00946251 0.00784477 0.00826967 0.013128 0.0123578 0.0170744 0.0188824 
+0.0288534 0.0134006 0.0110453 0.0140292 0.015017 0.0128366 0.00788706 0.0178822 0.0127398 0.00967744 0.00767562 0.00849709 0.0133257 0.00838418 0.0171635 0.0132189 0.0105086 0.0139051 0.00804162 0.0101824 0.0154833 0.0156085 0.00862076 0.0130732 0.014416 0.00788632 0.0104821 0.0128714 0.00923762 0.010608 0.0136589 0.0134713 0.0138101 0.00986637 0.00790978 0.0112465 0.0159947 0.00901536 0.0101493 0.0134912 0.00978309 0.015527 0.00969498 0.0126513 0.0111776 0.00755319 0.0184152 0.00789488 0.00930304 
+0.0217148 0.0087382 0.0210872 0.0105126 0.0127658 0.0112209 0.0115129 0.0113154 0.011329 0.0134951 0.00901304 0.00848824 0.0177882 0.0190767 0.0136747 0.010352 0.0152489 0.0109627 0.010929 0.0127208 0.0107025 0.0114791 0.00890777 0.0104932 0.0129995 0.0156713 0.00762406 0.0129343 0.0108742 0.0127172 0.0102928 0.00827948 0.0136815 0.0113224 0.00843217 0.0152795 0.0149782 0.007825 0.0109507 0.0137877 0.0127452 0.0136955 0.00991781 0.00827777 0.0179353 0.00886824 0.0141212 0.0168107 0.0173965 
+0.0219153 0.00879596 0.0139392 0.00853973 0.0096694 0.0162534 0.012773 0.00981645 0.00805063 0.00937836 0.0138217 0.0112746 0.0086497 0.0152205 0.00913559 0.0166785 0.0145056 0.00797746 0.00843824 0.00793002 0.0169129 0.00869163 0.0107405 0.00847737 0.01042 0.00827518 0.0117657 0.00869265 0.0109001 0.0162319 0.0165439 0.0132549 0.0102501 0.00996245 0.0114955 0.0101729 0.0117175 0.0174851 0.0104437 0.00795506 0.0126651 0.0108139 0.0143188 0.00871343 0.012538 0.0223205 0.00845874 0.00775098 0.0148045 
+0.0257533 0.00776964 0.00942699 0.0162227 0.0084621 0.00894997 0.00974841 0.0148166 0.00800937 0.0123893 0.00793666 0.0109363 0.0129667 0.011252 0.0132105 0.018058 0.0100507 0.00875572 0.010091 0.0192821 0.00799599 0.0140755 0.0137605 0.00786973 0.0154267 0.0131201 0.0200065 0.0200133 0.00802706 0.0111778 0.0172287 0.0111324 0.00835332 0.00857922 0.00900446 0.0134356 0.0128486 0.00765603 0.0153792 0.0136076 0.00936792 0.0111302 0.00957391 0.00810223 0.0118117 0.0133464 0.0127174 0.0105692 0.00892939 
+0.024227 0.0122199 0.00901293 0.0131072 0.0129681 0.0157613 0.00831601 0.0111685 0.0130465 0.00889493 0.012909 0.0119596 0.00750153 0.0109797 0.0112245 0.0117992 0.014789 0.0134853 0.0105948 0.00783141 0.0103964 0.0106642 0.00827252 0.0112292 0.00756793 0.0107174 0.0139279 0.0112388 0.00919734 0.00932427 0.00860366 0.0131025 0.0195901 0.00827442 0.0112736 0.00896313 0.0156302 0.0116154 0.00805878 0.014369 0.0132608 0.0105323 0.00876208 0.0118269 0.00961405 0.0105055 0.0126735 0.0082246 0.00777274 
+0.0232398 0.0182848 0.0100607 0.0125212 0.0160683 0.00981616 0.00925346 0.0075162 0.00980768 0.00900646 0.00890371 0.0136854 0.0145326 0.00856758 0.0126471 0.0137377 0.00961645 0.0078712 0.0150319 0.0114971 0.0110445 0.0115416 0.0126913 0.0116137 0.00759611 0.00829835 0.00931262 0.0165359 0.00807646 0.0112452 0.0129125 0.0138121 0.0144461 0.009263 0.00773189 0.0118874 0.0141273 0.0141659 0.00968382 0.0129926 0.0109789 0.0101879 0.0102743 0.0165389 0.0144569 0.00819232 0.0153246 0.00796191 0.00828517 
+0.0259787 0.010485 0.010609 0.00792477 0.0102463 0.0121859 0.0156888 0.0116376 0.0167157 0.0187747 0.00783081 0.0121942 0.00987135 0.0127848 0.00864133 0.00867839 0.0102234 0.0201533 0.00969555 0.00895597 0.00927638 0.0105126 0.0103961 0.0104856 0.0136381 0.00759534 0.0106049 0.0110775 0.00906157 0.0190068 0.00763674 0.00796651 0.00811015 0.00867397 0.00900911 0.00931233 0.00754657 0.00751041 0.00936925 0.0128884 0.0142454 0.0183237 0.00776128 0.00834111 0.0163471 0.0145873 0.0105058 0.0106947 0.0119503 
+0.0206671 0.00769482 0.0108698 0.00797924 0.00989985 0.00925665 0.0116527 0.0134322 0.0113285 0.0132066 0.0110814 0.016983 0.0160277 0.0102325 0.0121382 0.010984 0.00756652 0.0125121 0.0107692 0.00966968 0.00933033 0.0133006 0.0103347 0.00842852 0.008004 0.00865679 0.0131454 0.00917677 0.0194663 0.0112063 0.00876808 0.00773606 0.0201326 0.0100063 0.00910977 0.0113968 0.00985285 0.00829201 0.0115724 0.00918228 0.00913257 0.016697 0.0110031 0.011874 0.0143604 0.0129367 0.0122788 0.0141067 0.0165299 
+0.0235355 0.0115779 0.00907672 0.0159438 0.0174978 0.0170136 0.0141612 0.0139259 0.0126844 0.0096883 0.0111207 0.00927742 0.00780215 0.0112174 0.00753353 0.00820472 0.00798994 0.0111971 0.0129177 0.0120068 0.0165062 0.00856146 0.00820934 0.0128443 0.0101435 0.0123611 0.00848645 0.0159466 0.0117611 0.00778523 0.0108223 0.00792226 0.0146178 0.0167422 0.00888029 0.0084917 0.0103163 0.0108963 0.0131455 0.009296 0.00776834 0.0101146 0.0101946 0.0158065 0.0104041 0.0162822 0.0133552 0.01034 0.00806414 
+0.0223017 0.0102084 0.00847506 0.0122524 0.012433 0.0093421 0.0143454 0.0101233 0.0107128 0.0135012 0.00833962 0.0104653 0.00775288 0.0140836 0.0107977 0.00846482 0.0120958 0.0160775 0.0106818 0.00906504 0.00925368 0.0121868 0.0145626 0.0136249 0.00756399 0.0135822 0.0108359 0.00928053 0.0115864 0.0130171 0.00915042 0.00795784 0.00875577 0.00889915 0.00949089 0.0081355 0.00818921 0.0152822 0.0107207 0.0125019 0.00785935 0.00807379 0.0133005 0.0153737 0.00844602 0.008706 0.00832958 0.0176599 0.0113929 
+0.0222785 0.0115685 0.00882136 0.0115904 0.0101322 0.0149058 0.0100778 0.019607 0.0131377 0.0130118 0.0124715 0.0103685 0.00807073 0.00928566 0.0108924 0.0103663 0.0151538 0.0140399 0.0135388 0.0116879 0.00862708 0.0103798 0.0149153 0.0200176 0.00864548 0.0102865 0.0123699 0.0148262 0.0103001 0.0126099 0.0152885 0.0123521 0.0193287 0.00835033 0.00942435 0.0101484 0.014357 0.0137524 0.0175744 0.00881443 0.0139454 0.00923441 0.0154462 0.00888159 0.0111433 0.0163142 0.00983176 0.010001 0.00882299 
+0.0219002 0.00885172 0.00892213 0.0160531 0.0110302 0.0103801 0.0153188 0.012857 0.0122882 0.0084038 0.00827598 0.00821238 0.0100706 0.00916037 0.0145212 0.00997158 0.00996835 0.00804101 0.00832772 0.0085908 0.0136335 0.0131631 0.0118268 0.0083565 0.0106843 0.0181602 0.0116033 0.0126854 0.0160203 0.00970096 0.00926267 0.00816163 0.00921396 0.0150989 0.0111025 0.00957662 0.0173483 0.0118917 0.00918161 0.014099 0.0153688 0.0134076 0.0103182 0.013726 0.0180079 0.00983955 0.0120475 0.00939958 0.0153206 
+0.023467 0.0105336 0.00939131 0.0113621 0.0130088 0.0153581 0.00841515 0.00865124 0.0135816 0.0162395 0.0203847 0.0127909 0.00829511 0.00881781 0.0163614 0.00820557 0.0124668 0.0123785 0.00910737 0.0111985 0.00886406 0.0112144 0.00894312 0.0152993 0.0124651 0.0104476 0.0100528 0.0118992 0.00857994 0.0135462 0.0162855 0.00787341 0.00978844 0.00891056 0.00849655 0.0124034 0.011003 0.00954397 0.00756585 0.00871334 0.0111837 0.0148349 0.0131524 0.016365 0.00985213 0.0115147 0.00986463 0.00915865 0.00835631 
+0.0218976 0.0139627 0.0120246 0.0114489 0.011564 0.0173305 0.00881312 0.0141614 0.013083 0.00862102 0.00989734 0.0151296 0.0138561 0.00752894 0.0108879 0.0181947 0.00986264 0.0106516 0.00949566 0.0129874 0.00763186 0.0100316 0.0109182 0.0146944 0.0102073 0.00973839 0.0161131 0.0085112 0.015397 0.00965817 0.012005 0.00856396 0.0127106 0.018741 0.0150085 0.0141986 0.0113409 0.00952792 0.00785132 0.00985255 0.00927583 0.0122464 0.00818961 0.00821668 0.0136789 0.0129419 0.0121544 0.0104701 0.0158305 
+0.0206795 0.0209386 0.00751626 0.0160865 0.0101277 0.00850899 0.00752594 0.0095737 0.0106809 0.00867979 0.0113247 0.012077 0.00994838 0.00854107 0.0155612 0.0108833 0.00920965 0.00920464 0.010517 0.00900608 0.0148061 0.00965102 0.0105772 0.008064 0.0109489 0.0120811 0.0133097 0.0132957 0.0141527 0.01249 0.0112634 0.00813683 0.0110562 0.0108294 0.00796001 0.0124461 0.0132342 0.010618 0.0143029 0.00839652 0.0109242 0.00987172 0.00908014 0.0119396 0.01279 0.014592 0.00924242 0.00931531 0.00795973 
+0.0211289 0.0132589 0.0104337 0.00948422 0.0117414 0.0210517 0.0137955 0.0129675 0.0138387 0.00893837 0.0151963 0.0171065 0.00775741 0.0101589 0.0141498 0.0187153 0.00947764 0.00774628 0.0154054 0.00989548 0.0152872 0.009876 0.01436 0.0128548 0.0133433 0.0126873 0.0165809 0.00761219 0.00873266 0.0176218 0.00890786 0.007627 0.00928098 0.0118897 0.0171019 0.0098913 0.013492 0.0121169 0.011258 0.0122963 0.0108707 0.011473 0.0107655 0.0148202 0.00982258 0.00957453 0.0111155 0.0100285 0.00813544 
+0.0249937 0.0132628 0.00989043 0.00808503 0.00865407 0.0113607 0.00909992 0.013825 0.0130143 0.0134441 0.0126256 0.0119627 0.0108432 0.0130967 0.00952968 0.0164724 0.0111507 0.0160875 0.00754196 0.00870893 0.0187831 0.00819476 0.0120628 0.00911874 0.00942726 0.0179704 0.0110552 0.00994693 0.0129673 0.00918318 0.00912005 0.00789547 0.0103583 0.00988415 0.00999427 0.0121406 0.0157666 0.00870823 0.0119195 0.013 0.00982782 0.008185 0.0125848 0.0125986 0.00854327 0.00809625 0.0127336 0.0165125 0.0135715 
+0.0260616 0.0122205 0.011663 0.0103692 0.0140887 0.0106316 0.0132454 0.0104939 0.00926397 0.0120671 0.0103348 0.00877237 0.0225838 0.0107394 0.0126268 0.00957499 0.00950101 0.0147754 0.0101303 0.00940025 0.01413 0.0137589 0.00832324 0.0161982 0.00979315 0.00869996 0.00832121 0.0110396 0.0102091 0.00861289 0.011158 0.00943551 0.00805955 0.0097125 0.013317 0.0124797 0.0093203 0.0134614 0.00954158 0.0125645 0.0152999 0.0152032 0.0124181 0.0149455 0.00942742 0.0148204 0.0132616 0.0178735 0.0104298 
+0.0257762 0.0110571 0.0105898 0.013532 0.0162459 0.0142268 0.0115755 0.0158183 0.0118954 0.0168663 0.0120186 0.0114937 0.00980749 0.0129433 0.00975411 0.0131276 0.010905 0.012303 0.0130469 0.00818084 0.0160419 0.0112622 0.019154 0.010586 0.0114233 0.017696 0.010955 0.0108811 0.00757016 0.00925705 0.0138948 0.0151963 0.0101173 0.01329 0.00948939 0.0121616 0.0129249 0.013639 0.00887597 0.00781413 0.00790878 0.00990957 0.017771 0.00860841 0.0075033 0.0110234 0.0158632 0.0129937 0.0116417 
+0.0240191 0.0165563 0.0120537 0.0111116 0.0117274 0.00853298 0.00787973 0.00964887 0.0130147 0.00973637 0.0131743 0.00921314 0.0122116 0.0117603 0.0154865 0.010949 0.00978931 0.0098883 0.0172717 0.00869153 0.00793727 0.0112716 0.00787567 0.00865546 0.010282 0.0108673 0.0143578 0.0116831 0.0118659 0.00807395 0.00845995 0.0114815 0.00858862 0.00772967 0.00801831 0.0101241 0.0131679 0.00841491 0.0190119 0.0119131 0.00985021 0.00776973 0.0103715 0.0124589 0.0159059 0.00987744 0.0106877 0.00904324 0.0103904 
+0.0336864 0.0131397 0.00997658 0.013466 0.00918262 0.0102103 0.0102057 0.012457 0.00807424 0.0108472 0.00922633 0.0146097 0.0119745 0.011336 0.00792642 0.012814 0.0102497 0.00972321 0.00987833 0.0110496 0.0144665 0.00823416 0.0110999 0.0122386 0.0127554 0.0113965 0.0152782 0.0100133 0.00860568 0.00944747 0.0137201 0.00764315 0.018137 0.0117797 0.00834321 0.0131745 0.00909519 0.0186882 0.0136019 0.00959667 0.0138963 0.0240943 0.00922153 0.00795525 0.0123477 0.00760298 0.00935222 0.0116062 0.0117592 
+0.0211109 0.00823624 0.013151 0.00903996 0.00971909 0.00966642 0.00893984 0.0117034 0.0099061 0.0177389 0.0132994 0.012128 0.0102907 0.00821357 0.0130841 0.0103626 0.00939324 0.0128043 0.0114336 0.0183887 0.0122977 0.0136871 0.00766289 0.00772549 0.0091006 0.00988621 0.0140285 0.0132475 0.0149558 0.0140575 0.00941845 0.0135572 0.0127171 0.0101749 0.0133492 0.0123623 0.0157588 0.0132736 0.00873428 0.0232314 0.0144387 0.0107137 0.0114545 0.00904013 0.0133339 0.0114084 0.00760248 0.0116109 0.0140074 
+0.0225542 0.0177584 0.00820206 0.00960976 0.0100134 0.0109611 0.0153114 0.010387 0.0165983 0.0175927 0.0101028 0.0138415 0.0113752 0.00790347 0.00757291 0.00788049 0.0113011 0.00869304 0.0144222 0.00752977 0.0112919 0.0142053 0.00801023 0.0135512 0.0189808 0.0138823 0.0103193 0.0148885 0.00916007 0.0111217 0.0113887 0.00874071 0.0126873 0.00977365 0.0158879 0.0135432 0.008609 0.0177525 0.00934098 0.00775321 0.0200725 0.0138581 0.00929476 0.0103565 0.0155505 0.0114794 0.0086308 0.00882947 0.0145632 
+0.0250956 0.0119632 0.00759771 0.00978061 0.0117281 0.0104614 0.0106371 0.0119173 0.00801365 0.00928938 0.00869799 0.0130824 0.00826153 0.0113958 0.00902546 0.0158337 0.0127575 0.0108629 0.0108841 0.0122359 0.011006 0.0104098 0.0118249 0.00972292 0.00757526 0.00852472 0.0108496 0.0122731 0.00866414 0.0102172 0.0118904 0.00838573 0.00840182 0.0145641 0.0107593 0.0102488 0.00931913 0.00907936 0.00976757 0.0106548 0.00773216 0.00831631 0.0087845 0.00974962 0.0102509 0.0077432 0.011432 0.00869996 0.00920229 
+0.0235478 0.010933 0.0126415 0.0130523 0.00784461 0.00802316 0.00839757 0.0145396 0.00936429 0.0104733 0.0104434 0.0117082 0.0108046 0.0143165 0.00999919 0.0131738 0.00875868 0.00904078 0.0159884 0.0159185 0.0100566 0.0169482 0.011197 0.00924774 0.0083769 0.00853495 0.00776002 0.0107559 0.0076253 0.0110167 0.0175894 0.00870061 0.0108202 0.015549 0.00798324 0.018877 0.0218267 0.0131679 0.0118787 0.0100219 0.00780855 0.00804774 0.00867327 0.0110219 0.0084428 0.00776008 0.00766939 0.0120955 0.00801992 
+0.021955 0.00940322 0.0154049 0.0106521 0.00834238 0.0105714 0.0137723 0.0160339 0.0106749 0.0106381 0.0129054 0.0109451 0.0141554 0.0139853 0.00848258 0.0147618 0.00882912 0.0115394 0.0144487 0.0175726 0.00958771 0.0140424 0.01117 0.00765008 0.0151508 0.0153911 0.00774375 0.0158878 0.00870456 0.00851632 0.00798942 0.0155605 0.00763791 0.0151582 0.00907071 0.00860455 0.00927132 0.00840893 0.00814229 0.0173009 0.00972256 0.0114171 0.00879717 0.00768555 0.0100898 0.0108291 0.0117871 0.00967495 0.00888629 
+0.0257467 0.00900563 0.0171562 0.00903157 0.00825902 0.009528 0.0084389 0.00793054 0.0111465 0.0118638 0.00973942 0.0148717 0.0139167 0.00888057 0.010006 0.00958233 0.0114284 0.01154 0.00976147 0.0105533 0.0123792 0.0127316 0.0113913 0.00938501 0.0108933 0.0132494 0.0100134 0.00793871 0.0119074 0.0138443 0.00850183 0.0106248 0.00916611 0.0142109 0.0174247 0.00920319 0.00830646 0.00963313 0.00905203 0.0171472 0.0109715 0.0116792 0.00925263 0.0113661 0.00752427 0.00986478 0.0174423 0.00845846 0.00833033 
+0.0224793 0.0180033 0.00849516 0.00887345 0.0127341 0.0113676 0.00913553 0.0121502 0.00777487 0.00912217 0.00850362 0.0154226 0.0155775 0.00892688 0.0114641 0.00942511 0.0156517 0.015633 0.0160293 0.00924149 0.0129434 0.0100237 0.011184 0.0153932 0.0100437 0.0124477 0.0126745 0.0119709 0.0140577 0.0135944 0.0188757 0.0106933 0.010631 0.0137078 0.0129499 0.0105144 0.0119992 0.0131591 0.0118469 0.0149973 0.0176692 0.0125257 0.00758253 0.0138059 0.0134798 0.0110038 0.0132354 0.0107177 0.0134322 
+0.0210345 0.0102932 0.0132349 0.00963761 0.00906311 0.00865032 0.0109847 0.00928407 0.0077928 0.0135659 0.0130834 0.00843539 0.0185632 0.0078 0.0152633 0.00880949 0.0132717 0.0168182 0.0103695 0.0089427 0.0106294 0.0107249 0.0081818 0.0196271 0.00827723 0.0136525 0.00876158 0.00939363 0.0133215 0.0150743 0.0109293 0.011322 0.0127186 0.00856855 0.0225302 0.0100105 0.00911908 0.0105362 0.0105037 0.00855259 0.0135456 0.00982338 0.0107018 0.00759774 0.0152149 0.00881609 0.0143926 0.00817332 0.00882965 
+0.0222467 0.00938584 0.0102153 0.00996461 0.0157442 0.00896774 0.0112177 0.0102015 0.011268 0.0148865 0.0130086 0.00867837 0.0146227 0.00816429 0.00796441 0.00974237 0.00811556 0.00877084 0.00792424 0.00825762 0.00984428 0.00905027 0.0113013 0.0116487 0.0117235 0.01317 0.015996 0.0129839 0.0081974 0.00819008 0.00809835 0.00812309 0.0135022 0.0105967 0.0129249 0.00907971 0.011666 0.0102498 0.0102823 0.0127665 0.0177145 0.00864585 0.0109531 0.00915439 0.0163722 0.0169408 0.00839886 0.00808527 0.0132475 
+0.0351424 0.0123037 0.00996178 0.0133582 0.0122959 0.00961472 0.0170842 0.0097461 0.00928027 0.0188664 0.0103514 0.0090499 0.0162294 0.0104919 0.0103414 0.0151058 0.0105229 0.00968669 0.0112248 0.00925596 0.0118245 0.0125294 0.00951293 0.0091115 0.0100605 0.0151374 0.0113915 0.0181758 0.0174017 0.010752 0.013117 0.0109705 0.0178813 0.00835953 0.0152218 0.0105508 0.0112765 0.0148581 0.0117385 0.0142863 0.0142671 0.0102961 0.00755827 0.00936552 0.0149442 0.00892664 0.010704 0.00807923 0.0103055 
+0.0129276 0.0233393 0.0123679 0.00811758 0.0125915 0.0113035 0.0140784 0.00822778 0.011812 0.0077716 0.00817863 0.0158648 0.0134115 0.0114055 0.00879775 0.0143018 0.0127297 0.00820451 0.0140901 0.0148652 0.0105412 0.00750298 0.00978503 0.00777211 0.0106125 0.0137089 0.0100961 0.0106955 0.015458 0.0135725 0.00926069 0.00887185 0.00930858 0.00948317 0.0118273 0.0104596 0.0148373 0.0122002 0.0121313 0.00896792 0.00780052 0.0142384 0.0162418 0.0120706 0.0108821 0.00821499 0.00899263 0.0152771 0.00929117 
+0.00852234 0.0223121 0.00963208 0.0101909 0.0154755 0.0132661 0.00775918 0.00946033 0.00824469 0.0076171 0.0168582 0.0149323 0.012522 0.0119527 0.0135063 0.0140917 0.0138473 0.0136181 0.00928085 0.0120208 0.00863997 0.0101098 0.00867349 0.0127716 0.00905366 0.0104012 0.018715 0.0120801 0.0123961 0.00815842 0.0104416 0.00964878 0.0117088 0.00801172 0.0142401 0.0154078 0.0139241 0.0141611 0.00867801 0.0141621 0.0188441 0.00857864 0.00867845 0.0108114 0.00963661 0.0177781 0.0158615 0.00982639 0.0121654 
+0.0121402 0.0283287 0.0123046 0.012044 0.0151337 0.0107913 0.0132634 0.0144782 0.00976324 0.0160855 0.011114 0.0173437 0.0184064 0.00914569 0.00893225 0.00906138 0.0172909 0.0156876 0.0155012 0.0121186 0.00848431 0.0100921 0.00914746 0.0138674 0.00830907 0.00889417 0.00908697 0.0131617 0.00816596 0.00967272 0.0136794 0.0079061 0.0101782 0.0077368 0.0116617 0.00841962 0.0100697 0.0114808 0.0134463 0.0173815 0.00865972 0.0153556 0.0134127 0.00793805 0.0107085 0.00977577 0.0196041 0.0113622 0.0141046 
+0.0107332 0.0272767 0.00973959 0.0120877 0.0212239 0.0123182 0.0106044 0.0117571 0.00806122 0.0081122 0.0101717 0.0117068 0.00929421 0.012269 0.00839687 0.016317 0.00808212 0.0116174 0.010069 0.00762633 0.0118785 0.0170918 0.00906191 0.0167674 0.00790757 0.0109736 0.0122299 0.00811963 0.00960001 0.00925493 0.0123415 0.01325 0.00753833 0.0123176 0.0145587 0.0118021 0.00766919 0.0124873 0.00973067 0.00847831 0.012676 0.0123287 0.0097535 0.0106664 0.0107547 0.0138246 0.0195624 0.0100525 0.00962325 
+0.0119281 0.0251323 0.0113759 0.0101683 0.0130994 0.00879826 0.00823088 0.0114712 0.0161398 0.0100226 0.0100097 0.0125042 0.0138629 0.0117501 0.00756436 0.0144745 0.0136194 0.0124086 0.0114991 0.0143748 0.0181917 0.0136612 0.00962511 0.0103765 0.0101686 0.0113172 0.00870255 0.019323 0.00995193 0.0118664 0.00851607 0.00946212 0.00907452 0.00818943 0.0131628 0.00996086 0.0141944 0.0150947 0.0111328 0.0130214 0.00983337 0.0136919 0.0113602 0.00871084 0.0157483 0.010904 0.0236183 0.0143695 0.011746 
+0.016832 0.025437 0.0098946 0.0150509 0.00825244 0.0089719 0.012338 0.0111107 0.00807729 0.0130397 0.0134324 0.0135269 0.0137549 0.00965155 0.0143415 0.0147855 0.00917189 0.00983137 0.0147469 0.00907731 0.0109262 0.0137849 0.00978295 0.0082266 0.00806215 0.00863035 0.00929611 0.0115255 0.0100205 0.0107482 0.0104747 0.0106799 0.0162054 0.00826127 0.0137276 0.00833625 0.0130827 0.00891462 0.010127 0.0143162 0.00873058 0.0115233 0.013493 0.0151634 0.00956458 0.00948767 0.0135435 0.0170953 0.00955095 
+0.008911 0.0235109 0.00912204 0.0087342 0.00759654 0.00953132 0.019344 0.0164507 0.00760068 0.0114743 0.010967 0.0113929 0.00926275 0.00968558 0.0181604 0.0111836 0.010156 0.0166397 0.00962995 0.00851972 0.012249 0.00780138 0.0104243 0.0111163 0.0123718 0.00754142 0.00995157 0.00903349 0.0116911 0.0102157 0.00941913 0.0126701 0.0079727 0.0127494 0.016391 0.00818587 0.0112246 0.0139862 0.0083602 0.00781615 0.0123179 0.0201769 0.0151085 0.0138697 0.0108868 0.0123237 0.00820885 0.00938777 0.00885651 
+0.0149063 0.0212549 0.012959 0.020298 0.00900085 0.0138103 0.00818875 0.0148981 0.0153341 0.00844291 0.00993773 0.0079899 0.0112637 0.0084996 0.00997534 0.0199401 0.0107206 0.0139009 0.00872373 0.00833983 0.0142801 0.0138285 0.0164602 0.0131497 0.00819378 0.00781263 0.0113703 0.00913357 0.0109295 0.0120916 0.0112749 0.0122972 0.0135997 0.0127951 0.0207119 0.0090782 0.0173306 0.0127178 0.0111936 0.00840975 0.0121481 0.0086159 0.00999495 0.00944532 0.0110877 0.0114007 0.00954282 0.00847484 0.0157283 
+0.0136951 0.0223832 0.0124686 0.00785013 0.00987724 0.0116808 0.00820833 0.00857517 0.0110849 0.0106047 0.00791872 0.0129586 0.0109218 0.0113774 0.0118702 0.0124808 0.0104141 0.0133277 0.0157505 0.0120121 0.00832354 0.0150398 0.0145849 0.0106571 0.00781995 0.00948235 0.00791023 0.0130278 0.0177706 0.00934947 0.00917786 0.00976387 0.011106 0.00872163 0.0125572 0.00872901 0.00833814 0.0119969 0.0114674 0.00899175 0.00761545 0.0152811 0.0113137 0.0112011 0.0155478 0.0090728 0.00907604 0.00936913 0.00997295 
+0.00950312 0.0266506 0.00981835 0.0127662 0.0137664 0.00990442 0.00792798 0.0123088 0.00768171 0.0101829 0.0113718 0.0184991 0.0102974 0.0118027 0.00956921 0.0102662 0.0093895 0.0101815 0.00963158 0.00755334 0.0108035 0.0190889 0.0148255 0.0130755 0.0107963 0.0128566 0.018184 0.00967175 0.0103734 0.0109859 0.0101677 0.0172851 0.0137611 0.00897412 0.0156099 0.00973854 0.00879859 0.0107113 0.0115928 0.0120723 0.00812917 0.0140335 0.0103966 0.00821714 0.00988885 0.00879647 0.00862479 0.0178016 0.0108844 
+0.00931047 0.0212717 0.0108262 0.0127634 0.0145661 0.00913274 0.00856461 0.00937716 0.0198238 0.00896952 0.0135313 0.00771199 0.0120329 0.0100075 0.0106133 0.00874778 0.0167496 0.00823572 0.00935717 0.00824935 0.0168475 0.00921845 0.00827007 0.0123167 0.00973159 0.00828868 0.00947891 0.0157224 0.0131671 0.00849454 0.0186665 0.00985705 0.00901789 0.0103838 0.00960163 0.014531 0.00818125 0.0126618 0.0158882 0.0115781 0.00928472 0.010543 0.0105983 0.0163608 0.00812475 0.00812854 0.0150405 0.00956537 0.0112847 
+0.0133507 0.0222575 0.0161251 0.0119533 0.0144081 0.0108558 0.0114905 0.0142285 0.0144361 0.00921056 0.00770848 0.00862711 0.0103536 0.00787416 0.00814622 0.00792632 0.0127709 0.0129858 0.0107349 0.0148318 0.00797624 0.00755296 0.00969787 0.0101482 0.00783036 0.017277 0.0134317 0.00821771 0.00886024 0.0104904 0.0176322 0.0121712 0.020734 0.0100178 0.00845033 0.0106503 0.00899612 0.0113405 0.0108297 0.0138385 0.0157567 0.0112884 0.0111557 0.0132281 0.00975073 0.014763 0.0136806 0.0146974 0.0131973 
+0.00757317 0.0316546 0.00916651 0.0139182 0.0100605 0.0148793 0.0132188 0.0123282 0.013338 0.0145837 0.00844133 0.0127421 0.013383 0.00855457 0.0122084 0.0159186 0.00929636 0.00840363 0.011132 0.00982903 0.0123923 0.0103834 0.00992641 0.0109025 0.0101771 0.0114342 0.0235065 0.00930357 0.0146577 0.0136719 0.0112625 0.0153417 0.0109942 0.0109348 0.0136389 0.0123666 0.00907794 0.00952423 0.0104219 0.0224332 0.00993761 0.0173772 0.012886 0.0150848 0.011544 0.00961976 0.00860173 0.0131099 0.0118314 
+0.0120723 0.0255761 0.0112617 0.01382 0.0132065 0.0130526 0.0215032 0.010341 0.00897479 0.0114267 0.0101744 0.0139777 0.0102806 0.010422 0.00785908 0.0123871 0.00796796 0.0106122 0.00882741 0.0149923 0.0127838 0.00863863 0.0122057 0.0079143 0.0122751 0.0116304 0.00776789 0.010707 0.0120565 0.00975561 0.00916567 0.0170236 0.00930545 0.00808708 0.0121516 0.00843588 0.0096937 0.00829772 0.0113528 0.0201463 0.0133134 0.0139057 0.00758521 0.00771479 0.0170862 0.012938 0.0088074 0.0134293 0.0132683 
+0.00803278 0.0209172 0.00757131 0.0113841 0.01333 0.010207 0.00833083 0.0115314 0.00901439 0.0172958 0.00783639 0.0117991 0.00905356 0.0187385 0.0125364 0.0124897 0.00777334 0.00762111 0.00764487 0.0100852 0.0112004 0.00766297 0.011141 0.011336 0.0116206 0.0153244 0.0110559 0.0111093 0.0191496 0.00825756 0.00835087 0.00920095 0.0086951 0.00975748 0.00775385 0.00784449 0.0105598 0.0146987 0.0149052 0.0099039 0.00847096 0.00765091 0.00821896 0.0133852 0.0151837 0.00769759 0.0084672 0.00772901 0.0108049 
+0.0109955 0.0249024 0.0101091 0.00983563 0.00974612 0.0156559 0.0156009 0.0117273 0.00984906 0.00939121 0.0091798 0.0079146 0.00850079 0.0120497 0.0192706 0.00796791 0.0100349 0.00803927 0.00944602 0.0123078 0.0188128 0.0121223 0.0138691 0.0171691 0.0190786 0.00755588 0.0131079 0.0122201 0.0131311 0.00985387 0.0129397 0.00832782 0.00770978 0.0146897 0.0085377 0.0093607 0.0136283 0.00861774 0.0119357 0.00754826 0.00933493 0.0165091 0.0150146 0.0129198 0.00897361 0.0110699 0.010965 0.00913158 0.0094458 
+0.00816776 0.0255658 0.0108657 0.016728 0.0186649 0.00843576 0.00754841 0.0118891 0.0167045 0.00990853 0.00915239 0.0145487 0.0115878 0.0171883 0.0120623 0.0104243 0.0156599 0.0133134 0.00761177 0.0082783 0.00844763 0.0192286 0.0158587 0.0111308 0.0165118 0.008163 0.0112963 0.0105155 0.00872855 0.0145013 0.00883721 0.00903406 0.0113706 0.013245 0.0101755 0.0113627 0.00977604 0.00805712 0.0135069 0.0075737 0.0138943 0.0169186 0.0117394 0.0116851 0.0101898 0.00902569 0.00750745 0.0104441 0.0180939 
+0.00852116 0.0205726 0.0121222 0.0149015 0.0154244 0.0121064 0.0109069 0.0118412 0.0111598 0.00890083 0.00910357 0.00800454 0.00882927 0.0133648 0.0176747 0.00950788 0.00813835 0.00806194 0.0166886 0.0144441 0.0104103 0.0146281 0.0100274 0.0149547 0.00857742 0.009651 0.00830572 0.0122816 0.0108633 0.0082782 0.0141194 0.0094251 0.0115744 0.00767372 0.0124238 0.00836088 0.0103497 0.0103135 0.00950646 0.0139942 0.00843365 0.00868824 0.00761651 0.013486 0.0103854 0.01307 0.0100172 0.00912684 0.00886648 
+0.0129786 0.0289352 0.00844202 0.0107492 0.011887 0.00897751 0.0107938 0.00837794 0.00976195 0.0118045 0.0125038 0.00778407 0.0152679 0.00939623 0.0104466 0.0149055 0.0114223 0.0165696 0.0162068 0.0150471 0.00937458 0.0120342 0.00796215 0.00998259 0.0107024 0.0128497 0.00851376 0.0148639 0.00797989 0.0115649 0.0105127 0.0177058 0.00805555 0.0109158 0.0128019 0.0157836 0.0116487 0.0133724 0.0135865 0.0131214 0.0112112 0.0115187 0.0121206 0.0104741 0.015845 0.00882153 0.0111644 0.0139008 0.0116919 
+0.00971783 0.0229114 0.0128805 0.0158851 0.0152331 0.0124343 0.00779956 0.0124177 0.0105913 0.0126525 0.0141405 0.0128456 0.0110557 0.0107332 0.00822904 0.0100937 0.00954246 0.0124383 0.0128021 0.0089498 0.0117146 0.0163783 0.0152088 0.0155538 0.0101817 0.0108035 0.00916592 0.00766908 0.0132058 0.0115911 0.00851837 0.0150466 0.0152156 0.0208577 0.00839355 0.0097649 0.00816572 0.0139933 0.0124919 0.0141569 0.0139173 0.00758375 0.0133833 0.0100039 0.0092417 0.0150639 0.011603 0.016477 0.00799832 
+0.00790319 0.0235324 0.0111015 0.00967112 0.0174059 0.0109606 0.00799718 0.0125871 0.00852947 0.00817606 0.00753688 0.00934729 0.00901239 0.0083186 0.0115995 0.0111184 0.0128168 0.0170916 0.0140882 0.00984907 0.0203378 0.00933781 0.00873858 0.0169729 0.0124777 0.0121524 0.0134012 0.00895315 0.0130562 0.0123688 0.020535 0.007508 0.0156554 0.0117507 0.0156376 0.0104359 0.00988385 0.0104138 0.0108033 0.0123854 0.0102539 0.00805637 0.00824154 0.0169302 0.00815921 0.0112258 0.0111159 0.00820837 0.0173984 
+0.00838806 0.0253994 0.0164964 0.0153526 0.00792645 0.0142281 0.00823136 0.00793983 0.00896964 0.0125574 0.0084571 0.0114111 0.0103704 0.0115691 0.0117722 0.0105804 0.0136371 0.0147888 0.00789175 0.0116784 0.0118744 0.0103136 0.00823018 0.0119564 0.00910947 0.00955901 0.0107229 0.00882304 0.00809067 0.00863467 0.0158661 0.0128813 0.011772 0.00854266 0.013148 0.00895431 0.0164438 0.0112154 0.0157801 0.00851084 0.0146646 0.0104425 0.0123866 0.0101705 0.00863098 0.0105372 0.0190253 0.0105951 0.0120388 
+0.0126034 0.0248546 0.0105986 0.015903 0.0083998 0.0117779 0.0090602 0.0127347 0.01536 0.0162639 0.0141152 0.0124823 0.0123341 0.0163151 0.0103473 0.00849348 0.00778947 0.0170963 0.00797977 0.0108984 0.0117723 0.0113179 0.00856268 0.00920236 0.0170649 0.00845961 0.0111051 0.0125911 0.0125433 0.0120349 0.0164834 0.0151292 0.0137128 0.0111898 0.00755945 0.0119485 0.0173044 0.0139533 0.00882071 0.00964128 0.00760202 0.011044 0.00769635 0.0111013 0.00780582 0.0110244 0.010414 0.00832692 0.0165455 
+0.00973368 0.0213454 0.00933092 0.00870622 0.00874845 0.0126756 0.00966286 0.0126159 0.0104429 0.0100714 0.00799636 0.0120667 0.0149694 0.00789823 0.00942509 0.0126152 0.0108931 0.0138853 0.0129878 0.0120057 0.014164 0.00845237 0.0188999 0.0101422 0.00995276 0.00941929 0.010031 0.00774711 0.0128427 0.0128063 0.0120045 0.0134737 0.00866746 0.00988377 0.01029 0.0146154 0.0109321 0.0106991 0.0123186 0.00965139 0.0144665 0.0137031 0.0128053 0.0171564 0.0106439 0.00871359 0.00933303 0.00854897 0.00801469 
+0.0207639 0.0302596 0.00921102 0.0112939 0.00895949 0.0195714 0.0082511 0.00851709 0.0201907 0.0104338 0.00789831 0.00779601 0.0116592 0.0115129 0.00933967 0.0106008 0.00993621 0.00977606 0.00969845 0.00859994 0.010113 0.0101853 0.00768342 0.00829759 0.00896101 0.00914328 0.0121011 0.00789938 0.0114143 0.00778448 0.00930274 0.0110046 0.0151578 0.0115392 0.0119695 0.00916508 0.0113961 0.00925488 0.0119853 0.0115414 0.00800942 0.0085351 0.013696 0.00978607 0.0117781 0.00836317 0.0115831 0.00784782 0.00988281 
+0.0125919 0.0225245 0.00872785 0.0079511 0.010217 0.0138643 0.0128534 0.0106925 0.0134129 0.0123817 0.0148298 0.0159778 0.0112608 0.0126049 0.00781257 0.00897772 0.00770906 0.0101846 0.00861081 0.0173156 0.0113365 0.0206655 0.00952747 0.00824274 0.0130284 0.00934343 0.0162803 0.0106074 0.0105148 0.0114925 0.00898429 0.0081491 0.00924381 0.00865159 0.0085172 0.0127236 0.0155168 0.0151714 0.0129874 0.00857607 0.00787606 0.0125513 0.0118531 0.0168491 0.00761251 0.00816883 0.0097344 0.0145175 0.00858486 
+0.0116113 0.0273119 0.0139355 0.0114967 0.0115408 0.0161081 0.0152266 0.00898215 0.0111644 0.00940009 0.00859884 0.0155261 0.0139528 0.0109696 0.0111752 0.00762303 0.0168868 0.00788242 0.0123671 0.0113137 0.0150454 0.00905556 0.012208 0.0104372 0.0109824 0.00886656 0.00773069 0.0139625 0.0166492 0.0121905 0.0106001 0.00935319 0.0106352 0.0126785 0.0141775 0.00966866 0.00980012 0.0101705 0.0105319 0.0129313 0.0111178 0.0124385 0.0111511 0.010093 0.00932455 0.0104925 0.00981385 0.0129528 0.00940077 
+0.0102708 0.025453 0.0161008 0.0115701 0.00894551 0.0206021 0.0126973 0.0127642 0.0127999 0.0137975 0.00949311 0.00851046 0.00922536 0.0171908 0.0110097 0.0131312 0.0114842 0.00888155 0.00923136 0.0114026 0.0123358 0.0091245 0.00871509 0.0131133 0.00930696 0.01481 0.00886211 0.0118571 0.0133691 0.00892296 0.0153625 0.0128242 0.0164881 0.0186806 0.00840839 0.0115504 0.0157651 0.0100688 0.0193315 0.0142515 0.0177245 0.00852747 0.0148372 0.00774316 0.0124167 0.0227457 0.00919254 0.0167126 0.0101306 
+0.00949793 0.022419 0.0114635 0.00787325 0.0151513 0.00994148 0.011955 0.0144922 0.0139474 0.00849281 0.0123349 0.0154528 0.0175107 0.0146296 0.00787882 0.01191 0.0135602 0.0111431 0.00991613 0.00928491 0.00862271 0.014956 0.0114031 0.0141196 0.00886118 0.00936798 0.0124562 0.00866346 0.0109636 0.00942369 0.0158231 0.0107395 0.0148939 0.00867654 0.00843151 0.00843801 0.00987098 0.0136324 0.0125352 0.0113096 0.0100173 0.00963262 0.0106216 0.0133212 0.00942482 0.008157 0.0149036 0.00775486 0.0110296 
+0.0112069 0.0272446 0.00854308 0.0178902 0.011465 0.00939404 0.017011 0.00996359 0.0145181 0.0150909 0.0097186 0.00829955 0.0109255 0.0141977 0.0144254 0.00969187 0.0159607 0.0150539 0.0202983 0.00827695 0.00834562 0.0174092 0.0174481 0.00861657 0.0168125 0.0195186 0.00948454 0.0140216 0.0160348 0.014776 0.00958188 0.0156618 0.0095608 0.0120682 0.00889043 0.0132259 0.0133092 0.00934697 0.0138392 0.0139268 0.0097421 0.017502 0.0113001 0.0105501 0.0128437 0.0114893 0.0129684 0.0103808 0.010232 
+0.0111391 0.0237827 0.0145166 0.0132462 0.0184881 0.0136112 0.00767246 0.01169 0.0131049 0.0162347 0.0135606 0.0165905 0.0164936 0.014142 0.014366 0.00985739 0.0147803 0.00989201 0.00982342 0.0089444 0.0119379 0.00867752 0.00979992 0.0116547 0.0134995 0.010695 0.0128166 0.013538 0.0174419 0.0100733 0.0092548 0.012256 0.0110311 0.00905062 0.00870131 0.0110167 0.00758026 0.00787282 0.00750765 0.0129866 0.00770725 0.0115635 0.0110494 0.0100412 0.00816833 0.0167224 0.00758952 0.0105225 0.0102737 
+0.00911587 0.0233013 0.0083151 0.0173036 0.00955425 0.015144 0.0181004 0.00954625 0.0116367 0.00798605 0.00866689 0.00782728 0.0114024 0.00900233 0.0160443 0.0112031 0.0139449 0.0156776 0.00907191 0.00975704 0.0111857 0.0083113 0.0136895 0.00835854 0.0130534 0.0109855 0.00986861 0.0101119 0.0130915 0.019609 0.00949442 0.0103352 0.0137966 0.0144428 0.00892645 0.013021 0.00753629 0.00814756 0.0114642 0.0114017 0.0151005 0.0080772 0.010644 0.0096878 0.00945567 0.00820002 0.0107892 0.0122035 0.0146939 
+0.00948907 0.021767 0.00802098 0.00817442 0.0149274 0.0139743 0.00988414 0.0159532 0.0154766 0.0123433 0.013743 0.0141259 0.0112577 0.0153047 0.00914512 0.0102263 0.00841865 0.010485 0.0125646 0.0149482 0.0106739 0.0116981 0.0103237 0.0148731 0.00752372 0.0131573 0.00956262 0.0100887 0.00842314 0.0131717 0.0100196 0.00845348 0.0108719 0.0134481 0.00998926 0.0116886 0.00990498 0.0151376 0.0132805 0.0138879 0.0133914 0.00917293 0.00870911 0.0135626 0.00899951 0.00796089 0.00878003 0.0101218 0.0130878 
+0.00879304 0.026332 0.0115077 0.0102817 0.0102767 0.0194288 0.00758405 0.0123007 0.00910844 0.0144404 0.0103388 0.00984202 0.0104896 0.00955573 0.0140721 0.00806365 0.00959985 0.0147091 0.00897474 0.00800116 0.0110669 0.0132495 0.0110214 0.0207907 0.00835903 0.00918193 0.0146073 0.00812745 0.0118054 0.0112085 0.00760006 0.00766575 0.00827451 0.0131362 0.0170556 0.00837341 0.0179868 0.0092481 0.0102636 0.00962309 0.0152318 0.0102423 0.0152355 0.00750615 0.00829509 0.00889198 0.0081398 0.00808986 0.014526 
+0.0153858 0.0243443 0.0108733 0.00761468 0.00765843 0.0135658 0.0102784 0.00755595 0.00932887 0.0112079 0.0136589 0.02018 0.00867432 0.0102834 0.00880161 0.0146448 0.00882262 0.00901113 0.00898133 0.0114991 0.0116231 0.008646 0.0125625 0.0115526 0.00796062 0.012466 0.00944426 0.0104905 0.0106008 0.00849779 0.0144311 0.0121217 0.00969907 0.0114571 0.0108629 0.0142184 0.0102308 0.00876892 0.0154134 0.0109281 0.00946842 0.0131872 0.017346 0.0128602 0.0123659 0.00997315 0.00909212 0.0164329 0.0179937 
+0.0097369 0.0206338 0.0117702 0.0130346 0.0137561 0.0110761 0.0138221 0.0140112 0.00990652 0.00863303 0.0125001 0.00816226 0.00878419 0.00831256 0.0143745 0.007553 0.00898112 0.00903825 0.00786233 0.00799705 0.0111157 0.00762704 0.0161722 0.0115543 0.00996799 0.0121211 0.0143124 0.0133965 0.00932923 0.0100121 0.0162585 0.00809587 0.0110415 0.00755567 0.0113767 0.0149726 0.0165917 0.00937406 0.0232884 0.0101733 0.00872945 0.0133672 0.0103846 0.0186145 0.0145743 0.00837608 0.00988489 0.0123534 0.0146489 
+0.0101712 0.0204331 0.00790912 0.00809119 0.00777641 0.0122152 0.00949551 0.0132367 0.00839406 0.00817484 0.0133188 0.00973259 0.00891589 0.00838892 0.00983956 0.0112519 0.0101722 0.0121647 0.0125072 0.0110025 0.0104913 0.0109601 0.0106955 0.00833219 0.0148282 0.0119938 0.0132905 0.00949414 0.0133034 0.00764992 0.0133496 0.0129774 0.012058 0.0133022 0.011975 0.0130977 0.0095401 0.010568 0.0106268 0.00852929 0.00910748 0.00767768 0.0137776 0.00760561 0.0190109 0.0110245 0.0128955 0.00939386 0.0113458 
+0.00827374 0.0251757 0.0121353 0.00850211 0.00797146 0.0169248 0.0157808 0.0108443 0.00988492 0.0153356 0.0110898 0.010639 0.0187146 0.0110922 0.00757705 0.0155794 0.00939305 0.00778932 0.015371 0.0122381 0.0101932 0.0103607 0.0136196 0.0128625 0.0137172 0.010893 0.0114684 0.0117487 0.0138237 0.00908936 0.0124191 0.00794224 0.012688 0.0197028 0.0127473 0.00949431 0.00853454 0.0115539 0.0169602 0.0121847 0.00826065 0.0144138 0.00930039 0.00873084 0.0136588 0.00828384 0.0114327 0.00953625 0.0113293 
+0.0113127 0.0210537 0.0143738 0.0137528 0.00951468 0.0102551 0.00902772 0.0128791 0.0110488 0.0191901 0.0145764 0.013239 0.00838651 0.0144335 0.0084625 0.0125919 0.00979329 0.00937839 0.0105994 0.0124972 0.00838924 0.0126485 0.00853518 0.00761188 0.0188739 0.0107682 0.00819324 0.0117259 0.0158024 0.0118522 0.0108466 0.00811003 0.0106455 0.0116571 0.00797254 0.0123537 0.0168134 0.0115286 0.0127207 0.0167774 0.00837524 0.00882726 0.00842221 0.00756125 0.0122617 0.0088207 0.0097199 0.00908593 0.0083942 
+0.011705 0.0207155 0.0152487 0.0075282 0.0152447 0.0101465 0.00843718 0.0118198 0.014138 0.0113742 0.00841356 0.00994094 0.00884302 0.0094945 0.00967041 0.0155148 0.00886344 0.00783294 0.0133801 0.00961572 0.00752072 0.0113831 0.00910355 0.0195317 0.00937186 0.0121532 0.0140828 0.0118588 0.00977985 0.00851993 0.0161894 0.012683 0.0130459 0.0100294 0.00818087 0.00970356 0.0171704 0.0123165 0.00926904 0.00861139 0.00852465 0.0163877 0.0111038 0.0140121 0.0102067 0.0101279 0.0084616 0.00768695 0.0082938 
+0.0109417 0.0278678 0.0115426 0.00929491 0.00946448 0.00760588 0.00768034 0.0141711 0.00756956 0.0106113 0.0120755 0.0129529 0.00958833 0.0121574 0.00889081 0.0133243 0.00849482 0.0103025 0.00886195 0.0115098 0.0123605 0.009946 0.00855143 0.00836292 0.0109544 0.00941135 0.0132882 0.00839464 0.00937392 0.0105541 0.0130762 0.0129366 0.0127021 0.0143427 0.00868624 0.0251505 0.0195877 0.00831681 0.0180369 0.0100545 0.00755814 0.0123776 0.0141274 0.00763343 0.00753381 0.0226265 0.0129672 0.00837964 0.0146988 
+0.00983075 0.022107 0.0173331 0.0129429 0.0176057 0.0134848 0.010576 0.0139869 0.0105101 0.0130117 0.0140576 0.0105017 0.0102882 0.0105781 0.00897215 0.00867277 0.0110143 0.016271 0.0121182 0.0116707 0.00795979 0.00865818 0.0144063 0.0127252 0.0144501 0.0101421 0.013125 0.0196909 0.00898426 0.010581 0.0100945 0.00801164 0.0135221 0.0111198 0.00806585 0.0213253 0.0100274 0.0144109 0.0153699 0.0122143 0.00918134 0.00916568 0.011621 0.0150439 0.00872375 0.0176738 0.00782867 0.0151941 0.0106163 
+0.0126987 0.0229007 0.0075607 0.0143304 0.0114757 0.0109198 0.0078244 0.0127706 0.0100428 0.0101192 0.0165348 0.0103811 0.00811427 0.0101145 0.00897922 0.0139112 0.0123679 0.00917989 0.0123572 0.0149642 0.00855637 0.00881887 0.00820185 0.00787485 0.012718 0.0205514 0.0110306 0.0117908 0.00860124 0.0112494 0.0149107 0.0148037 0.0150269 0.00883738 0.0127948 0.0110679 0.01341 0.00750559 0.0121756 0.0131417 0.01218 0.012626 0.0100698 0.00782545 0.00828973 0.00868378 0.00840721 0.00818535 0.0133931 
+0.0102257 0.027918 0.00787297 0.0133354 0.0147737 0.0109556 0.0152169 0.0132703 0.010289 0.0115668 0.0091825 0.0107671 0.0133121 0.0122393 0.010704 0.0120498 0.00803717 0.0106021 0.00829115 0.0113567 0.0107112 0.0117996 0.00896112 0.0118919 0.0123738 0.0126902 0.0093068 0.00859058 0.0105693 0.0122587 0.00888327 0.00759207 0.0191256 0.0107541 0.00983975 0.0117202 0.0105958 0.0128959 0.00800243 0.0115068 0.0107724 0.00840248 0.00908611 0.0129777 0.0126822 0.0127465 0.00822219 0.00994533 0.0139303 
+0.01393 0.0233185 0.00984235 0.00967116 0.015753 0.00814746 0.0085956 0.0130108 0.0110582 0.0096444 0.0114632 0.0217317 0.0161044 0.015933 0.014491 0.0115375 0.00760199 0.016022 0.00850128 0.0107254 0.0109347 0.0101816 0.00847551 0.0157225 0.010204 0.00792328 0.0103398 0.0119777 0.00946288 0.00792099 0.011687 0.0117966 0.00933882 0.00921819 0.011287 0.0107074 0.0210967 0.013812 0.0102911 0.0125356 0.00977311 0.0148956 0.00945254 0.0188149 0.0116953 0.0106693 0.0184994 0.013314 0.0148077 
+0.00987667 0.02296 0.00831947 0.00776761 0.0117147 0.00911469 0.010755 0.0119091 0.00809856 0.00948884 0.0117268 0.00885928 0.0107636 0.0100157 0.0103407 0.0112012 0.0132345 0.0083606 0.0108678 0.0137489 0.00812977 0.012016 0.0123967 0.00838596 0.0104066 0.00985035 0.0173604 0.00935084 0.0215164 0.00841426 0.0129268 0.0104227 0.0119512 0.00862789 0.0146201 0.00877835 0.013587 0.0130225 0.0119491 0.0172831 0.0176339 0.00777948 0.0109782 0.0140325 0.0168951 0.00762716 0.00774235 0.0196693 0.00880236 
+0.00765074 0.0232714 0.0105446 0.0136031 0.014729 0.0135182 0.00854858 0.011058 0.0125898 0.0130737 0.0145321 0.00869487 0.00764535 0.012576 0.00932989 0.00794551 0.0164383 0.0158679 0.00991578 0.00959083 0.0120535 0.0176219 0.00925489 0.010526 0.0155791 0.00919009 0.0123899 0.0125916 0.00847203 0.00970496 0.00848572 0.0134509 0.00966093 0.0163208 0.0110939 0.0159553 0.014289 0.00771749 0.0110259 0.013375 0.00922792 0.00821641 0.0116486 0.0143069 0.0136127 0.0157618 0.0105326 0.00899571 0.00774907 
+0.00909697 0.0214162 0.0158611 0.00768949 0.0110213 0.00873957 0.0127007 0.00765071 0.00816289 0.0125967 0.0118592 0.0102903 0.0130878 0.0187603 0.0180501 0.0105242 0.0154561 0.014303 0.0133177 0.0110271 0.0116074 0.00769644 0.0191824 0.00866946 0.0114176 0.0111474 0.00947195 0.00801283 0.00759541 0.0083278 0.00937849 0.0155311 0.0119323 0.0175447 0.00873595 0.0125612 0.0163533 0.0132767 0.0126399 0.0145919 0.0134597 0.0115377 0.0172491 0.00863526 0.0101643 0.0145975 0.00913221 0.0131374 0.0113838 
+0.00788248 0.021498 0.00827927 0.0127234 0.0229448 0.0107585 0.0140549 0.00968618 0.0123351 0.0117977 0.00892402 0.00999836 0.00759201 0.0140604 0.0131339 0.00844792 0.00935001 0.0101972 0.0108008 0.0139108 0.00766936 0.00847795 0.00800967 0.0162745 0.0144014 0.0116628 0.00989151 0.0123791 0.00944051 0.0137685 0.0101494 0.0100635 0.00902156 0.0123614 0.0182608 0.0138456 0.00829875 0.0103482 0.0197994 0.00903373 0.0132794 0.0091919 0.00880481 0.009586 0.0114356 0.0113806 0.0130965 0.00800096 0.00866452 
+0.0201254 0.0222382 0.00944811 0.0101604 0.0100038 0.0131513 0.0132618 0.00803858 0.013152 0.0101965 0.0122165 0.0141532 0.0130336 0.0131043 0.00830514 0.0137026 0.0117982 0.00825661 0.0102438 0.00785197 0.00820101 0.00848317 0.0111416 0.0164459 0.019478 0.0193448 0.0159232 0.0113944 0.0221068 0.0110601 0.0129015 0.0126173 0.0122514 0.0162261 0.00813594 0.00801 0.0104284 0.0145931 0.0117998 0.0103684 0.0153803 0.0123371 0.0163547 0.0152386 0.00832999 0.00846248 0.0156608 0.0112197 0.00823777 
+0.00764168 0.0126484 0.0230105 0.0104165 0.0156115 0.00841817 0.0135565 0.00871995 0.0153296 0.00771436 0.00880684 0.00967623 0.00797091 0.00959075 0.0177778 0.0089844 0.00782083 0.00774189 0.0148468 0.0102188 0.0189107 0.0108572 0.0129114 0.0108314 0.010969 0.0120859 0.0168999 0.00769284 0.00897377 0.00794035 0.0173879 0.0141403 0.00866515 0.0150242 0.0122666 0.0181444 0.0114562 0.0113557 0.0135812 0.00901969 0.0098019 0.013859 0.0129119 0.00848159 0.013666 0.00950731 0.00813539 0.012093 0.00940506 
+0.0075106 0.00905345 0.0224812 0.0102122 0.00890096 0.0151734 0.0144503 0.00806235 0.0148231 0.0182744 0.00867049 0.0123656 0.0116066 0.00767773 0.011868 0.00997198 0.0131304 0.0131469 0.0107145 0.00806584 0.0140984 0.00771124 0.0121408 0.0131947 0.00906139 0.0159156 0.00861005 0.0107816 0.0213066 0.00855229 0.0127981 0.0166143 0.0108062 0.0158642 0.00831508 0.00823525 0.0101702 0.0122099 0.0146826 0.0130393 0.0132136 0.0112152 0.00995753 0.0114338 0.00855652 0.0124759 0.00911219 0.00860892 0.00787939 
+0.0113172 0.0112069 0.0239106 0.011519 0.0100094 0.00769998 0.00884556 0.0108098 0.0115115 0.0100932 0.0105567 0.00840721 0.00959704 0.0115982 0.01481 0.011289 0.0140722 0.0108015 0.00941295 0.015969 0.0177054 0.0120477 0.00875758 0.0119941 0.0115399 0.0147543 0.0133939 0.0218081 0.00814408 0.00804521 0.0165441 0.0107537 0.0145468 0.00955961 0.011432 0.0103134 0.0137152 0.0098306 0.0114712 0.00875457 0.020783 0.0124064 0.010763 0.00851077 0.00974855 0.01161 0.00758752 0.0099065 0.0129115 
+0.0104302 0.020681 0.0341576 0.0119758 0.00779795 0.010878 0.0129573 0.0148032 0.0101535 0.00782686 0.00949278 0.00986503 0.017033 0.014135 0.0122921 0.010136 0.00760893 0.0134817 0.0148405 0.0151213 0.00820814 0.00898511 0.0106513 0.00863311 0.0108657 0.0119623 0.0120308 0.0108416 0.0114063 0.0145675 0.017643 0.00770984 0.0166035 0.00972981 0.0113184 0.00930196 0.00752699 0.0161066 0.0183984 0.0130279 0.0109317 0.00750941 0.0101594 0.0130217 0.00968345 0.0199435 0.016425 0.00926529 0.0108241 
+0.00842985 0.0116583 0.0239661 0.0193098 0.0160614 0.0106777 0.0136834 0.0128487 0.008635 0.0117674 0.00988806 0.00797833 0.0101427 0.0126712 0.00935477 0.0111085 0.00847387 0.00904561 0.0145591 0.0093536 0.00875963 0.00769103 0.0104163 0.00839756 0.0148411 0.00887104 0.0141076 0.0136605 0.0133822 0.0177023 0.0131952 0.0105551 0.0177548 0.0117743 0.015945 0.00839919 0.0173193 0.00985752 0.00984488 0.00793236 0.00821181 0.0155575 0.0108483 0.0137763 0.0144704 0.0184506 0.0136829 0.0121279 0.0122903 
+0.0150556 0.0144245 0.0320712 0.0079419 0.0102347 0.00828416 0.00997947 0.0148921 0.0178198 0.0125915 0.0106039 0.00976146 0.00790479 0.0143079 0.0079744 0.0083561 0.0191093 0.00961766 0.00754107 0.0153945 0.01789 0.0083027 0.00840771 0.0110594 0.0182871 0.0135208 0.0104117 0.013121 0.0132309 0.0118168 0.0103677 0.014463 0.00996247 0.0114514 0.0132421 0.0112798 0.0127602 0.00985035 0.0124105 0.0145872 0.0171717 0.0164293 0.00813592 0.0104492 0.0138917 0.0113236 0.00914717 0.00947034 0.0142414 
+0.0123704 0.00783529 0.0210885 0.0127015 0.0127479 0.0137197 0.0174801 0.0104512 0.014899 0.0119558 0.0078553 0.0115179 0.00816551 0.0077331 0.00888904 0.00995901 0.00851453 0.0177223 0.00861482 0.0130459 0.0133419 0.0161999 0.0075903 0.0105314 0.00866041 0.00995943 0.00821797 0.0100352 0.014138 0.0133265 0.0141092 0.00950137 0.00856584 0.00778119 0.016309 0.00924851 0.010069 0.0115427 0.0207616 0.00829647 0.0128691 0.0167283 0.0112638 0.0157415 0.00794426 0.0089895 0.0159371 0.0093016 0.00909894 
+0.0100089 0.0139208 0.0235168 0.00771069 0.0105458 0.00879662 0.0134831 0.0126753 0.0103809 0.0113404 0.00974845 0.0130035 0.0133396 0.00870825 0.0127044 0.00881419 0.00813668 0.013053 0.00899747 0.00909621 0.0126025 0.010059 0.0110158 0.00885818 0.0127099 0.0125338 0.0109845 0.00756816 0.0104291 0.0126954 0.0121969 0.00804388 0.0143856 0.00854188 0.0115734 0.0102478 0.0187246 0.0112893 0.0150119 0.00977175 0.0118713 0.0122003 0.0084612 0.0090514 0.0101114 0.0104241 0.00799327 0.0185359 0.00973914 
+0.00962295 0.0113295 0.0257345 0.00893206 0.00997418 0.0101334 0.0115945 0.0126395 0.00904426 0.0103381 0.00910468 0.00981648 0.0116458 0.0100866 0.0145031 0.00926139 0.0134746 0.0117218 0.0116152 0.00871872 0.00847218 0.010985 0.00958883 0.0136977 0.0112829 0.0113791 0.0092345 0.00845356 0.0137225 0.0120203 0.0141264 0.020026 0.0096278 0.0122282 0.0180575 0.01084 0.0091869 0.0108086 0.00966318 0.00913813 0.0091568 0.0112805 0.00764796 0.00802639 0.00837611 0.00758026 0.0131511 0.0101646 0.0123427 
+0.0161961 0.0101601 0.0233183 0.00860849 0.0109066 0.0144746 0.0130843 0.0127659 0.0135531 0.0186367 0.0186101 0.00853118 0.00771378 0.00818142 0.00835735 0.0140488 0.0144469 0.010657 0.010951 0.00945288 0.0115504 0.0123779 0.0110594 0.0164135 0.0137209 0.00929199 0.00914049 0.0105935 0.0170752 0.0194339 0.0110833 0.0104639 0.00934008 0.0108329 0.0083529 0.00968863 0.0112075 0.00896931 0.0148413 0.0103983 0.0134676 0.0121498 0.0119313 0.00776565 0.009707 0.0103674 0.0084636 0.0128797 0.00836471 
+0.00868783 0.0125536 0.0253106 0.0087953 0.00996643 0.0109312 0.00907963 0.00978649 0.0107347 0.0122464 0.0127795 0.00836102 0.01342 0.0100521 0.015077 0.0120222 0.015058 0.0089105 0.0147931 0.0138512 0.0121619 0.0116615 0.0187896 0.0107166 0.00908538 0.008893 0.00943697 0.0108028 0.0124738 0.0094311 0.00947726 0.0128943 0.0113159 0.0127814 0.00846501 0.0126336 0.00906144 0.0104129 0.00956887 0.0125608 0.00919229 0.0128424 0.0104112 0.012688 0.00808584 0.0105043 0.00911026 0.0123063 0.0139241 
+0.0139146 0.0125389 0.0252688 0.00861564 0.0183847 0.011136 0.010072 0.00902131 0.0154392 0.00852487 0.00973004 0.0104679 0.00860781 0.0130281 0.0140839 0.0120112 0.0101229 0.0079446 0.0081942 0.00843187 0.00837764 0.00755949 0.0125053 0.0123848 0.00982596 0.0100942 0.00754747 0.00863919 0.0095872 0.0103136 0.0124733 0.00985111 0.00996041 0.00828818 0.015331 0.00817586 0.00797321 0.0104069 0.011341 0.014998 0.0117018 0.00924404 0.00776046 0.0121482 0.0115773 0.0100378 0.0105344 0.0118343 0.0105633 
+0.0134106 0.00963797 0.0217813 0.0105701 0.00875567 0.0124175 0.00759184 0.0148966 0.00857576 0.014321 0.00973865 0.0107557 0.0154607 0.00855962 0.00964446 0.00904494 0.0109691 0.00863798 0.00752947 0.0120623 0.0100218 0.0106181 0.0202118 0.0140897 0.00968971 0.0131774 0.00981573 0.00935469 0.0141551 0.0144094 0.013019 0.00857478 0.0107662 0.0125932 0.00935797 0.00845116 0.00768182 0.00882847 0.015329 0.0155858 0.00807462 0.0193427 0.00973681 0.00832105 0.00871719 0.00931971 0.0139212 0.0120613 0.00987758 
+0.0109859 0.00885845 0.0241467 0.0115446 0.0140071 0.00815091 0.0129175 0.0118188 0.0122007 0.0138906 0.0100633 0.00814199 0.0101043 0.00977005 0.0145775 0.0115953 0.0130692 0.0132509 0.0145579 0.0102481 0.0188523 0.00931528 0.0131969 0.00813627 0.0139732 0.00753529 0.0141665 0.00811139 0.00968957 0.013399 0.0116497 0.0108029 0.0137134 0.0152664 0.0107242 0.00888746 0.00988936 0.00925132 0.0174508 0.016145 0.0134893 0.0113448 0.0144625 0.0148933 0.00918787 0.0121898 0.0138205 0.0134483 0.00825392 
+0.00995247 0.0131395 0.0247869 0.0125854 0.0138368 0.0080786 0.00801479 0.0159455 0.0109062 0.00832218 0.0141729 0.0127139 0.0134351 0.0105158 0.00852197 0.00791615 0.0107584 0.0117814 0.0238455 0.0163327 0.0106855 0.0138213 0.0148461 0.0112349 0.017634 0.0111021 0.0126928 0.0110513 0.00941875 0.0152726 0.0108583 0.0144212 0.0112602 0.011277 0.011598 0.0178644 0.0115558 0.00764972 0.0111486 0.0111895 0.0154236 0.0100611 0.0103689 0.021965 0.0142007 0.0101295 0.011833 0.00890362 0.0116748 
+0.00859156 0.00775239 0.0218845 0.0151381 0.020182 0.00952944 0.00790989 0.0193358 0.0106388 0.0106541 0.0099987 0.0134071 0.0182958 0.00776494 0.0126733 0.0101838 0.0130709 0.0192457 0.00885684 0.00915037 0.0123865 0.0157005 0.00884128 0.00951834 0.011077 0.0119219 0.0151222 0.00876509 0.0119032 0.0126904 0.0115483 0.0111714 0.0142407 0.00898703 0.0116697 0.00798791 0.010128 0.0080347 0.00839506 0.0119162 0.00930847 0.00884288 0.00898779 0.00923705 0.0101195 0.00768234 0.0099016 0.00921312 0.0094782 
+0.00774841 0.0115025 0.0258726 0.010439 0.01933 0.00831796 0.0112306 0.00901766 0.00800044 0.0111945 0.00978058 0.00863968 0.014603 0.0103301 0.00989767 0.0180338 0.0147345 0.00890185 0.0143002 0.0172769 0.0202698 0.0150716 0.0124557 0.00874757 0.00932352 0.00782629 0.0123121 0.00815988 0.0150227 0.0103242 0.00980298 0.013517 0.0106365 0.0187941 0.00968282 0.0125772 0.00918889 0.0122207 0.013369 0.0107439 0.0129822 0.011957 0.0152148 0.0105788 0.0138885 0.0153187 0.009106 0.0110161 0.00874364 
+0.0184227 0.00779894 0.0210283 0.008521 0.0124139 0.0118359 0.0199954 0.00838303 0.00966765 0.010522 0.0105595 0.01525 0.0142252 0.00978268 0.0111702 0.0132608 0.0160711 0.0124382 0.00783503 0.00763183 0.00876614 0.0177611 0.0116925 0.0116517 0.0108655 0.0139154 0.00868276 0.00936977 0.00950444 0.0121467 0.0161873 0.00990076 0.0137855 0.00769336 0.00868235 0.00773555 0.0135486 0.0130167 0.0151815 0.0185735 0.0147726 0.0216887 0.0117114 0.0174715 0.0191175 0.0163739 0.00888401 0.0171241 0.0171718 
+0.0198671 0.0158433 0.0252777 0.00773802 0.00793059 0.00895468 0.00808429 0.00895008 0.0117688 0.0134706 0.0099741 0.0121687 0.0143376 0.0110646 0.0125927 0.00936633 0.00802971 0.012236 0.0162339 0.011717 0.00910268 0.0140897 0.0135515 0.0124077 0.0116335 0.0176466 0.00777531 0.0202474 0.0102814 0.00853543 0.0212056 0.018903 0.0106519 0.00843905 0.00808486 0.0107486 0.014425 0.0138187 0.0111296 0.0109754 0.0159908 0.0129835 0.0150093 0.0104203 0.0134097 0.00855073 0.0108149 0.0149157 0.0116743 
+0.0152409 0.0192845 0.0292109 0.0156155 0.00851637 0.00999159 0.0146146 0.0115099 0.00829983 0.0107979 0.0180484 0.0108741 0.0104735 0.0136641 0.010342 0.0110425 0.00824294 0.0123612 0.00802886 0.0130029 0.0117876 0.0143988 0.00954937 0.00901185 0.00829761 0.0154293 0.0103108 0.00870391 0.0106134 0.0102049 0.00817554 0.0076947 0.0136909 0.0162045 0.00910305 0.0155838 0.00782449 0.00964188 0.0110266 0.00981736 0.0144848 0.0111642 0.0132136 0.00959523 0.0110957 0.0114765 0.00884466 0.013655 0.0194158 
+0.00768915 0.0174124 0.0279806 0.0123129 0.00901758 0.0104237 0.0090769 0.0120688 0.0169897 0.0116229 0.0161726 0.0157694 0.0110289 0.00946359 0.0133498 0.00877964 0.00957379 0.0104815 0.0174654 0.011165 0.01008 0.0108337 0.0158219 0.00819278 0.0150143 0.0129431 0.00960865 0.0124196 0.00751531 0.00911116 0.0104065 0.0160058 0.0114029 0.0127703 0.00868456 0.00913931 0.0152828 0.0116009 0.00787301 0.0141963 0.0170866 0.0101728 0.00788144 0.00858623 0.0141309 0.0115588 0.00991466 0.0134934 0.0077779 
+0.00932098 0.0159831 0.0211103 0.0117281 0.0154403 0.0125459 0.00794171 0.0124239 0.00978349 0.0157523 0.0114212 0.0130223 0.00839278 0.0116368 0.0124874 0.0127075 0.014662 0.00858233 0.0123881 0.0127916 0.00988765 0.00851219 0.00906771 0.0120899 0.0136004 0.0206259 0.0110238 0.00966049 0.0166852 0.0104116 0.0125121 0.00754788 0.0116692 0.00973615 0.00799964 0.00978443 0.00887666 0.0128453 0.023308 0.0100821 0.00863286 0.00795619 0.0116094 0.00924092 0.0146596 0.0101232 0.00992286 0.00820092 0.00951462 
+0.00751008 0.0140706 0.023813 0.0110305 0.015864 0.012396 0.00961872 0.013776 0.0112496 0.0108408 0.0125254 0.0122267 0.00820981 0.00784571 0.0103205 0.00879172 0.00751458 0.0101426 0.00816506 0.0141612 0.0111599 0.00803333 0.00923109 0.0075066 0.0102739 0.0100369 0.0102014 0.0116726 0.0114812 0.0135939 0.0148114 0.00960598 0.00977583 0.00821958 0.00872658 0.0119304 0.0193965 0.0127645 0.0166958 0.0168682 0.019195 0.014796 0.00983455 0.01345 0.00890569 0.00973891 0.00932483 0.0109065 0.0150562 
+0.0142091 0.0171275 0.0212959 0.0226795 0.00965348 0.00789792 0.0115653 0.00837956 0.0116296 0.0125248 0.00758683 0.0104345 0.0112091 0.013267 0.00861568 0.012174 0.00921962 0.00848854 0.0110919 0.0120665 0.011341 0.0106532 0.00987843 0.01072 0.0104443 0.0116164 0.00813353 0.0088635 0.00799066 0.0109428 0.0143597 0.0138117 0.0151204 0.0125059 0.012088 0.00908966 0.0111811 0.00750218 0.00891379 0.0107601 0.0124985 0.00946911 0.0106922 0.0160245 0.0103273 0.0111631 0.00846295 0.00792895 0.0130929 
+0.0160969 0.0165426 0.0215565 0.00830087 0.0119963 0.0145873 0.01132 0.0136657 0.0130255 0.010279 0.0117847 0.0146936 0.0164979 0.0161178 0.00904972 0.0149438 0.00922885 0.00960637 0.0154394 0.00855918 0.0126156 0.0164066 0.0123765 0.00758348 0.0129609 0.0150053 0.00921955 0.0106423 0.00992697 0.0129815 0.0190519 0.0108316 0.00852557 0.0124664 0.01052 0.0198776 0.0105744 0.0134703 0.0156568 0.0162452 0.013177 0.012689 0.0147488 0.0119753 0.0111859 0.00760898 0.0117605 0.0165987 0.0101982 
+0.00787813 0.0152792 0.0231161 0.00860357 0.0124436 0.0170042 0.0103345 0.0134063 0.0107158 0.0145966 0.0150371 0.0166037 0.00984823 0.0112391 0.0112079 0.0103534 0.00880878 0.014011 0.0109664 0.00887019 0.00869275 0.0169769 0.0101929 0.0135257 0.0117968 0.0152627 0.0117932 0.00992476 0.0149444 0.00956194 0.00768462 0.00801937 0.0136012 0.00815406 0.014295 0.00809303 0.0103223 0.00929317 0.0127022 0.0125694 0.00971197 0.0100839 0.0120315 0.0102075 0.014631 0.00764415 0.00873789 0.00987447 0.00987622 
+0.0117808 0.0131686 0.0260394 0.0124847 0.00917253 0.00935599 0.0121716 0.0102702 0.0119934 0.00960652 0.00970628 0.0140549 0.0122548 0.00875833 0.00919377 0.00800878 0.00767322 0.0143421 0.011313 0.00766405 0.00963594 0.00954734 0.0189531 0.00766549 0.0109007 0.00812593 0.00751261 0.0105147 0.00966133 0.0129068 0.0116813 0.0112717 0.00768834 0.014912 0.0111737 0.00960563 0.010495 0.00892403 0.0094268 0.0131169 0.00957561 0.0098624 0.00875655 0.00809833 0.00980761 0.0129471 0.0114696 0.0128509 0.0132198 
+0.0128728 0.00754542 0.0272263 0.00841808 0.00961898 0.0134181 0.0119189 0.00969064 0.0141764 0.00847745 0.0087991 0.00801522 0.00934744 0.013008 0.00884788 0.0114456 0.00988856 0.00826338 0.00976729 0.00765628 0.00786383 0.0110866 0.0136011 0.0142686 0.00886824 0.00878301 0.00908172 0.017851 0.0110583 0.0167745 0.00937047 0.0122023 0.0119426 0.00983617 0.00910604 0.0103776 0.0152968 0.0110378 0.0143341 0.0128733 0.0125787 0.0134641 0.00964661 0.0115224 0.0103523 0.00804197 0.0094482 0.0086975 0.00818573 
+0.00834996 0.00860843 0.0220945 0.0156014 0.00955845 0.0126231 0.00927052 0.013229 0.00944917 0.0111339 0.0163859 0.0091304 0.0135686 0.00793309 0.0126384 0.00932867 0.0128817 0.0151253 0.0150509 0.00851554 0.0149856 0.015832 0.0118321 0.0101956 0.00880305 0.0126798 0.0100365 0.00992585 0.0125127 0.00956911 0.0162798 0.0156639 0.0107167 0.00869821 0.012255 0.00816853 0.0192703 0.0143898 0.00874272 0.00905154 0.0142665 0.00990381 0.0113684 0.00759074 0.00783852 0.00756475 0.0101522 0.0110912 0.00986131 
+0.00788678 0.00754396 0.0213054 0.00952812 0.00790573 0.00924967 0.0146543 0.0125513 0.0186887 0.0107692 0.00781497 0.0113856 0.00978296 0.0107904 0.0209126 0.0183813 0.00983343 0.0173344 0.0116196 0.0104162 0.0118127 0.00889438 0.00896459 0.00983978 0.00912694 0.0106256 0.0132608 0.0125923 0.00989816 0.00935849 0.00809012 0.00796844 0.00824349 0.00800019 0.00794825 0.0145521 0.00910509 0.0121976 0.0126351 0.014056 0.0189283 0.0132789 0.00778931 0.0111159 0.0167268 0.0077924 0.0100442 0.00754377 0.00846491 
+0.0151892 0.0147033 0.0214282 0.0158833 0.0140911 0.0170617 0.0173505 0.016094 0.0125712 0.00973822 0.0156904 0.00969671 0.0131546 0.00913591 0.0117313 0.0076096 0.0102728 0.0100895 0.00836626 0.00794622 0.00752702 0.00830944 0.0134547 0.0131475 0.0127175 0.0145938 0.011852 0.0137899 0.00786304 0.0097474 0.0132174 0.0132138 0.00766026 0.0107259 0.0141722 0.0114705 0.0125668 0.0137401 0.0117221 0.0100631 0.00867171 0.0124746 0.00915375 0.0159038 0.0098235 0.0116108 0.0136184 0.0100869 0.0132627 
+0.00850717 0.0125984 0.0317246 0.0128163 0.00898107 0.0112215 0.0159226 0.0104672 0.00830461 0.0105821 0.0136896 0.00808372 0.00906894 0.010944 0.00916257 0.00798513 0.0139255 0.0113831 0.0111963 0.0293453 0.0171321 0.0123739 0.0138761 0.0109913 0.0114357 0.0109147 0.0130646 0.00904413 0.0142583 0.0125932 0.0134521 0.00784065 0.00888373 0.0121247 0.00871721 0.00841431 0.012501 0.0165497 0.0090014 0.0128262 0.00779781 0.00886985 0.0153363 0.0121441 0.0149739 0.0157173 0.00927695 0.0173405 0.00943229 
+0.0108811 0.0100375 0.0211207 0.00758668 0.0167146 0.009191 0.01282 0.00817154 0.0143318 0.0163734 0.0103566 0.00970306 0.00882377 0.00941159 0.0111139 0.00913247 0.0086223 0.0190639 0.0121515 0.00903945 0.00845229 0.0175803 0.0138564 0.00849259 0.0137876 0.0119419 0.012902 0.0132621 0.00769724 0.00858538 0.012828 0.0156855 0.00763261 0.00928921 0.00959716 0.00909133 0.00911388 0.00811706 0.0141702 0.00982251 0.0146678 0.0157889 0.00855938 0.0124337 0.0136499 0.00760498 0.00857573 0.011739 0.00828991 
+0.0127836 0.0158323 0.0229342 0.013479 0.01336 0.0100643 0.0115649 0.0101129 0.0115251 0.00990176 0.0102713 0.0143071 0.01646 0.0206183 0.0145796 0.00980051 0.0105814 0.00800557 0.0115127 0.0118343 0.00764139 0.011606 0.00808887 0.0156069 0.0142965 0.00920225 0.00901175 0.0179186 0.010476 0.0126085 0.0113398 0.0165795 0.0102332 0.0130093 0.0108143 0.0107896 0.0136776 0.00758194 0.00898096 0.0112934 0.0088035 0.00899996 0.00900983 0.0181271 0.0104256 0.0135279 0.0207845 0.00974133 0.0132526 
+0.00827218 0.0112403 0.0234895 0.0136407 0.0127061 0.0100592 0.0211344 0.00832512 0.0102033 0.0101187 0.00950312 0.00794106 0.00881176 0.00794202 0.0126941 0.00758617 0.00982952 0.0118735 0.00805153 0.0121912 0.0120513 0.00924288 0.0123826 0.0130124 0.012946 0.00779521 0.00853779 0.00885318 0.0112932 0.0114906 0.0140359 0.00815076 0.00869559 0.00935492 0.0162081 0.0145135 0.0104643 0.00800823 0.0121961 0.0111726 0.00941698 0.0156518 0.00814818 0.008761 0.0108818 0.00998925 0.0260524 0.0119787 0.0188741 
+0.0136581 0.00830176 0.0237861 0.00768103 0.0108723 0.0136541 0.0129849 0.0119139 0.012533 0.00948092 0.00884711 0.00838237 0.0148984 0.0151726 0.0137164 0.010204 0.00813674 0.0151501 0.00755114 0.0109602 0.00879855 0.0085259 0.012194 0.00771172 0.0199299 0.0143162 0.00899754 0.00780947 0.0110992 0.0142764 0.0117287 0.00809462 0.00883272 0.010033 0.0139142 0.0142002 0.00956673 0.0078916 0.0131095 0.00961118 0.00801448 0.0101322 0.00757168 0.0102547 0.0105793 0.00825964 0.0127969 0.00964856 0.0119805 
+0.0117749 0.0168925 0.0295973 0.0126909 0.0136641 0.00774103 0.00904598 0.0144074 0.0134041 0.0162224 0.00875431 0.0176688 0.0166057 0.011937 0.00911534 0.0128318 0.0115654 0.0125339 0.00820005 0.0099213 0.015953 0.0143193 0.0127352 0.00866696 0.00986538 0.00955488 0.00925682 0.0115961 0.00904329 0.015993 0.010365 0.0112922 0.00769039 0.00946341 0.0151136 0.00863344 0.00960083 0.0107055 0.0166727 0.0115462 0.0102101 0.0148867 0.0127146 0.00799505 0.00755202 0.00821962 0.0173684 0.0114934 0.0100888 
+0.0115904 0.0161935 0.0210645 0.0128695 0.0102525 0.0130026 0.0110466 0.013117 0.0153459 0.0111719 0.0105414 0.0168115 0.0153026 0.0117334 0.00847964 0.013288 0.00948692 0.00893241 0.0157404 0.00783662 0.0155561 0.0126165 0.0137172 0.00754896 0.00946696 0.00857036 0.00921242 0.00842347 0.0113927 0.00835872 0.011205 0.0112152 0.0139044 0.00771779 0.00880467 0.00810825 0.00943809 0.0114274 0.00999356 0.00844256 0.0116472 0.0112303 0.0126282 0.00864799 0.0141594 0.0133817 0.0139686 0.013671 0.00757762 
+0.00847853 0.0101838 0.0207027 0.00823753 0.0193805 0.0173396 0.011372 0.0100498 0.0124164 0.0102363 0.0122627 0.0119874 0.00884046 0.0120114 0.0126816 0.00805045 0.011121 0.00980431 0.00838928 0.012769 0.00859078 0.0120737 0.00768856 0.0123908 0.0133008 0.0107473 0.0111521 0.00875186 0.0079327 0.0092638 0.00846935 0.0115291 0.0127159 0.0149261 0.021916 0.0132002 0.00803489 0.0114366 0.0125612 0.011724 0.0152145 0.0104299 0.00761108 0.0108463 0.011324 0.00971698 0.00889901 0.0170197 0.0101838 
+0.0087501 0.0129461 0.0210226 0.00982926 0.00857503 0.0105955 0.00903077 0.00786965 0.0171157 0.0142813 0.0120519 0.00772997 0.0173771 0.0159128 0.0117969 0.00982372 0.00779776 0.00996083 0.0109033 0.0136487 0.00972399 0.014807 0.0109314 0.0132037 0.0137179 0.0086105 0.0113871 0.0091689 0.0111315 0.012598 0.0212768 0.0174293 0.01008 0.0129319 0.0103472 0.007781 0.0148411 0.00853454 0.0171013 0.0123093 0.0127419 0.010979 0.0163594 0.00755121 0.00967992 0.00778865 0.00889473 0.016252 0.00858042 
+0.00816276 0.00893204 0.0241534 0.0078781 0.0111201 0.0107338 0.0131887 0.0127229 0.0182888 0.0102698 0.00759193 0.00924786 0.0126784 0.0105905 0.00906858 0.00832025 0.0110609 0.0181397 0.00853273 0.0139305 0.00870982 0.0149465 0.00776266 0.0083783 0.00855052 0.0114378 0.0144946 0.0105753 0.0211949 0.0108381 0.00935234 0.0164875 0.00878224 0.00805143 0.0123776 0.0180753 0.017165 0.00885738 0.0124324 0.0208931 0.0118545 0.0086379 0.0102169 0.0163785 0.012675 0.0116462 0.0117442 0.0119094 0.0110756 
+0.0148348 0.00961162 0.0226236 0.0148194 0.0153942 0.00757926 0.00982272 0.0109519 0.0139475 0.0106584 0.0130521 0.0169529 0.00899622 0.0108567 0.0140035 0.0114788 0.00942968 0.00925525 0.0114391 0.00774959 0.0143579 0.0204858 0.0155225 0.018395 0.0138634 0.0162887 0.0111597 0.0176738 0.0111368 0.0117033 0.0101117 0.00934881 0.0152992 0.00881209 0.0107505 0.00776317 0.00879125 0.00964793 0.0103612 0.00949678 0.00965235 0.0102129 0.0132147 0.00974566 0.0113424 0.0079461 0.0125196 0.0129287 0.00783567 
+0.0120911 0.00817481 0.0237549 0.0147351 0.0117855 0.00928076 0.0107701 0.0121053 0.0115229 0.00856005 0.0138143 0.00909436 0.0102478 0.0157573 0.0143705 0.00929775 0.010034 0.00962566 0.00813062 0.00835003 0.0145278 0.0198229 0.0127871 0.0108061 0.0144633 0.00859746 0.0113239 0.0103966 0.0133368 0.0100164 0.0157657 0.0106799 0.0126575 0.0145848 0.0153154 0.0133791 0.0119951 0.00827273 0.0125771 0.021774 0.00865036 0.0144126 0.016041 0.00866422 0.00829539 0.0158461 0.0178761 0.0151354 0.0131831 
+0.00805311 0.0141055 0.0209319 0.0113789 0.00773719 0.0129798 0.00781845 0.00849937 0.0120866 0.0115675 0.0113208 0.0106093 0.00851479 0.0129702 0.0162205 0.0109245 0.00811679 0.00988323 0.0130492 0.0113375 0.0101404 0.00963577 0.0104726 0.00817791 0.00894249 0.00827902 0.0192529 0.00930379 0.0126582 0.0101482 0.0110285 0.00751677 0.0123917 0.00844846 0.0154179 0.00987267 0.011543 0.0107637 0.0128567 0.0098703 0.0121757 0.0136664 0.00861062 0.00847489 0.0113065 0.0101664 0.00782308 0.00762203 0.0126791 
+0.0123994 0.00981428 0.022354 0.0118202 0.010715 0.0132672 0.00989459 0.0100312 0.010663 0.0084528 0.01021 0.0130803 0.00750065 0.00968146 0.0104169 0.0118141 0.0112502 0.0169563 0.0119578 0.00754256 0.00986909 0.00764487 0.0183683 0.00903233 0.0113879 0.00957045 0.00900771 0.0102431 0.0083436 0.012388 0.0106586 0.00759898 0.00982518 0.00961964 0.00757725 0.00913905 0.0112435 0.00938767 0.00760967 0.0200915 0.00897976 0.0126304 0.00991396 0.0144085 0.00900377 0.0104148 0.0218281 0.00844023 0.009501 
+0.0111347 0.0120638 0.0236069 0.0121996 0.00845197 0.0221136 0.0116805 0.0171462 0.00976275 0.0104405 0.00774986 0.0158893 0.00811754 0.00844794 0.0108582 0.016115 0.0104086 0.00884339 0.00827747 0.0149474 0.00861338 0.0123303 0.00795884 0.0139507 0.0110319 0.015533 0.0149989 0.0168995 0.0091204 0.0130215 0.0143505 0.00871318 0.0148872 0.0190812 0.0117922 0.00763548 0.00917549 0.0109953 0.0135283 0.0120184 0.00878941 0.00815475 0.00850422 0.0103672 0.00964898 0.0097262 0.0141046 0.0140309 0.00879619 
+0.0105143 0.013703 0.0232045 0.0113357 0.00968691 0.00904893 0.00968514 0.012664 0.0127936 0.0077164 0.0152491 0.0159674 0.011685 0.00993868 0.00918175 0.00873503 0.00957997 0.00874447 0.0171354 0.00938301 0.0143542 0.00762579 0.0110455 0.012735 0.0125789 0.00779042 0.0104997 0.0157228 0.0182966 0.0109702 0.0127351 0.00883668 0.0119592 0.0081488 0.012322 0.0156327 0.0110513 0.00956541 0.00819633 0.0140311 0.0175009 0.00928975 0.00885438 0.0097019 0.00800561 0.00777336 0.0088903 0.00898481 0.0107819 
+0.0098046 0.0117494 0.0263231 0.015436 0.013662 0.00908872 0.00862825 0.0217945 0.00828076 0.00992346 0.00937373 0.00984248 0.0101574 0.0139904 0.0106609 0.00980107 0.0193559 0.0143811 0.00938213 0.00838145 0.01026 0.00908042 0.00957255 0.0142897 0.0115678 0.00837216 0.00847856 0.00750615 0.0170843 0.00771556 0.0136041 0.0085186 0.0117335 0.0104907 0.00772486 0.0120301 0.0104384 0.011429 0.00898434 0.00895333 0.00933175 0.0129645 0.012382 0.00803812 0.010185 0.0108926 0.0098767 0.00845662 0.0111648 
+0.00819075 0.00884651 0.0287952 0.00805318 0.0131068 0.0101037 0.011449 0.00829198 0.0132162 0.013026 0.0146038 0.0155428 0.00806449 0.00793688 0.00933993 0.0117829 0.0150817 0.0120513 0.0139549 0.0161088 0.0124043 0.00832278 0.0112157 0.00761119 0.0130748 0.00753709 0.00835639 0.0128375 0.0147134 0.00809342 0.0106003 0.0101066 0.0137563 0.00912825 0.0087958 0.0099307 0.010259 0.0100663 0.00872608 0.0132512 0.00883836 0.012254 0.013586 0.00992891 0.00758181 0.0143006 0.0089286 0.0088272 0.0107939 
+0.012967 0.00825558 0.0222214 0.0128302 0.0080524 0.0110999 0.015294 0.0135671 0.0122866 0.00918322 0.0107904 0.00987674 0.012935 0.0125545 0.0142801 0.0157007 0.0106536 0.0195497 0.0138837 0.015188 0.0127691 0.0171765 0.011912 0.0112012 0.0135172 0.011662 0.0192824 0.0142208 0.0133748 0.0117129 0.0131057 0.0140483 0.010904 0.00974626 0.0186114 0.00855291 0.0161563 0.0130663 0.0232055 0.0115444 0.0111065 0.0126702 0.00987692 0.011846 0.0147607 0.00895658 0.010586 0.0142193 0.00830078 
+
+150 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+150 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 >

+ 89 - 0
tests/Makefile.inc

@@ -0,0 +1,89 @@
+# BINARY-DIRECTORY-MAKEFILE
+# conventions:
+# - there are no subdirectories, they are ignored!
+# - all ".C", ".cpp" and ".c" files in the current directory are considered
+#   independent binaries, and linked as such.
+# - the binaries depend on the library of the parent directory
+# - the binary names are created with $(BINNAME), i.e. it will be more or less
+#   the name of the .o file
+# - all binaries will be added to the default build list (ALL_CHECKS in this test directory)
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The additional
+# SUBDIR_before and SUBDIR_add variables are only required so that we can
+# recover the previous content of SUBDIR before exiting the makefile.inc.
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also drop the $(SUBDIRS_OF_$(SUBDIR)) variable and include the
+# makefile.inc files of the subdirectories yourself.
+
+#SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+#include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# include the libdepend.inc file, which gives additional dependencies for the
+# libraries and binaries. additionally, an automatic dependency on the library
+# of the parent directory is added (see the PKG_DEPEND_INT call below).
+
+-include $(SUBDIR)libdepend.inc
+
+PARENTDIR:=$(patsubst %/,%,$(dir $(patsubst %/,%,$(SUBDIR))))
+$(call PKG_DEPEND_INT,$(PARENTDIR))
+$(call PKG_DEPEND_EXT,CPPUNIT)
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory, however,
+# to update $(ALL_OBJS) so that it contains the path and name of
+# all objects. otherwise we cannot include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+      $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used,
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. again, be sure to add the files with their full paths.
+
+CHECKS:=$(BINDIR)$(call LIBNAME,$(SUBDIR))
+ALL_CHECKS+=$(CHECKS)
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. each binary depends on the corresponding .o file and
+# on the libraries specified by the INTLIBS/EXTLIBS. these dependencies can be
+# specified manually or they are automatically stored in a .bd file.
+
+$(foreach head,$(wildcard $(SUBDIR)*.h),$(eval $(shell grep -q Q_OBJECT $(head) && echo $(head) | sed -e's@^@/@;s@.*/\(.*\)\.h$$@$(BINDIR)\1:$(OBJDIR)moc_\1.o@')))
+$(eval $(foreach c,$(CHECKS),$(c):$(BUILDDIR)$(CPPUNIT_MAIN_OBJ) $(OBJS) $(call PRINT_INTLIB_DEPS,$(c),.a)))
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

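For orientation: the Makefile above builds the sources in tests/ into a CppUnit check binary (ALL_CHECKS) linked against the parent library and CPPUNIT, and all fixtures below follow the same CppUnit registration pattern. A minimal, self-contained sketch of that pattern (MySuite and testSomething are placeholder names, not part of this commit):

    #include <cppunit/extensions/HelperMacros.h>

    // Minimal CppUnit fixture skeleton; MySuite/testSomething are illustrative names.
    class MySuite : public CppUnit::TestFixture {
        CPPUNIT_TEST_SUITE( MySuite );
        CPPUNIT_TEST( testSomething );
        CPPUNIT_TEST_SUITE_END();
      public:
        void setUp() {}    // runs before every test
        void tearDown() {} // runs after every test
        void testSomething() {
          CPPUNIT_ASSERT_DOUBLES_EQUAL( 1.0, 1.0, 1e-8 ); // trivial check
        }
    };
    // makes the suite visible to the test runner linked by the Makefile above
    CPPUNIT_TEST_SUITE_REGISTRATION( MySuite );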
+ 1242 - 0
tests/TestFastHIK.cpp

@@ -0,0 +1,1242 @@
+#ifdef NICE_USELIB_CPPUNIT
+
+#include <string>
+#include <exception>
+
+#include <core/algebra/ILSConjugateGradients.h>
+#include <core/algebra/GMStandard.h>
+#include <core/basics/Timer.h>
+
+#include <gp-hik-core/tools.h>
+#include <gp-hik-core/kernels/IntersectionKernelFunction.h>
+#include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>
+#include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+
+#include "TestFastHIK.h"
+
+
+const bool verbose = false;
+const bool verboseStartEnd = true;
+const bool solveLinWithoutRand = false;
+const uint n = 20;       // alternatively: 1500 or 10
+const uint d = 5;        // alternatively: 200 or 2
+const uint numBins = 11; // alternatively: 1001
+const uint solveLinMaxIterations = 1000;
+const double sparse_prob = 0.6;
+const bool smallTest = false;
+
+using namespace NICE;
+using namespace std;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestFastHIK );
+
+void TestFastHIK::setUp() {
+}
+
+void TestFastHIK::tearDown() {
+}
+
+bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
+{
+  bool result(true);
+  
+//   std::cerr << "A.size(): " << A.size() << " B.size(): " << B.size() << std::endl;
+  
+  NICE::VVector::const_iterator itA = A.begin();
+  NICE::VVector::const_iterator itB = B.begin();
+  
+  while ( (itA != A.end()) && ( itB != B.end()) )
+  {
+    if (itA->size() != itB->size())
+    {
+      result = false;
+      break;
+    } 
+    
+//     std::cerr << "itA->size(): " << itA->size() << "itB->size(): " << itB->size() << std::endl;
+    for(uint i = 0; (i < itA->size()) && (i < itB->size()); i++)
+    {
+      if (fabs((*itA)[i] - (*itB)[i]) > tolerance)
+      {
+        result = false;
+        break;        
+      }
+    }
+
+    if (result == false)
+          break;        
+    itA++;
+    itB++;
+//     std::cerr << "foo" << std::endl;
+  }
+  
+  return result;
+}
+
+bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const double & tolerance = 10e-8)
+{
+  bool result = true;
+  
+  for (int i = 0; i < size; i++)
+  {
+    if ( fabs(LUT1[i] - LUT2[i]) > tolerance)
+    {
+      result = false;
+      std::cerr << "problem in : " << i << " / " << size << " LUT1: " << LUT1[i] << " LUT2: " << LUT2[i] << std::endl;
+      break;
+    }
+  }
+  
+  return result;
+}
+
+void TestFastHIK::testKernelMultiplication() 
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelMultiplication ===================== " << std::endl;
+  vector< vector<double> > dataMatrix;
+
+  generateRandomFeatures ( d, n, dataMatrix );
+
+  int nrZeros(0);
+  for ( uint i = 0 ; i < d; i++ )
+  {
+    for ( uint k = 0; k < n; k++ )
+      if ( drand48() < sparse_prob ) 
+      {
+        dataMatrix[i][k] = 0.0;
+        nrZeros++;
+      }
+  }
+
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+    
+  if ( (n*d)>0)
+  {
+    CPPUNIT_ASSERT_DOUBLES_EQUAL(fmk.getSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
+    if (verbose)
+      std::cerr << "fmk.getSparsityRatio(): " << fmk.getSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
+  }
+  
+  GMHIKernel gmk ( &fmk );
+  if (verbose)
+    gmk.setVerbose(true); //we want to see size(A)+size(B) for the non-sparse vs the sparse solution
+  else
+    gmk.setVerbose(false); //we don't want to see size(A)+size(B) for the non-sparse vs the sparse solution
+
+  Vector y ( n );
+  for ( uint i = 0; i < y.size(); i++ )
+    y[i] = sin(i);
+ 
+  Vector alpha;
+  
+  gmk.multiply ( alpha, y );
+  
+  NICE::IntersectionKernelFunction<double> hikSlow;
+  
+  // tic
+  time_t  slow_start = clock();
+  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+  transposeVectorOfVectors(dataMatrix_transposed);
+  NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
+  //toc
+  float time_slowComputation = (float) (clock() - slow_start);
+  std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;  
+
+  // tic
+  time_t  slow_sparse_start = clock();
+
+  NICE::Matrix KSparseCalculated (hikSlow.computeKernelMatrix(fmk.featureMatrix(), noise));
+  //toc
+  float time_slowComputation_usingSparsity = (float) (clock() - slow_sparse_start);
+  std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;    
+
+  if ( verbose ) 
+    cerr << "K = " << K << endl;
+
+  // check the trace calculation
+  //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
+
+  // let us compute the kernel multiplication with the slow version
+  Vector alpha_slow = K*y;
+
+  if (verbose)
+    std::cerr << "Sparse multiplication [alpha, alpha_slow]: " << std::endl <<  alpha << std::endl << alpha_slow << std::endl << std::endl;
+  
+  CPPUNIT_ASSERT_DOUBLES_EQUAL((alpha-alpha_slow).normL1(), 0.0, 1e-8);
+
+  // test the case, where we first transform and then use the multiply stuff
+  NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
+
+  NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
+  ParameterizedFunction *pf = new PFAbsExp( 1.2 );
+  fmk.applyFunctionToFeatureMatrix( pf );
+//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
+
+  Vector galpha;
+  gmk.multiply ( galpha, y );
+
+  Vector galpha_slow = gK * y;
+
+  CPPUNIT_ASSERT_DOUBLES_EQUAL((galpha-galpha_slow).normL1(), 0.0, 1e-8);
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelMultiplication done ===================== " << std::endl;
+}
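The test above validates the fast multiplication K*y against an explicitly built kernel matrix. The underlying histogram intersection kernel (HIK) is K(x,z) = sum_d min(x_d, z_d), plus a noise term on the diagonal; a standalone sketch of the slow baseline (illustrative code, not the library's IntersectionKernelFunction):

    #include <vector>
    #include <algorithm>

    // Naive HIK baseline: K[i][j] = sum_d min(X[i][d], X[j][d]), noise on the diagonal.
    std::vector<std::vector<double> > naiveHIK(
        const std::vector<std::vector<double> > & X, // X[i] = i-th example
        double noise )
    {
      const size_t n = X.size();
      std::vector<std::vector<double> > K( n, std::vector<double>( n, 0.0 ) );
      for ( size_t i = 0; i < n; i++ )
        for ( size_t j = 0; j < n; j++ )
        {
          for ( size_t dim = 0; dim < X[i].size(); dim++ )
            K[i][j] += std::min( X[i][dim], X[j][dim] );
          if ( i == j )
            K[i][j] += noise; // mirrors the regularization used in the test
        }
      return K;
    }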
+
+void TestFastHIK::testKernelMultiplicationFast() 
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelMultiplicationFast ===================== " << std::endl;
+  
+  Quantization q_gen ( numBins );
+  Quantization q ( 2*numBins -1);
+
+  // data is generated, such that there is no approximation error
+  vector< vector<double> > dataMatrix;
+  for ( uint i = 0; i < d ; i++ )
+  {
+    vector<double> v;
+    v.resize(n);
+    for ( uint k = 0; k < n; k++ ) {
+      if ( drand48() < sparse_prob ) {
+        v[k] = 0;
+      } else {
+        v[k] = q_gen.getPrototype( (rand() % numBins) );
+      }
+    }
+
+    dataMatrix.push_back(v);
+  }
+  
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+  
+  GMHIKernel gmk ( &fmk );
+  if (verbose)
+    gmk.setVerbose(true); //we want to see size(A)+size(B) for the non-sparse vs the sparse solution
+  else
+    gmk.setVerbose(false); //we don't want to see size(A)+size(B) for the non-sparse vs the sparse solution
+
+  Vector y ( n );
+  for ( uint i = 0; i < y.size(); i++ )
+    y[i] = sin(i);
+   
+  ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
+  GMHIKernel gmkFast ( &fmk, pf, &q );
+
+//   pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
+    
+  Vector alpha;
+  
+  gmk.multiply ( alpha, y );
+  
+  Vector alphaFast;
+  
+  gmkFast.multiply ( alphaFast, y );
+  
+  NICE::IntersectionKernelFunction<double> hikSlow;
+  
+  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+  transposeVectorOfVectors(dataMatrix_transposed);
+
+  NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
+
+  if ( verbose ) 
+    cerr << "K = " << K << endl;
+
+  // check the trace calculation
+  //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
+
+  // let us compute the kernel multiplication with the slow version
+  Vector alpha_slow = K*y;
+
+  if ( verbose )
+    std::cerr << "Sparse multiplication [alpha, alphaFast, alpha_slow]: " << std::endl <<  alpha << std::endl << alphaFast << std::endl << alpha_slow << std::endl << std::endl;
+ 
+  CPPUNIT_ASSERT_DOUBLES_EQUAL(0.0, (alphaFast-alpha_slow).normL1(), 1e-8);
+
+  // test the case, where we first transform and then use the multiply stuff
+  NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
+
+  NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
+  pf->parameters()[0] = 1.2;
+  fmk.applyFunctionToFeatureMatrix( pf );
+//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
+
+  Vector galphaFast;
+  gmkFast.multiply ( galphaFast, y );
+  
+  Vector galpha;
+  
+  gmk.multiply ( galpha, y );
+
+  Vector galpha_slow = gK * y;
+  
+  if (verbose)
+    std::cerr << "Sparse multiplication [galpha, galphaFast, galpha_slow]: " << std::endl <<  galpha << std::endl << galphaFast << std::endl << galpha_slow << std::endl << std::endl;
+
+  CPPUNIT_ASSERT_DOUBLES_EQUAL((galphaFast-galpha_slow).normL1(), 0.0, 1e-8);
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelMultiplicationFast done ===================== " << std::endl;
+}
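The data generation above draws every non-zero feature value directly from the quantizer's prototypes, so quantizing the values back is lossless and the LUT-based fast path agrees with the exact computation up to numerical precision. A sketch of that idea, assuming prototypes evenly spaced on [0,1] (the actual Quantization class may be implemented differently):

    // Hypothetical uniform quantizer with `bins` prototypes on [0,1].
    // Values produced by prototype(k) quantize back to k exactly,
    // which is why this test incurs no approximation error.
    struct UniformQuantization {
      int bins;
      double prototype( int k ) const { return k / double( bins - 1 ); }
      int    quantize ( double v ) const { return int( v * ( bins - 1 ) + 0.5 ); }
    };
    // e.g. bins = 11: prototype(3) = 0.3 and quantize(0.3) = 3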
+
+
+void TestFastHIK::testKernelSum() 
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelSum ===================== " << std::endl;
+  
+  vector< vector<double> > dataMatrix;
+  generateRandomFeatures ( d, n, dataMatrix );
+
+  int nrZeros(0);
+  for ( uint i = 0 ; i < d; i++ )
+  {
+    for ( uint k = 0; k < n; k++ )
+      if ( drand48() < sparse_prob ) 
+      {
+        dataMatrix[i][k] = 0.0;
+        nrZeros++;
+      }
+  }
+  
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+  
+  Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
+
+  NICE::VVector ASparse;
+  NICE::VVector BSparse;
+  fmk.hik_prepare_alpha_multiplications ( alpha, ASparse, BSparse ); 
+  
+  Vector xstar (d);
+  for ( uint i = 0 ; i < d ; i++ )
+    if ( drand48() < sparse_prob ) {
+      xstar[i] = 0.0;
+    } else {
+      xstar[i] = rand();
+    }
+  SparseVector xstarSparse ( xstar );
+    
+  double betaSparse;
+  fmk.hik_kernel_sum ( ASparse, BSparse, xstarSparse, betaSparse );
+  
+  if (verbose)
+    std::cerr << "kernelSumSparse done, now do the thing without exploiting sparsity" << std::endl;
+
+  
+  // checking the result
+  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+  transposeVectorOfVectors(dataMatrix_transposed);
+  NICE::IntersectionKernelFunction<double> hikSlow;
+
+  std::vector<double> xstar_stl;
+  xstar_stl.resize(d);
+  for ( uint i = 0 ; i < d; i++ )
+    xstar_stl[i] = xstar[i];
+  std::vector<double> kstar_stl = hikSlow.computeKernelVector ( dataMatrix_transposed, xstar_stl );
+  double beta_slow = 0.0;
+  for ( uint i = 0 ; i < n; i++ )
+    beta_slow += kstar_stl[i] * alpha[i];
+
+  if (verbose)
+    std::cerr << "difference of beta_slow and betaSparse: " << fabs(beta_slow - betaSparse) << std::endl;
+  CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, betaSparse, 1e-8);
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelSum done ===================== " << std::endl;
+}
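hik_prepare_alpha_multiplications and hik_kernel_sum exploit that, per dimension, sum_i alpha_i * min(q, x_i) splits at the rank of q among the sorted feature values: smaller entries contribute alpha_i * x_i, larger ones contribute alpha_i * q. A simplified one-dimensional sketch of this A/B decomposition (the library stores the prefix sums per sorted position; its exact layout may differ):

    #include <vector>
    #include <algorithm>

    // sum_i alphas[i] * min(q, vals[i]) in O(log n) per query after O(n) preparation.
    // vals must be sorted ascending, alphas aligned with vals.
    double hikSum1d( const std::vector<double> & vals,
                     const std::vector<double> & alphas,
                     double q )
    {
      // preparation (once per alpha vector): prefix sums A and B
      std::vector<double> A( vals.size() + 1, 0.0 ); // A[r] = sum_{i<r} alphas[i]*vals[i]
      std::vector<double> B( vals.size() + 1, 0.0 ); // B[r] = sum_{i<r} alphas[i]
      for ( size_t i = 0; i < vals.size(); i++ )
      {
        A[i+1] = A[i] + alphas[i] * vals[i];
        B[i+1] = B[i] + alphas[i];
      }
      // query: entries below q contribute their value, entries above contribute q
      size_t r = std::lower_bound( vals.begin(), vals.end(), q ) - vals.begin();
      return A[r] + q * ( B.back() - B[r] );
    }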
+
+
+void TestFastHIK::testKernelSumFast() 
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelSumFast ===================== " << std::endl;
+  
+  Quantization q ( numBins );
+
+  // data is generated, such that there is no approximation error
+  vector< vector<double> > dataMatrix;
+  for ( uint i = 0; i < d ; i++ )
+  {
+    vector<double> v;
+    v.resize(n);
+    for ( uint k = 0; k < n; k++ ) {
+      if ( drand48() < sparse_prob ) {
+        v[k] = 0;
+      } else {
+        v[k] = q.getPrototype( (rand() % numBins) );
+      }
+    }
+
+    dataMatrix.push_back(v);
+  }
+  
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+  Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
+  if ( verbose )
+    std::cerr << "alpha = " << alpha << endl;
+
+  // generate xstar
+  Vector xstar (d);
+  for ( uint i = 0 ; i < d ; i++ )
+    if ( drand48() < sparse_prob ) {
+      xstar[i] = 0;
+    } else {
+      xstar[i] = q.getPrototype( (rand() % numBins) );
+    }
+
+  // convert to STL vector
+  vector<double> xstar_stl;
+  xstar_stl.resize(d);
+  for ( uint i = 0 ; i < d; i++ )
+    xstar_stl[i] = xstar[i];
+
+  if ( verbose ) 
+    cerr << "xstar = " << xstar << endl;
+ 
+  for ( double gamma = 1.0 ; gamma < 2.0; gamma += 0.5 ) 
+  {
+    if (verbose)
+      std::cerr << "testing hik_kernel_sum_fast with ghik parameter: " << gamma << endl;
+
+    PFAbsExp pf ( gamma );
+
+//     pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
+    fmk.applyFunctionToFeatureMatrix( &pf );
+
+    NICE::VVector A;
+    NICE::VVector B;
+    if (verbose)
+      std::cerr << "fmk.hik_prepare_alpha_multiplications ( alpha, A, B ) " << std::endl;
+    fmk.hik_prepare_alpha_multiplications ( alpha, A, B ); 
+
+    if (verbose)
+      //std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast( A, B, q )" << std::endl;
+      std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast_alltogether( alpha, q, &pf )" << std::endl;
+    double *TlookupOld = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, &pf ); 
+    double *TlookupNew = fmk.hikPrepareLookupTable( alpha, q, &pf ); 
+    
+    int maxAccess(numBins*d);
+    
+    if (verbose)
+    {
+      std::cerr << "TlookupOld:  " << std::endl;
+      for (int i = 0; i < maxAccess; i++)
+      {
+        std::cerr << TlookupOld[i] << " ";
+        if ( (i%numBins) == (numBins-1))
+          std::cerr << std::endl;
+      }
+      std::cerr << "TlookupNew:  " << std::endl;
+      for (int i = 0; i < maxAccess; i++)
+      {
+        std::cerr << TlookupNew[i] << " ";
+        if ( (i%numBins) == (numBins-1))
+          std::cerr << std::endl;
+      }    
+    }
+    
+    if (verbose)
+      std::cerr << "fmk.hik_kernel_sum_fast ( Tlookup, q, xstar, beta_fast )" << std::endl;
+    
+    double beta_fast;
+    fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar, beta_fast );
+    
+    NICE::SparseVector xstar_sparse(xstar);
+    
+    double beta_fast_sparse;
+    fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar_sparse, beta_fast_sparse );
+    
+    double betaSparse;
+    fmk.hik_kernel_sum ( A, B, xstar_sparse, betaSparse, &pf );
+
+    // checking the result
+    std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+    transposeVectorOfVectors(dataMatrix_transposed);
+    NICE::GeneralizedIntersectionKernelFunction<double> hikSlow (gamma);
+
+    vector<double> kstar_stl = hikSlow.computeKernelVector ( dataMatrix_transposed, xstar_stl );
+    double beta_slow = 0.0;
+    for ( uint i = 0 ; i < n; i++ )
+      beta_slow += kstar_stl[i] * alpha[i];
+
+    if (verbose)
+      std::cerr << "beta_slow: " << beta_slow << std::endl << "beta_fast: " << beta_fast << std::endl << "beta_fast_sparse: " << beta_fast_sparse << std::endl << "betaSparse: " << betaSparse<< std::endl;
+    CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, beta_fast_sparse, 1e-8);
+  
+    delete [] TlookupNew;
+    delete [] TlookupOld;
+  }
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelSumFast done ===================== " << std::endl;
+
+}
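The lookup tables reduce the per-dimension sum to a single indexed read: x*_d is quantized to a bin, and beta is the sum of one table entry per dimension. Assuming the row-per-dimension layout suggested by the debug printing above (one row of numBins entries per dimension), the fast query is essentially:

    #include <vector>

    // Hedged sketch of the LUT query behind hik_kernel_sum_fast;
    // assumes T holds dim rows of numBins entries each.
    double lutKernelSum( const double * T, int numBins, int dim,
                         const std::vector<int> & bins ) // bins[d] = quantized x*_d
    {
      double beta = 0.0;
      for ( int d = 0; d < dim; d++ )
        beta += T[ d * numBins + bins[d] ];
      return beta;
    }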
+
+void TestFastHIK::testLUTUpdate()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testLUTUpdate ===================== " << std::endl;
+
+  Quantization q ( numBins );
+
+  // data is generated, such that there is no approximation error
+  vector< vector<double> > dataMatrix;
+  for ( uint i = 0; i < d ; i++ )
+  {
+    vector<double> v;
+    v.resize(n);
+    for ( uint k = 0; k < n; k++ ) {
+      if ( drand48() < sparse_prob ) {
+        v[k] = 0;
+      } else {
+        v[k] = q.getPrototype( (rand() % numBins) );
+      }
+    }
+
+    dataMatrix.push_back(v);
+  }
+  
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+  
+  ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
+
+  Vector alpha ( n );
+  for ( uint i = 0; i < alpha.size(); i++ )
+    alpha[i] = sin(i);
+  
+  if (verbose)
+    std::cerr << "prepare LUT" << std::endl;
+  double * T = fmk.hikPrepareLookupTable(alpha, q, pf);
+  if (verbose)
+    std::cerr << "preparation done -- printing T" << std::endl;
+  
+  int maxAccess(numBins*d);
+  if (verbose)
+  {
+    for (int i = 0; i < maxAccess; i++)
+    {
+      std::cerr << T[i] << " ";
+      if ( (i%numBins) == (numBins-1))
+        std::cerr << std::endl;
+    }    
+  }
+
+  // let's change the alpha value at index 2
+  int idx(2);
+  double valAlphaOld(alpha[idx]);
+  double valAlphaNew(1.2); //this value is definitely different from the previous one
+      
+  Vector alphaNew(alpha);
+  alphaNew[idx] = valAlphaNew;
+  
+  double * TNew = fmk.hikPrepareLookupTable(alphaNew, q, pf);
+  if (verbose)
+    std::cerr << "calculated the new LUT, no print it: " << std::endl;
+  
+  if (verbose)
+  {
+    for (int i = 0; i < maxAccess; i++)
+    {
+      std::cerr << TNew[i] << " ";
+      if ( (i%numBins) == (numBins-1))
+        std::cerr << std::endl;
+    } 
+  }
+
+  if (verbose)
+    std::cerr << "change the old LUT by a new value for alpha_i" << std::endl;
+  fmk.hikUpdateLookupTable(T, valAlphaNew, valAlphaOld, idx, q, pf );
+  if (verbose)
+    std::cerr << "update is done, now print the updated version: " << std::endl;
+  
+  if (verbose)
+  {
+    for (int i = 0; i < maxAccess; i++)
+    {
+      std::cerr << T[i] << " ";
+      if ( (i%numBins) == (numBins-1))
+        std::cerr << std::endl;
+    } 
+  }
+  
+  
+  bool equal = compareLUTs(T, TNew, q.size()*d, 10e-8);
+  
+  if (verbose)
+  {
+    if (equal)
+      std::cerr << "LUTs are equal :) " << std::endl;
+    else
+    {
+      std::cerr << "T are not equal :( " << std::endl;
+      for (uint i = 0; i < q.size()*d; i++)
+      {
+        if ( (i % q.size()) == 0)
+          std::cerr << std::endl;
+        std::cerr << T[i] << " ";
+      }
+      std::cerr << "TNew: "<< std::endl;
+      for (uint i = 0; i < q.size()*d; i++)
+      {
+        if ( (i % q.size()) == 0)
+          std::cerr << std::endl;
+        std::cerr << TNew[i] << " ";
+      }     
+    
+    }    
+  }
+  
+  CPPUNIT_ASSERT(equal == true);
+  
+  delete [] T;
+  delete [] TNew;
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testLUTUpdate done ===================== " << std::endl;
+}
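The update tested here is effectively rank-one: if the table entries are T[d*numBins + b] = sum_i alpha_i * min(prototype(b), x_i[d]) (assumed definition, matching the behavior checked above), then changing a single alpha_i only adds a correction term per entry, so no full rebuild is needed. A sketch under that assumption, reusing the hypothetical UniformQuantization from the earlier sketch:

    #include <vector>
    #include <algorithm>

    // Apply the effect of alpha_i: old -> new to an existing LUT in place.
    void updateLUT( double * T, int numBins, int dim,
                    const std::vector<double> & xi,  // example i, length dim
                    double alphaNew, double alphaOld,
                    const UniformQuantization & q )
    {
      const double diff = alphaNew - alphaOld;
      for ( int d = 0; d < dim; d++ )
        for ( int b = 0; b < numBins; b++ )
          T[ d * numBins + b ] += diff * std::min( q.prototype( b ), xi[d] );
    }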
+
+void TestFastHIK::testLinSolve()
+{
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testLinSolve ===================== " << std::endl;
+
+  Quantization q ( numBins );
+
+  // data is generated, such that there is no approximation error
+  vector< vector<double> > dataMatrix;
+  for ( uint i = 0; i < d ; i++ )
+  {
+    vector<double> v;
+    v.resize(n);
+    for ( uint k = 0; k < n; k++ ) {
+      if ( drand48() < sparse_prob ) {
+        v[k] = 0;
+      } else {
+        v[k] = q.getPrototype( (rand() % numBins) );
+      }
+    }
+
+    dataMatrix.push_back(v);
+  }
+  
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+  
+  ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
+  fmk.applyFunctionToFeatureMatrix( pf );
+//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
+
+  Vector y ( n );  
+  for ( uint i = 0; i < y.size(); i++ )
+    y[i] = sin(i);
+  
+  Vector alpha;
+  Vector alphaRandomized;
+
+  std::cerr << "solveLin with randomization" << std::endl;
+  // tic
+  Timer t;
+  t.start();
+  //run at most solveLinMaxIterations iterations and sample 30 examples randomly in each iteration
+  fmk.solveLin(y,alphaRandomized,q,pf,true,solveLinMaxIterations,30);
+  //toc
+  t.stop();
+  float time_randomizedSolving = t.getLast();
+  std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;  
+  
+  // test the case, where we first transform and then use the multiply stuff
+  std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
+  transposeVectorOfVectors(dataMatrix_transposed);
+  
+  NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.0 );
+  NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
+  
+  Vector K_alphaRandomized;
+  K_alphaRandomized.multiply(gK, alphaRandomized);
+  
+  if (solveLinWithoutRand)
+  {
+    std::cerr << "solveLin without randomization" << std::endl;
+    fmk.solveLin(y,alpha,q,pf,false,1000);
+    Vector K_alpha;
+    K_alpha.multiply(gK, alpha);
+    std::cerr << "now assert that K_alpha == y" << std::endl;
+    std::cerr << "(K_alpha-y).normL1(): " << (K_alpha-y).normL1() << std::endl;
+  }
+   
+//   std::cerr << "alpha: " << alpha << std::endl;
+//   std::cerr << "K_times_alpha: " << K_alpha << std::endl;
+//   std::cerr << "y: " << y << std::endl;
+//   
+//   Vector test_alpha;
+//   ILSConjugateGradients cgm;
+//   cgm.solveLin( GMStandard(gK),y,test_alpha);
+//   
+//   K_alpha.multiply( gK, test_alpha);
+//   
+//   std::cerr << "test_alpha (CGM): " << test_alpha << std::endl;
+//   std::cerr << "K_times_alpha (CGM): " << K_alpha << std::endl;
+  
+  std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
+  std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl;
+  
+
+//   CPPUNIT_ASSERT_DOUBLES_EQUAL((K_alphaRandomized-y).normL1(), 0.0, 1e-6);
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testLinSolve done ===================== " << std::endl;
+}
+
+void TestFastHIK::testKernelVector()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelVector ===================== " << std::endl;  
+  
+  std::vector< std::vector<double> > dataMatrix;
+  
+  std::vector<double> dim1; dim1.push_back(0.2);dim1.push_back(0.1);dim1.push_back(0.0);dim1.push_back(0.0);dim1.push_back(0.4); dataMatrix.push_back(dim1);
+  std::vector<double> dim2; dim2.push_back(0.3);dim2.push_back(0.6);dim2.push_back(1.0);dim2.push_back(0.4);dim2.push_back(0.3); dataMatrix.push_back(dim2);
+  std::vector<double> dim3; dim3.push_back(0.5);dim3.push_back(0.3);dim3.push_back(0.0);dim3.push_back(0.6);dim3.push_back(0.3); dataMatrix.push_back(dim3);
+  
+  if ( verbose ) {
+    std::cerr << "data matrix: " << std::endl;
+    printMatrix ( dataMatrix );
+    std::cerr << endl;
+  }
+
+  double noise = 1.0;
+  FastMinKernel fmk ( dataMatrix, noise );
+
+  std::vector<double> xStar; xStar.push_back(0.2);xStar.push_back(0.7);xStar.push_back(0.1);
+  NICE::Vector xStarVec (xStar);
+  std::vector<double> x2; x2.push_back(0.7);x2.push_back(0.3);x2.push_back(0.0);
+  NICE::Vector x2Vec (x2);
+  
+  NICE::SparseVector xStarsparse( xStarVec );
+  NICE::SparseVector x2sparse( x2Vec );
+  
+  NICE::Vector k1;
+  fmk.hikComputeKernelVector( xStarsparse, k1 );
+  
+  NICE::Vector k2;
+  fmk.hikComputeKernelVector( x2sparse, k2 );
+   
+  NICE::Vector k1GT(5); k1GT[0] = 0.6; k1GT[1] = 0.8; k1GT[2] = 0.7; k1GT[3] = 0.5; k1GT[4] = 0.6;
+  NICE::Vector k2GT(5); k2GT[0] = 0.5; k2GT[1] = 0.4; k2GT[2] = 0.3; k2GT[3] = 0.3; k2GT[4] = 0.7;
+  
+  if (verbose)
+  {
+    std::cerr << "k1: " << k1 << std::endl;
+    std::cerr << "GT: " << k1GT << std::endl;
+    std::cerr << "k2: " << k2 << std::endl;
+    std::cerr << "GT: " << k2GT << std::endl;
+  }
+    
+  for (int i = 0; i < 5; i++)
+  {
+    CPPUNIT_ASSERT_DOUBLES_EQUAL(k1[i]-k1GT[i], 0.0, 1e-6);
+    CPPUNIT_ASSERT_DOUBLES_EQUAL(k2[i]-k2GT[i], 0.0, 1e-6);
+  }
+
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testKernelVector done ===================== " << std::endl;
+  
+}
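The hard-coded ground truth can be verified by hand: the first training example has feature values (0.2, 0.3, 0.5) across the three dimensions, so for xStar = (0.2, 0.7, 0.1) we get k1[0] = min(0.2, 0.2) + min(0.7, 0.3) + min(0.1, 0.5) = 0.2 + 0.3 + 0.1 = 0.6, which matches k1GT[0]; the remaining entries of k1GT and k2GT follow in the same way.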
+
+void TestFastHIK::testAddExample()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testAddExample ===================== " << std::endl;  
+  
+  std::vector< std::vector<double> > dataMatrix;
+  int dim = 3;
+  int number = 5;
+  
+  if (!smallTest)
+  {
+    dim = d;
+    number = n;
+  }
+  
+  if (smallTest)
+  {
+    dataMatrix.resize(3);
+    //we explicitly give some values which can easily be verified
+    dataMatrix[0].push_back(0.2);dataMatrix[0].push_back(0.1);dataMatrix[0].push_back(0.0);dataMatrix[0].push_back(0.0);dataMatrix[0].push_back(0.4); 
+    dataMatrix[1].push_back(0.3);dataMatrix[1].push_back(0.6);dataMatrix[1].push_back(1.0);dataMatrix[1].push_back(0.4);dataMatrix[1].push_back(0.3);
+    dataMatrix[2].push_back(0.5);dataMatrix[2].push_back(0.3);dataMatrix[2].push_back(0.0);dataMatrix[2].push_back(0.6);dataMatrix[2].push_back(0.3);
+  }
+  else
+  {
+    // randomly generate features
+    generateRandomFeatures ( dim, number, dataMatrix );
+
+    // and make them sparse
+    int nrZeros(0);
+    for ( int i = 0 ; i < dim; i++ )
+    {
+      for ( int k = 0; k < number; k++ )
+        if ( drand48() < sparse_prob ) 
+        {
+          dataMatrix[i][k] = 0.0;
+          nrZeros++;
+        }
+    }    
+  }
+  
+  if ( verbose ) {
+    std::cerr << "data matrix: " << std::endl;
+    printMatrix ( dataMatrix );
+    std::cerr << endl;
+  }
+  
+  double noise = 1.0;
+  //check the features stored in the fmk
+  FastMinKernel fmk ( dataMatrix, noise );  
+  NICE::Vector alpha;
+  
+  ParameterizedFunction *pf = new PFAbsExp( 1.2 ); // an exponent of 1.0 works as well
+  fmk.applyFunctionToFeatureMatrix( pf );
+//   pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );  
+  
+  std::cerr << "generate alpha" << std::endl;
+  
+  if (smallTest)
+  {
+    //we explicitly give some values which can easily be verified
+    alpha = Vector(5,1.0);
+    alpha[0] = 0.1;alpha[1] = 0.2;alpha[2] = 0.4;alpha[3] = 0.8;alpha[4] = 1.6;
+  }
+  else
+  {  // randomly generate features
+     alpha = Vector::UniformRandom( number, 0.0, 1.0, 0 );
+  }
+  
+  
+  std::cerr << "generate xStar" << std::endl;
+  std::vector<double> xStar;
+  if (smallTest)
+  {
+    // we check the following cases: largest element in the dimension, smallest element in the dimension, zero element
+    // remember to adapt the feature a few lines below as well when changing these values
+    xStar.push_back(0.9);xStar.push_back(0.0);xStar.push_back(0.1);
+  }
+  else
+  {
+    // again: random sampling
+    for ( int i = 0 ; i < dim; i++ )
+    {
+      if ( drand48() < sparse_prob ) 
+        xStar.push_back(0.0);
+      else
+        xStar.push_back(drand48());
+    }
+  }
+  NICE::Vector xStarVec (xStar);
+  NICE::SparseVector xStarSV (xStarVec);
+  
+  // check the alpha-preparations
+  NICE::VVector A;
+  NICE::VVector B;
+  fmk.hik_prepare_alpha_multiplications( alpha, A, B );
+  
+  //check the quantization and LUT construction
+  Quantization q ( numBins );  
+  //direct
+//   double * LUT = fmk.hikPrepareLookupTable(alpha, q);
+  //indirect
+  double * LUT = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, pf );
+  
+  //check for kernel vector norm approximation
+  NICE::VVector AForKVN;
+  fmk.hikPrepareKVNApproximation(AForKVN);
+  
+  //check the LUTs for fast kernel vector norm approximation
+  //direct
+  double* LUT_kernelVectorNormDirect = fmk.hikPrepareLookupTableForKVNApproximation(q, pf );
+  //indirect
+  double* LUT_kernelVectorNorm = fmk.hikPrepareKVNApproximationFast( AForKVN, q, pf );
+  
+  bool LUTKVN_equal( compareLUTs( LUT_kernelVectorNorm, LUT_kernelVectorNormDirect, q.size()*dim ) );
+  
+  if (verbose)
+  {
+    if (LUTKVN_equal == false)
+    {
+      std::cerr << "LUTKVN is not equal :( " << std::endl;
+        std::cerr << "LUT_kernelVectorNorm: " << std::endl;
+        for ( uint i = 0; i < q.size()*dim; i++ )
+        {
+          if ( (i % q.size()) == 0)
+            std::cerr << std::endl;
+          std::cerr << LUT_kernelVectorNorm[i] << " ";
+        }
+        std::cerr << "LUT_kernelVectorNormDirect: "<< std::endl;
+        for ( uint i = 0; i < q.size()*dim; i++ )
+        {
+          if ( (i % q.size()) == 0)
+            std::cerr << std::endl;
+          std::cerr << LUT_kernelVectorNormDirect[i] << " ";
+        }      
+    }
+  }
+  CPPUNIT_ASSERT( LUTKVN_equal == true );
+  
+  if (verbose)
+    std::cerr << "start the incremental learning part" << std::endl;
+
+  // ------  Incremental Learning -----
+  
+  double newAlpha;
+  if (smallTest) 
+    newAlpha = 3.2;
+  else
+    newAlpha = drand48();
+  alpha.append(newAlpha);
+   
+  // add an example
+  if (verbose)
+    std::cerr << "addExample" << std::endl;  
+  fmk.addExample( xStarSV, pf );  
+  
+  // update the alpha preparation
+  if (verbose)  
+    std::cerr << "update Alpha Preparation" << std::endl;
+  fmk.updatePreparationForAlphaMultiplications( xStarSV, newAlpha, A, B, pf );
+  
+  // update the LUT for fast multiplications
+  if (verbose)  
+    std::cerr << "update LUT" << std::endl;
+  fmk.updateLookupTableForAlphaMultiplications( xStarSV, newAlpha, LUT, q, pf );
+  
+  //update VVector for Kernel vector norm
+  if (verbose)  
+    std::cerr << "update VVector for Kernel vector norm" << std::endl;
+  fmk.updatePreparationForKVNApproximation( xStarSV, AForKVN, pf );
+  
+  // update LUT for kernel vector norm
+  if (verbose)  
+    std::cerr << "update LUT for kernel vector norm" << std::endl;
+  fmk.updateLookupTableForKVNApproximation( xStarSV, LUT_kernelVectorNorm, q, pf );
+  
+  //and batch retraining  
+  if (verbose)  
+    std::cerr << "perform batch retraining " << std::endl;  
+  for ( int i = 0 ; i < dim; i++ )
+    dataMatrix[i].push_back(xStar[i]);
+  
+  FastMinKernel fmk2 ( dataMatrix, noise );
+  fmk2.applyFunctionToFeatureMatrix( pf );
+  
+  NICE::VVector A2;
+  NICE::VVector B2;
+  if (verbose)  
+    std::cerr << "prepare alpha multiplications" << std::endl;
+  fmk2.hik_prepare_alpha_multiplications( alpha, A2, B2 );
+ 
+  // compare the content of the data matrix
+  if (verbose)  
+    std::cerr << "do the comparison of the resulting feature matrices" << std::endl;
+  if (verbose)
+  {
+    std::cerr << "fmk.featureMatrix().print()" << std::endl;
+    fmk.featureMatrix().print(std::cerr);
+  
+    std::cerr << "fmk2.featureMatrix().print()" << std::endl;
+    fmk2.featureMatrix().print(std::cerr);
+  }  
+  
+  CPPUNIT_ASSERT(fmk.featureMatrix() == fmk2.featureMatrix());
+
+  //compare the preparation for alpha multiplications
+  if (verbose)  
+    std::cerr << "do the comparison of the resulting matrices A and B" << std::endl;
+  CPPUNIT_ASSERT(compareVVector(A, A2));  
+  CPPUNIT_ASSERT(compareVVector(B, B2));
+  
+  if (verbose)
+  {
+    std::cerr << "compare the preparation for alpha multiplications" << std::endl;
+    std::cerr << "A: " << std::endl;
+    A.store(std::cerr);
+    std::cerr << "A2: " << std::endl;
+    A2.store(std::cerr);
+    std::cerr << "B: " << std::endl;
+    B.store(std::cerr);
+    std::cerr << "B2: " << std::endl;
+    B2.store(std::cerr);
+  }  
+  
+  // compare the resulting LUTs
+  if (verbose)
+    std::cerr << "prepare LUT" << std::endl;
+  double * LUT2 = fmk2.hikPrepareLookupTable( alpha, q, pf );    
+  if (verbose)
+    std::cerr << "do the comparison of the resulting LUTs" << std::endl;  
+  bool LUTequal( compareLUTs( LUT, LUT2, q.size()*dim) );
+  
+  if (verbose)
+  {
+    if ( LUTequal )
+      std::cerr << "LUTs are equal :) " << std::endl;
+    else
+    {
+      std::cerr << "LUTs are not equal :( " << std::endl;
+      std::cerr << "new feature vector: " << xStarVec << std::endl;
+      
+      std::cerr << "newAlpha: " << newAlpha <<  " alpha " << alpha << std::endl;
+      std::cerr << "LUT: " << std::endl;
+      for ( uint i = 0; i < q.size()*dim; i++ )
+      {
+        if ( (i % q.size()) == 0)
+          std::cerr << std::endl;
+        std::cerr << LUT[i] << " ";
+      }
+      std::cerr << "LUT2: "<< std::endl;
+      for ( uint i = 0; i < q.size()*dim; i++ )
+      {
+        if ( (i % q.size()) == 0)
+          std::cerr << std::endl;
+        std::cerr << LUT2[i] << " ";
+      }     
+    }
+  }
+  CPPUNIT_ASSERT( LUTequal );
+  
+  //check for kernel vector norm approximation
+  NICE::VVector A2ForKVN;
+  fmk2.hikPrepareKVNApproximation( A2ForKVN );
+  bool KVN_equal ( compareVVector(AForKVN, A2ForKVN) );
+ 
+  if (verbose)
+  {
+    if ( KVN_equal )
+      std::cerr << "VVectors for kernel vector norm are equal :) " << std::endl;
+    else
+    {
+      std::cerr << "VVectors for vector norm are not equal :( " << std::endl;
+      std::cerr << "new feature vector: " << xStarVec << std::endl;
+      
+      std::cerr << "AForKVN: " << std::endl;
+      AForKVN.store(std::cerr);
+      
+      std::cerr << "A2ForKVN: "<< std::endl;
+      A2ForKVN.store(std::cerr);
+    }
+  }  
+  
+  CPPUNIT_ASSERT( KVN_equal );  
+  
+  //check for kernel vector norm approximation with LUTs
+  if (verbose)
+    std::cerr << "prepare LUT for kernel vector norm" << std::endl;
+  double* LUT2_kernelVectorNorm = fmk2.hikPrepareLookupTableForKVNApproximation( q, pf );  
+  if (verbose)
+    std::cerr << "do the comparison of the resulting LUTs for kernel vector norm computation" << std::endl;
+  bool LUT_KVN_equal( compareLUTs ( LUT_kernelVectorNorm, LUT2_kernelVectorNorm, q.size()*dim ) );
+  
+  if (verbose)
+  {
+    if ( LUT_KVN_equal )
+      std::cerr << "LUTs for kernel vector norm are equal :) " << std::endl;
+    else
+    {
+      std::cerr << "LUTs kernel vector norm are not equal :( " << std::endl;
+      std::cerr << "new feature vector: " << xStarVec << std::endl;
+      
+      std::cerr << "LUT_kernelVectorNorm: " << std::endl;
+      for ( uint i = 0; i < q.size()*dim; i++ )
+      {
+        if ( (i % q.size()) == 0)
+          std::cerr << std::endl;
+        std::cerr << LUT_kernelVectorNorm[i] << " ";
+      }
+      std::cerr << std::endl << "LUT2_kernelVectorNorm: "<< std::endl;
+      for ( uint i = 0; i < q.size()*dim; i++ )
+      {
+        if ( (i % q.size()) == 0)
+          std::cerr << std::endl;
+        std::cerr << LUT2_kernelVectorNorm[i] << " ";
+      }     
+    }
+  }  
+  
+  CPPUNIT_ASSERT( LUT_KVN_equal );
+  
+  delete [] LUT;
+  delete [] LUT2;
+  
+  delete [] LUT_kernelVectorNorm;
+  delete [] LUT2_kernelVectorNorm;
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testAddExample done ===================== " << std::endl;  
+}
+
+void TestFastHIK::testAddMultipleExamples()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testAddMultipleExamples ===================== " << std::endl;  
+  
+  std::vector< std::vector<double> > dataMatrix;
+  int dim = d;
+  int number = n;
+
+  // randomly generate features
+  generateRandomFeatures ( dim, number, dataMatrix );
+
+  // and make them sparse
+  int nrZeros(0);
+  for ( int i = 0 ; i < dim; i++ )
+  {
+    for ( int k = 0; k < number; k++ )
+      if ( drand48() < sparse_prob ) 
+      {
+        dataMatrix[i][k] = 0.0;
+        nrZeros++;
+      }
+  }    
+  
+  if ( verbose ) {
+    std::cerr << "data matrix: " << std::endl;
+    printMatrix ( dataMatrix );
+    std::cerr << endl;
+  }
+  
+  double noise = 1.0;
+  //check the features stored in the fmk
+  FastMinKernel fmk ( dataMatrix, noise );  
+  NICE::Vector alpha;
+  
+  ParameterizedFunction *pf = new PFAbsExp( 1.0 ); // an exponent of 1.0 is sufficient here
+  fmk.applyFunctionToFeatureMatrix( pf );
+  
+  std::cerr << "generate alpha" << std::endl;  
+  
+  // randomly generate features
+  alpha = Vector::UniformRandom( number, 0.0, 1.0, 0 );
+   
+/*  // check the alpha-preparations
+  NICE::VVector A;
+  NICE::VVector B;
+  fmk.hik_prepare_alpha_multiplications( alpha, A, B );*/  
+  
+  if (verbose)
+    std::cerr << "start the incremental learning part" << std::endl;
+
+  // ------  Incremental Learning -----
+    
+  std::cerr << "generate xStar" << std::endl;
+  std::vector<NICE::SparseVector > newExamples;
+  int nrOfNewExamples(5);
+  // again: random sampling
+  for (int i = 0; i < nrOfNewExamples; i++)
+  {
+    NICE::Vector xStar(dim);
+    for ( int j = 0 ; j < dim; j++ )
+    {
+      if ( drand48() < sparse_prob ) 
+      {
+        xStar[j] = 0.0;
+        dataMatrix[j].push_back(0.0);
+      }
+      else
+      {
+        double tmp(drand48());
+        xStar[j] = tmp;
+        dataMatrix[j].push_back(tmp);
+      }
+    }
+    
+    NICE::SparseVector xStarSV (xStar);
+    newExamples.push_back(xStarSV);
+  }    
+
+  // add an example
+  if (verbose)
+    std::cerr << "addExample" << std::endl;  
+  for (int i = 0; i < nrOfNewExamples; i++)
+  {
+    fmk.addExample( newExamples[i], pf );  
+  }
+  
+  int oldSize(alpha.size());
+  alpha.resize( oldSize + nrOfNewExamples);
+  for (int i = 0; i < nrOfNewExamples; i++)
+  {
+    alpha[oldSize + i] = drand48();
+  }
+   
+  
+  // update the alpha preparation
+  if (verbose)  
+    std::cerr << "update Alpha Preparation" << std::endl;
+  // check the alpha-preparations
+  NICE::VVector A;
+  NICE::VVector B;
+  fmk.hik_prepare_alpha_multiplications( alpha, A, B );   
+  
+  FastMinKernel fmk2 ( dataMatrix, noise );
+  fmk2.applyFunctionToFeatureMatrix( pf );  
+  
+  NICE::VVector A2;
+  NICE::VVector B2;
+  fmk2.hik_prepare_alpha_multiplications( alpha, A2, B2 );
+  
+  bool equalA = compareVVector( A, A2 );
+  bool equalB = compareVVector( B, B2 );
+  
+  CPPUNIT_ASSERT(equalA == true);
+  CPPUNIT_ASSERT(equalB == true);  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFastHIK::testAddMultipleExamples done ===================== " << std::endl;  
+}
+
+#endif

+ 48 - 0
tests/TestFastHIK.h

@@ -0,0 +1,48 @@
+#ifndef _TESTFASTHIK_H
+#define _TESTFASTHIK_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <gp-hik-core/GMHIKernel.h>
+
+/**
+ * CppUnit-Testcase. 
+ * @brief CppUnit-Testcase to verify that all important methods of the gp-hik framework perform as desired
+ */
+class TestFastHIK : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestFastHIK );
+    
+    CPPUNIT_TEST(testKernelMultiplication);
+    CPPUNIT_TEST(testKernelMultiplicationFast);
+    CPPUNIT_TEST(testKernelSum);
+    CPPUNIT_TEST(testKernelSumFast);
+    CPPUNIT_TEST(testLUTUpdate);
+    CPPUNIT_TEST(testLinSolve);
+    CPPUNIT_TEST(testKernelVector);
+    CPPUNIT_TEST(testAddExample);
+    CPPUNIT_TEST(testAddMultipleExamples);
+    
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    /**
+    * Test methods
+    */
+    void testKernelMultiplication();
+    void testKernelMultiplicationFast();
+    void testKernelSum();
+    void testKernelSumFast();
+    void testLUTUpdate();
+    void testLinSolve();
+    void testKernelVector();
+    void testAddExample();
+    void testAddMultipleExamples();
+
+};
+
+#endif // _TESTFASTHIK_H

+ 185 - 0
tests/TestFeatureMatrixT.cpp

@@ -0,0 +1,185 @@
+#ifdef NICE_USELIB_CPPUNIT
+
+#include <string>
+#include <exception>
+
+#include <core/matlabAccess/MatFileIO.h>
+
+#include <gp-hik-core/tools.h>
+
+#include "TestFeatureMatrixT.h"
+
+const bool verbose = false;
+const bool verboseStartEnd = true;
+const uint n = 15;
+const uint d = 3;
+const double sparse_prob = 0.8;
+
+using namespace NICE;
+using namespace std;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestFeatureMatrixT );
+
+void TestFeatureMatrixT::setUp() {
+}
+
+void TestFeatureMatrixT::tearDown() {
+}
+
+void TestFeatureMatrixT::testSetup()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFeatureMatrixT::testSetup ===================== " << std::endl;
+  
+  std::vector< std::vector<double> > dataMatrix;
+
+  generateRandomFeatures ( d, n, dataMatrix );
+
+  int nrZeros(0);
+  for ( uint i = 0 ; i < d; i++ )
+  {
+    for ( uint k = 0; k < n; k++ )
+      if ( drand48() < sparse_prob ) 
+		{
+      {
+        dataMatrix[i][k] = 0.0;
+        nrZeros++;
+      }
+
+  if ( verbose ) {
+    cerr << "data matrix: " << endl;
+    printMatrix ( dataMatrix );
+    cerr << endl;
+  }
+  
+  transposeVectorOfVectors(dataMatrix);
+  NICE::FeatureMatrixT<double> fm(dataMatrix);
+  
+  if ( (n*d) > 0 )
+  {
+    if (verbose)
+      std::cerr << "fm.computeSparsityRatio(): " << fm.computeSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
+    CPPUNIT_ASSERT_DOUBLES_EQUAL(fm.computeSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
+  }
+  
+  transposeVectorOfVectors(dataMatrix);
+  std::vector<std::vector<int> > permutations;
+  if (verbose)
+    std::cerr << "now try to set_features" << std::endl;
+  fm.set_features(dataMatrix, permutations);
+  if ( (n*d) > 0 )
+  {
+    if (verbose)
+      std::cerr << "fm.computeSparsityRatio(): " << fm.computeSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
+    CPPUNIT_ASSERT_DOUBLES_EQUAL(fm.computeSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
+  }
+  
+  NICE::MatrixT<double> matNICE;
+  fm.computeNonSparseMatrix(matNICE);
+  
+  if (verbose)
+  {
+    std::cerr << "converted NICE-Matrix" << std::endl;
+    std::cerr << matNICE << std::endl;
+  }
+  
+  std::vector<std::vector<double> > matSTD;
+  fm.computeNonSparseMatrix(matSTD);
+  
+  if (verbose)
+  {
+    std::cerr << "converted std-Matrix" << std::endl;
+    printMatrix(matSTD);
+  }
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFeatureMatrixT::testSetup done ===================== " << std::endl;  
+}
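The sparsity assertion above compares the matrix's own bookkeeping against a directly counted ratio. In plain terms, the expected value is just the fraction of exact zeros (hypothetical helper with the same semantics):

    #include <vector>

    // Fraction of exact zeros in a dimension-major feature matrix,
    // i.e., the value computeSparsityRatio() is expected to report.
    double directSparsityRatio( const std::vector<std::vector<double> > & data )
    {
      size_t zeros = 0, total = 0;
      for ( size_t i = 0; i < data.size(); i++ )
        for ( size_t k = 0; k < data[i].size(); k++, total++ )
          if ( data[i][k] == 0.0 )
            zeros++;
      return ( total > 0 ) ? double( zeros ) / double( total ) : 0.0;
    }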
+
+
+void TestFeatureMatrixT::testMatlabIO()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestFeatureMatrixT::testMatlabIO ===================== " << std::endl;
+  
+  NICE::MatFileIO matfileIOA = MatFileIO("./sparse3x3matrixA.mat",MAT_ACC_RDONLY);
+  sparse_t sparseA;
+  matfileIOA.getSparseVariableViaName(sparseA,"A");
+  NICE::FeatureMatrixT<double> fmA(sparseA);//, 3);
+  if ( verbose )
+  {
+    fmA.print(std::cerr);
+  }
+  if (verbose)
+    std::cerr << "fmA.get_n(): " << fmA.get_n() << " fmA.get_d(): " << fmA.get_d() << std::endl;
+
+  NICE::MatFileIO matfileIOM = MatFileIO("./sparse20x30matrixM.mat",MAT_ACC_RDONLY);
+  sparse_t sparseM;
+  matfileIOM.getSparseVariableViaName(sparseM,"M");
+  NICE::FeatureMatrixT<double> fmM(sparseM);//, 20);
+  if ( verbose )
+  {
+    fmM.print(std::cerr);
+  }
+  if (verbose)
+    std::cerr << "fmM.get_n(): " << fmM.get_n() << " fmM.get_d(): " << fmM.get_d() << std::endl;
+
+  NICE::MatrixT<double> matNICE;
+  fmM.computeNonSparseMatrix(matNICE, true);
+  
+  if (verbose)
+  {
+    std::cerr << "converted NICE-Matrix" << std::endl;
+    std::cerr << matNICE << std::endl;
+  }
+  
+  std::vector<std::vector<double> > matSTD;
+  fmM.computeNonSparseMatrix(matSTD, true);
+  
+  if (verbose)
+  {
+    std::cerr << "converted std-Matrix" << std::endl;
+    printMatrix(matSTD);
+  }
+
+//   std::string filename = "/home/dbv/bilder/imagenet/devkit-1.0/demo/demo.train.mat";
+//   std::string dataMatrixMatlab = "training_instance_matrix";
+//   
+//   	//tic
+// 	time_t  readSparseMatlabMatrix_start = clock();
+//   
+//   std::cerr << "try to read " << filename << std::endl;
+//   MatFileIO matfileIO = MatFileIO(filename,MAT_ACC_RDONLY);
+//   std::cerr << "matfileIO successfully done"<< std::endl;
+//   
+//   sparse_t sparse;
+//   matfileIO.getSparseVariableViaName(sparse,dataMatrixMatlab);
+// 
+// 	//toc
+// 	float time_readSparseMatlabMatrix = (float) (clock() - readSparseMatlabMatrix_start);
+// 	std::cerr << "Time for reading  the sparse Matlab Matrix: " << time_readSparseMatlabMatrix/CLOCKS_PER_SEC << " s" << std::endl;
+//   
+//   std::cerr << "sparse-struct read, now try to give it to our FeatureMatrixT" << std::endl;
+//   
+//   	//tic
+// 	time_t  readSparseIntoFM_start = clock();
+// 
+// 	NICE::FeatureMatrixT<double> fm(sparse);
+// 	
+//   	//toc
+// 	float time_readSparseIntoFM = (float) (clock() - readSparseIntoFM_start);
+// 	std::cerr << "Time for parsing the sparse Matrix into our FeatureMatrixT-struct: " << time_readSparseIntoFM/CLOCKS_PER_SEC << " s" << std::endl;
+// 
+//   
+// 	std::cerr << "fm.get_n(): " << fm.get_n() << " fm.get_d(): " << fm.get_d() << std::endl;
+// 	std::cerr << "fm.computeSparsityRatio() of Imagenet: " << fm.computeSparsityRatio() << std::endl;
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestFeatureMatrixT::testMatlabIO done===================== " << std::endl;
+  
+}
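+
+// A minimal usage sketch of the interface exercised above (illustration only,
+// restricted to calls that already appear in this test):
+//
+//   std::vector< std::vector<double> > data;
+//   generateRandomFeatures ( d, n, data );     // helper from gp-hik-core/tools.h
+//   transposeVectorOfVectors ( data );         // switch to the layout FeatureMatrixT expects
+//   NICE::FeatureMatrixT<double> fm ( data );
+//   double ratio = fm.computeSparsityRatio();  // fraction of (near-)zero entries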
+
+
+#endif

+ 32 - 0
tests/TestFeatureMatrixT.h

@@ -0,0 +1,32 @@
+#ifndef _TESTFEATUREMATRIXT_H
+#define _TESTFEATUREMATRIXT_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <gp-hik-core/FeatureMatrixT.h>
+
+/**
+ * CppUnit-Testcase. 
+ * @brief CppUnit-Testcase to verify that all important methods of the Feature Matrix perform as desired
+ */
+class TestFeatureMatrixT : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestFeatureMatrixT );
+    CPPUNIT_TEST(testSetup);
+    CPPUNIT_TEST(testMatlabIO);
+
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    /**
+    * Test routines for construction from std::vector data and for Matlab I/O
+    */
+    void testSetup();
+    void testMatlabIO();
+};
+
+#endif // _TESTFEATUREMATRIXT_H

+ 190 - 0
tests/TestVectorSorter.cpp

@@ -0,0 +1,190 @@
+#ifdef NICE_USELIB_CPPUNIT
+
+#include <string>
+#include <exception>
+#include <map>
+
+#include <gp-hik-core/SortedVectorSparse.h>
+#include "TestVectorSorter.h"
+
+
+using namespace NICE;
+using namespace std;
+
+const bool verboseStartEnd = true;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestVectorSorter );
+
+void TestVectorSorter::setUp() {
+}
+
+void TestVectorSorter::tearDown() {
+}
+
+void TestVectorSorter::checkData ( const vector<double> & all_elements, const NICE::SortedVectorSparse<double> & vSS, double sparse_tolerance )
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestVectorSorter::checkData ===================== " << std::endl;
+  
+  vector< pair<double, int> > all_elements_sorted;
+
+  vector< pair<double, int> > nonzero_elements;
+  for (uint i = 0 ; i < all_elements.size(); i++ )
+  {
+    if ( fabs(all_elements[i]) > sparse_tolerance ) {
+      nonzero_elements.push_back( pair<double, int> ( all_elements[i], i ) );
+      all_elements_sorted.push_back( pair<double, int> ( all_elements[i], i ) );
+    } else {
+      all_elements_sorted.push_back( pair<double, int> ( 0.0, i ) );
+    }
+  }
+
+  sort ( nonzero_elements.begin(), nonzero_elements.end() );
+  sort ( all_elements_sorted.begin(), all_elements_sorted.end() );
+
+  // looping through all non-zero values
+  uint k = 0;
+  for (NICE::SortedVectorSparse<double>::const_elementpointer it = vSS.nonzeroElements().begin(); it != vSS.nonzeroElements().end(); it++,k++)
+  {
+    CPPUNIT_ASSERT_DOUBLES_EQUAL( nonzero_elements[k].first, it->first, 0.0 );
+    CPPUNIT_ASSERT_EQUAL( nonzero_elements[k].second, it->second.first );
+  }
+
+  // expected permutation, e.g., 2 3 0 1 5 4
+  std::vector<int> vSSPerm ( vSS.getPermutation() );
+  for (uint k = 0; k < vSSPerm.size(); k++)
+  {
+    CPPUNIT_ASSERT_EQUAL( all_elements_sorted[k].second, vSSPerm[k] );
+  }
+
+  std::vector<int> vSSPermNNZ ( vSS.getPermutationNonZero() );
+  vector<pair<int,double> > sv ( vSS.getOrderInSeparateVector() );
+  for (uint k = 0; k < vSSPermNNZ.size(); k++)
+  {
+    CPPUNIT_ASSERT_EQUAL( nonzero_elements[k].second, vSSPermNNZ[k] );
+    CPPUNIT_ASSERT_EQUAL( sv[k].first, vSSPermNNZ[k] );
+    CPPUNIT_ASSERT_EQUAL( sv[k].second, vSS.access( sv[k].first ) );
+  }
+
+//   cerr << endl;
+  for (int k = 0; k < vSS.getN(); k++)
+  {
+    CPPUNIT_ASSERT_DOUBLES_EQUAL( all_elements[k], vSS.access(k), sparse_tolerance ); 
+//     cerr << "Element " << k << " = " << vSS.access(k) << endl;
+  }
+//     vSS.print();
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestVectorSorter::checkData done ===================== " << std::endl;  
+
+}
+
+void TestVectorSorter::testVectorSorter() 
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestVectorSorter::testVectorSorter ===================== " << std::endl;
+  
+  vector<double> all_elements;
+  all_elements.push_back(2);
+  all_elements.push_back(4);
+  all_elements.push_back(0);
+  all_elements.push_back(1e-7);
+  all_elements.push_back(7);
+  all_elements.push_back(5);
+
+  double sparse_tolerance = 1e-7;
+
+  // Now we put everything into a SortedVectorSparse object
+  NICE::SortedVectorSparse<double> vSS;
+  vSS.setTolerance(sparse_tolerance);
+  for (uint i = 0 ; i < all_elements.size(); i++ )
+    vSS.insert( all_elements[i] );
+
+  checkData( all_elements, vSS );
+  
+
+//   cerr << endl;
+//   cerr << "v[1] = 3.0 ";
+  vSS.set(1, 3.0);
+  all_elements[1] = 3.0;
+  checkData( all_elements, vSS );
+  
+//   cerr << endl;
+//   cerr << "v[1] = 0.0 ";
+  vSS.set(1, 0.0);
+  all_elements[1] = 0.0;
+  checkData( all_elements, vSS );
+
+//   cerr << endl;
+//   cerr << "v[5] = -3.0 ";
+  vSS.set(5, -3.0);
+  all_elements[5] = -3.0;
+  checkData( all_elements, vSS );
+
+//   cerr << endl;
+//   cerr << "add 13.0 ";
+  vSS.insert(13.0);
+  all_elements.push_back(13.0);
+  checkData( all_elements, vSS );
+
+//   cerr << endl;
+//   cerr << "add 0.0 ";
+  vSS.insert(0.0);
+  all_elements.push_back(0.0);
+  checkData( all_elements, vSS );
+
+//   cerr << endl;
+//   cerr << "v[0] = -10.0 ";
+  vSS.set(0, -10.0);
+  all_elements[0] = -10.0;
+  checkData( all_elements, vSS );
+
+//   cerr << endl;
+//   cerr << "v[5] = 2.0 ";
+  vSS.set(5, 2.0);
+  all_elements[5] = 2.0;
+  checkData( all_elements, vSS );
+
+//   cerr << endl;
+//   cerr << "v[5] = 0.0 ";
+  vSS.set(5, 0.0);
+  all_elements[5] = 0.0;
+  checkData( all_elements, vSS ); 
+
+  SortedVectorSparse<double> vSS_copy;
+  vSS_copy = vSS;
+  checkData ( all_elements, vSS_copy );
+
+  SortedVectorSparse<double> vSS_all;
+  vSS_all.insert ( all_elements );
+  checkData ( all_elements, vSS_all, 0.0 );
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestVectorSorter::testVectorSorter done ===================== " << std::endl;  
+}
+
+void TestVectorSorter::testMultiMap()
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestVectorSorter::testMultiMap ===================== " << std::endl;
+  
+  multimap<int, double> d;
+  multimap<int, double>::iterator it1 = d.insert ( pair<int, double> ( 3, 3.0 ) );
+  multimap<int, double>::iterator it2 = d.insert ( pair<int, double> ( 1, 1.0 ) );
+  multimap<int, double>::iterator it3 = d.insert ( pair<int, double> ( 2, 2.0 ) );
+  multimap<int, double>::iterator it4 = d.insert ( pair<int, double> ( 5, 5.0 ) );
+
+  it1->second = 1.5;
+  CPPUNIT_ASSERT_EQUAL ( 1.5, it1->second );
+  CPPUNIT_ASSERT_EQUAL ( 1.0, it2->second );
+  CPPUNIT_ASSERT_EQUAL ( 2.0, it3->second );
+  CPPUNIT_ASSERT_EQUAL ( 5.0, it4->second );
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestVectorSorter::testMultiMap done ===================== " << std::endl;  
+}
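+
+// A minimal usage sketch of SortedVectorSparse (illustration only, restricted
+// to calls that already appear in the tests above):
+//
+//   NICE::SortedVectorSparse<double> v;
+//   v.setTolerance ( 1e-7 );    // values with magnitude <= tolerance count as zero
+//   v.insert ( 2.0 );           // append a value, keeping the sorted structure
+//   v.set ( 0, 3.0 );           // overwrite element 0
+//   double x = v.access ( 0 );  // read element 0 back, here 3.0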
+
+#endif

+ 36 - 0
tests/TestVectorSorter.h

@@ -0,0 +1,36 @@
+#ifndef _TESTVECTORSORTER_H
+#define _TESTVECTORSORTER_H
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <gp-hik-core/SortedVectorSparse.h>
+
+/**
+ * CppUnit-Testcase. 
+ * @brief CppUnit-Testcase to verify that all important methods of the SortedVectorSparse class perform as desired
+ */
+class TestVectorSorter : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestVectorSorter );
+    
+    CPPUNIT_TEST(testVectorSorter);
+    CPPUNIT_TEST(testMultiMap);
+    
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+    void checkData ( const std::vector<double> & all_elements, const NICE::SortedVectorSparse<double> & vSS, double sparse_tolerance = 1e-7 );
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    /**
+    * Test routines for insertion, overwriting, and access in SortedVectorSparse
+    */
+    void testVectorSorter();
+
+    void testMultiMap();
+
+};
+
+#endif // _TESTVECTORSORTER_H

BIN
tests/sparse20x30matrixM.mat


BIN
tests/sparse3x3matrixA.mat


+ 42 - 0
tests/toyExample1.data

@@ -0,0 +1,42 @@
+39 x 2
+0.1394    0.3699
+0.1210    0.3260
+0.1164    0.2588
+0.1210    0.2032
+0.1417    0.1886
+0.1624    0.2325
+0.1624    0.3319
+0.1509    0.3114
+0.1417    0.2412
+0.1417    0.2763
+0.1279    0.3173
+0.3537    0.3582
+0.3306    0.3056
+0.3306    0.2471
+0.3376    0.2061
+0.3583    0.1740
+0.3698    0.1564
+0.3790    0.2558
+0.3744    0.3173
+0.3698    0.3406
+0.3583    0.2646
+0.3629    0.1944
+0.3468    0.3173
+0.3329    0.2588
+0.3514    0.1974
+0.2224    0.3436
+0.2270    0.3348
+0.2293    0.2675
+0.2339    0.2237
+0.2316    0.1623
+0.2408    0.1857
+0.2615    0.2763
+0.2638    0.3436
+0.2592    0.3904
+0.2477    0.4284
+0.2224    0.3582
+0.2177    0.2909
+0.2224    0.2178
+0.2500    0.1213
+39 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+39 < 0 0 0 0 0 0 0 0 0 0 0 3 3 3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >

+ 9 - 0
tests/toyExample2.data

@@ -0,0 +1,9 @@
+6 x 2
+0.1    0.3
+0.1    0.2
+0.3    0.3
+0.2    0.2
+0.4    0.1
+0.1    0.5
+6 < 0 0 0 1 1 1 >
+6 < 0 0 3 3 1 1 >

+ 5 - 0
todo

@@ -0,0 +1,5 @@
+- separate verbose-flags (optimization, training, classification, ...) needed?
+
+- tutorials, numbers, demo-programs, ...
+
+- consistency checks (dimensions etc.)

+ 431 - 0
tools.h

@@ -0,0 +1,431 @@
+/** 
+* @file tools.h
+* @brief Some very basic methods (e.g. generation of random numbers) (Interface and Implementation)
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+#ifndef FASTHIK_TOOLSINCLUDE
+#define FASTHIK_TOOLSINCLUDE
+
+#include "core/vector/MatrixT.h"
+#include "core/vector/VectorT.h"
+#include <cstdlib>
+
+
+#include <vector>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include <cmath>
+
+using namespace std;
+
+/** 
+* @brief float extension of rand
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline float frand() { return (float)rand() / (float)(RAND_MAX); } // returns values in [0,1]
+/** 
+* @brief double extension of rand
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline double drand() { return (double)rand() / (double)(RAND_MAX); } // returns values in [0,1]
+
+/** 
+* @brief generates a random matrix of size n x d with values between zero and one
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline void generateRandomFeatures(const int & n, const int & d , NICE::Matrix & features)
+{
+	features.resize(n,d);
+	for (int i = 0; i < n; i++)
+	{
+		for (int j = 0; j < d; j++)
+		{
+			features(i,j) = drand();
+		}
+	}
+}
+
+/** 
+* @brief generates a std::vector of NICE::Vector ( size n x d ) with values between zero and one
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline void generateRandomFeatures(const int & n, const int & d , std::vector<NICE::Vector> & features)
+{
+	features.clear();
+	for (int i = 0; i < n; i++)
+	{
+		NICE::Vector feature(d);
+		for (int j = 0; j < d; j++)
+		{
+			feature[j] = drand(); // fill component j (not i)
+		}
+		features.push_back(feature);
+		
+	}
+}
+
+/** 
+* @brief generates a std::vector of std::vector<double> ( size n x d ) with values between zero and one
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline void generateRandomFeatures(const int & n, const int & d , std::vector<std::vector<double> > & features)
+{
+	features.clear();
+	for (int i = 0; i < n; i++)
+	{
+		std::vector<double> feature;
+		feature.clear();
+		for (int j = 0; j < d; j++)
+		{
+			feature.push_back(drand());
+		}
+		features.push_back(feature);
+	}
+}
+
+/** 
+* @brief generates a std::vector<double> of size n with values between zero and one
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline void generateRandomFeatures(const int & n, std::vector<double> & features)
+{
+	features.clear();
+	for (int i = 0; i < n; i++)
+	{
+		features.push_back(drand());
+	}
+}
+
+/** 
+* @brief generates a NICE::Vector of size n with values between zero and one
+* @author Alexander Freytag
+* @date 12/06/2011
+*/
+inline void generateRandomFeatures(const int & n, NICE::Vector & features)
+{
+	features.resize(n);
+	for (int i = 0; i < n; i++)
+	{
+		features[i] = drand48();
+	}
+}
+
+/** 
+* @brief generates a NICE::Matrix with random entries between zero and range
+* @author Alexander Freytag
+* @date 05-01-2012 (dd-mm-yyyy)
+*/
+inline NICE::Matrix generateRandomMatrix(const int & rows, const int & cols, const bool & symmetric = false, const int & range=1)
+{
+	NICE::Matrix M(rows,cols);
+	if (symmetric)
+	{
+		//TODO check whether it is more efficient to run first over j and then over i
+		for (int i = 0; i < rows; i++)
+			for (int j = i; j < cols; j++)
+			{
+				M(i,j) = drand()*range;
+				M(j,i) = M(i,j);
+			}
+	}
+	else
+	{
+		//TODO check whether it is more efficient to run first over j and then over i
+		for (int i = 0; i < rows; i++)
+			for (int j = 0; j < cols; j++)
+				M(i,j) = drand()*range;
+	}
+	return M;
+}
+
+/** 
+* @brief generates a NICE::Matrix with random entries between zero and range
+* @author Alexander Freytag
+* @date 05-01-2012 (dd-mm-yyyy)
+*/
+inline void generateRandomMatrix(NICE::Matrix & M, const int & rows, const int & cols, const bool & symmetric = false, const int & range=1)
+{
+	M.resize(rows,cols);
+	if (symmetric)
+	{
+		//TODO check whether it is more efficient to run first over j and then over i
+		for (int i = 0; i < rows; i++)
+			for (int j = i; j < cols; j++)
+			{
+				M(i,j) = drand()*range;
+				M(j,i) = M(i,j);
+			}
+	}
+	else
+	{
+		//TODO check whether it is more efficient to run first over j and then over i
+		for (int i = 0; i < rows; i++)
+			for (int j = 0; j < cols; j++)
+				M(i,j) = drand()*range;
+	}
+}
+
+/** 
+* @brief computes arbitrary Lp-Norm of a double-vector. Standard is L2-norm 
+* @author Alexander Freytag
+* @date 12/08/2011
+*/
+inline double vectorNorm(const std::vector<double> & a, const int & p=2)
+{
+	double norm(0.0);
+	for (int i = 0; i < (int) a.size(); i++)
+	{
+		norm += pow(fabs(a[i]),p);
+	}
+	norm = pow(norm, 1.0/p);
+	
+	return norm;
+}
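+
+// Worked example (not part of the original source): for a = (3, 4),
+// vectorNorm(a) returns 5.0 (L2 norm) and vectorNorm(a, 1) returns 7.0 (L1 norm).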
+
+/** 
+* @brief Transposes a vector of vectors, assuming all inner vectors have the same length. Allocates only as much space as needed for the current data
+* @author Alexander Freytag
+* @date 12/08/2011
+*/
+template<class ElementType>
+inline void transposeVectorOfVectors(std::vector<std::vector<ElementType> > & features)
+{
+	// unsafe: does not check whether all inner vectors are equally filled
+	int d (features.size());
+	int n ( (features[0]).size() );
+	
+	std::vector<std::vector<ElementType> > old_features(features);
+	
+	int tmp(n);
+	n = d;
+	d = tmp;
+	features.resize(d);
+	
+	for (int dim = 0; dim < d; dim++)
+	{
+		features[dim].resize(n);
+		for (int feat = 0; feat < n; feat++)
+		{
+			(features[dim])[feat] =  (old_features[feat])[dim];
+		}
+	}
+}
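+
+// Example (hypothetical data): the 2x3 matrix { {1,2,3}, {4,5,6} } becomes
+// the 3x2 matrix { {1,4}, {2,5}, {3,6} } after the call.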
+
+/** 
+* @brief Prints the whole matrix (outer loop over rows, inner loop over columns)
+* @author Alexander Freytag
+* @date 12/07/2011
+*/
+inline void printMatrix(const NICE::Matrix & K)
+{
+   for (int row = 0; row < (int)K.rows(); row++)
+   {
+     for (int col = 0; col < (int)K.cols(); col++)
+     {
+       std::cerr << K(row,col) << " ";
+     }
+     std::cerr << std::endl;
+   }
+}
+
+/** 
+* @brief Prints the whole matrix (outer loop over rows, inner loop over columns)
+* @author Alexander Freytag
+* @date 12/07/2011
+*/
+template <typename T>
+inline void printMatrix(const std::vector< std::vector<T> > & K)
+{
+   for (int row = 0; row < (int)K.size(); row++)
+   {
+     for (int col = 0; col < (int)K[row].size(); col++)
+     {
+       std::cerr << K[row][col] << " ";
+     }
+     std::cerr << std::endl;
+   }
+}
+
+template <class T>
+inline void read_values(const std::string & file_of_values, std::vector<T> & values)
+{
+  std::cerr << "read from file " << file_of_values << std::endl;
+  values.clear();
+  ifstream iss(file_of_values.c_str());
+  if ( !iss.good() )
+  {
+    std::cerr << "read_values::Unable to read the data!" << std::endl;
+    return;
+  }
+  
+  // test the extraction itself instead of eof(), so trailing whitespace does
+  // not lead to pushing the last value twice
+  T val;
+  while ( iss >> val )
+  {
+    values.push_back(val);
+    std::cerr << val << " ";
+  }
+  std::cerr << std::endl;
+  iss.close();
+}
+
+template <class T>
+inline void write_values(const std::string & destination, const std::vector<T> & values)
+{
+  ofstream oss(destination.c_str());
+  if ( !oss.good() )
+  {
+    std::cerr << "read_images::Unable to write the data!" << std::endl;
+    return;
+  }
+  
+  for (uint i = 0; i < values.size(); i++)
+  {
+    oss << values[i] << std::endl;
+  }
+  
+  oss.close();
+}
+
+/** 
+* @brief computes the arithmetic mean of the given values; returns zero for an empty input
+*/
+template <class T>
+inline void calculating_mean(const std::vector<T> & numbers, T & mean)
+{
+  mean = 0.0;
+  if (numbers.size() == 0) 
+  {
+    #ifndef NO_PROMPTS
+    cerr << "calculating_mean: No numbers given." << endl;
+    #endif
+    return;
+  }
+  for (typename std::vector<T>::const_iterator it = numbers.begin(); it != numbers.end(); it++)
+    mean += *it;
+  
+  // the empty case already returned above, so the division is safe
+  mean /= (T) ((double) numbers.size());
+}
+
+/** 
+* @brief computes block-wise means: every stepSize consecutive values are averaged into one entry of means; a trailing incomplete block is dropped
+*/
+template <class T>
+inline void calculating_mean(const std::vector<T> & numbers, std::vector<T> & means, const int & stepSize)
+{
+  means.clear();
+  double mean(0.0);
+  if (numbers.size() == 0) 
+  {
+    #ifndef NO_PROMPTS
+    cerr << "calculating_mean: No numbers given." << endl;
+    #endif
+    return;
+  }
+  
+  int cnt(0);
+  for (typename std::vector<T>::const_iterator it = numbers.begin(); it != numbers.end(); it++, cnt++)
+  {
+    mean += *it;
+    if ( cnt == (stepSize-1))
+    {
+      mean /= (T) ((double) stepSize);
+      means.push_back(mean);
+      mean = 0.0;
+      cnt = -1;
+    }
+  }
+
+}
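+
+// Example (hypothetical data): numbers = (1, 2, 3, 4, 5) with stepSize = 2
+// yields means = (1.5, 3.5); the trailing 5 does not fill a complete block
+// and is therefore dropped.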
+
+inline double calculating_mean(const std::vector<double> & numbers)
+{
+  double mean(0.0);
+  if (numbers.size() == 0) 
+  {
+    #ifndef NO_PROMPTS
+    cerr << "calculating_mean: No numbers given." << endl;
+    #endif
+    return mean;
+  }
+  for (std::vector<double>::const_iterator it = numbers.begin(); it != numbers.end(); it++)
+    mean += *it;
+  
+  mean /= ((double) numbers.size());
+  
+  return mean;
+}
+
+inline void calculateMeanPerDimension(const std::vector<std::vector<double> > & numbers, std::vector<double> & meanValues)
+{
+  if (numbers.size() == 0)
+    return;
+  
+  meanValues.resize(numbers[0].size());
+  for (uint dim = 0; dim < numbers[0].size(); dim++)
+  {
+    meanValues[dim] = 0.0;
+  }
+  
+  for (uint i = 0; i < numbers.size(); i++)
+  {
+    for (uint dim = 0; dim < numbers[i].size(); dim++)
+    {
+      meanValues[dim] += numbers[i][dim];
+    }
+  }
+  
+  for (uint dim = 0; dim < numbers[0].size(); dim++)
+  {
+    meanValues[dim] /= (double) numbers.size();
+  }
+}
+
+
+inline void calculating_std_dev(const std::vector<double> & numbers, const double & mean, double & std_dev)
+{
+  std_dev = 0.0;
+  if (numbers.size() == 0) 
+  {
+    #ifndef NO_PROMPTS
+    cerr << "calculating_mean: No numbers given." << endl;
+    #endif
+    return;
+  }
+  
+  for (std::vector<double>::const_iterator it = numbers.begin(); it != numbers.end(); it++)
+        std_dev += pow((*it) - mean,2);
+  
+  std_dev /= ((double) numbers.size());
+  std_dev = sqrt(std_dev);
+}
+
+inline double calculating_std_dev(const std::vector<double> & numbers, const double & mean)
+{
+  double std_dev (0.0);
+  if (numbers.size() == 0) 
+  {
+    #ifndef NO_PROMPTS
+    cerr << "calculating_mean: No numbers given." << endl;
+    #endif
+    return std_dev;
+  }
+  
+  for (std::vector<double>::const_iterator it = numbers.begin(); it != numbers.end(); it++)
+        std_dev += pow((*it) - mean,2);
+  
+  std_dev /= ((double) numbers.size());
+  std_dev = sqrt(std_dev);
+  
+  return std_dev;
+}
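+
+// A minimal usage sketch combining the helpers above (illustration only, all
+// names are defined in this header):
+//
+//   std::vector<double> samples;
+//   generateRandomFeatures ( 100, samples );             // 100 values in [0,1]
+//   double mean = calculating_mean ( samples );
+//   double sdev = calculating_std_dev ( samples, mean );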
+
+#endif

+ 40 - 0
tutorial/Makefile

@@ -0,0 +1,40 @@
+-include Makefile.cfg
+
+ifeq "$(COMPRESSION)" "0"
+DVIPDF_ARG+=-dAutoFilterColorImages=false -dColorImageFilter=/FlateEncode
+endif
+
+ifeq "$(LETTER)" "1"
+DVIPS_ARG+=-t letter
+endif
+
+#LATEX=latex
+LATEX=pdflatex
+
+.PRECIOUS:%.aux %.bbl
+
+%.dvi:%.tex
+
+%.aux:%.tex
+	$(LATEX) $<
+
+%.bbl:%.tex %.aux
+	bibtex $*
+	$(LATEX) $<
+	bibtex $*
+
+%.dvi:%.tex %.bbl
+	$(LATEX) $<
+
+%.ps:%.dvi
+	dvips -j0 -P generic $(DVIPS_ARG) $< -o $@
+
+#this is the old version using dvipdf, which can not handle letter papersize
+#%.pdf:%.dvi
+#	dvipdf $(DVIPDF_ARG) $<
+
+#this is the new version, going manually via dvips and ps2pdf
+#this is exactly the same as the dvipdf script does
+%.pdf:%.ps
+	ps2pdf14 $(DVIPDF_ARG) $< $@
+
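+# Typical invocation (assumption, not part of the original file): with a main
+# file tutorial.tex next to this Makefile and LATEX=latex, "make tutorial.pdf"
+# runs latex and bibtex, then converts via dvips and ps2pdf14.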

+ 4 - 0
tutorial/al-base.sty

@@ -0,0 +1,4 @@
+\usepackage{amsmath,amsfonts,amssymb}
+\usepackage[utf8]{inputenc}
+\usepackage[ngerman]{babel}
+\usepackage[T1]{fontenc}

+ 44 - 0
tutorial/beamercolorthemeal.sty

@@ -0,0 +1,44 @@
+% Copyright 2007 by Till Tantau
+%
+% This file may be distributed and/or modified
+%
+% 1. under the LaTeX Project Public License and/or
+% 2. under the GNU Public License.
+%
+% See the file doc/licenses/LICENSE for more details.
+
+\ProvidesPackageRCS $Header: /cvsroot/latex-beamer/latex-beamer/themes/color/beamercolorthemecrane.sty,v 1.9 2007/01/28 20:48:24 tantau Exp $
+
+
+\mode<presentation>
+
+\definecolor{fsublue}{RGB}{62,106,190}
+\definecolor{craneblue}{RGB}{4,6,76}
+
+\setbeamercolor{structure}{fg=craneblue,border=red}
+
+\setbeamercolor{palette primary}{fg=craneblue,bg=fsublue!80}
+\setbeamercolor{palette secondary}{fg=craneblue,bg=white}
+\setbeamercolor{palette tertiary}{fg=white,bg=fsublue!80}
+\setbeamercolor{palette quaternary}{fg=fsublue,bg=white}
+
+\setbeamercolor{titlelike}{parent=palette quaternary}
+
+\setbeamercolor{block title}{fg=white,bg=fsublue!80}
+\setbeamercolor{block title alerted}{use=alerted text,fg=craneblue,bg=alerted text.fg!75!bg}
+\setbeamercolor{block title example}{use=example text,fg=craneblue,bg=example text.fg!75!bg}
+
+\setbeamercolor{block body}{fg=black,bg=fsublue!25}
+\setbeamercolor{block body alerted}{parent=normal text,use=block title alerted,bg=block title alerted.bg!25!bg}
+\setbeamercolor{block body example}{parent=normal text,use=block title example,bg=block title example.bg!25!bg}
+
+\setbeamercolor{palette sidebar primary}{fg=craneblue}
+\setbeamercolor{palette sidebar secondary}{fg=craneblue!75}
+\setbeamercolor{palette sidebar tertiary}{fg=craneblue!75}
+\setbeamercolor{palette sidebar quaternary}{fg=craneblue}
+
+\setbeamercolor*{separation line}{}
+\setbeamercolor*{fine separation line}{}
+
+\mode
+<all>

+ 53 - 0
tutorial/beamercolorthemefsu-blue.sty

@@ -0,0 +1,53 @@
+% Copyright 2007 by Till Tantau
+%
+% This file may be distributed and/or modified
+%
+% 1. under the LaTeX Project Public License and/or
+% 2. under the GNU Public License.
+%
+% See the file doc/licenses/LICENSE for more details.
+
+\ProvidesPackageRCS $Header: /cvsroot/latex-beamer/latex-beamer/themes/color/beamercolorthemecrane.sty,v 1.9 2007/01/28 20:48:24 tantau Exp $
+
+
+\mode<presentation>
+
+\definecolor{fsublue}{RGB}{62,106,190}
+\definecolor{craneblue}{RGB}{4,6,76}
+\definecolor{firebrick3}{RGB}{255,38,38}
+\definecolor{red3}{RGB}{205,0,0}
+
+% \setbeamercolor{normal text}{fg=craneblue}
+\setbeamercolor{normal text}{fg=black}
+
+% \setbeamercolor{structure}{fg=craneblue,border=red}
+\setbeamercolor{structure}{fg=craneblue}
+
+% \setbeamercolor{palette primary}{fg=craneblue,bg=fsublue!80}
+\setbeamercolor{palette primary}{fg=white,bg=fsublue!80}
+% \setbeamercolor{palette secondary}{fg=craneblue,bg=white}
+\setbeamercolor{palette secondary}{fg=fsublue,bg=white}
+\setbeamercolor{palette tertiary}{fg=white,bg=fsublue!80}
+\setbeamercolor{palette quaternary}{fg=fsublue,bg=white}
+% \setbeamercolor{palette quaternary}{fg=craneblue,bg=white}
+
+\setbeamercolor{titlelike}{parent=palette quaternary}
+
+\setbeamercolor{block title}{fg=white,bg=fsublue!80}
+\setbeamercolor{block title alerted}{use=alerted text,fg=craneblue,bg=alerted text.fg!75!bg}
+\setbeamercolor{block title example}{use=example text,fg=craneblue,bg=example text.fg!75!bg}
+
+\setbeamercolor{block body}{fg=black,bg=fsublue!25}
+\setbeamercolor{block body alerted}{parent=normal text,use=block title alerted,bg=block title alerted.bg!25!bg}
+\setbeamercolor{block body example}{parent=normal text,use=block title example,bg=block title example.bg!25!bg}
+
+\setbeamercolor{palette sidebar primary}{fg=craneblue}
+\setbeamercolor{palette sidebar secondary}{fg=craneblue!75}
+\setbeamercolor{palette sidebar tertiary}{fg=craneblue!75}
+\setbeamercolor{palette sidebar quaternary}{fg=craneblue}
+
+\setbeamercolor*{separation line}{}
+\setbeamercolor*{fine separation line}{}
+
+\mode
+<all>

+ 21 - 0
tutorial/beamerfontthemeal.sty

@@ -0,0 +1,21 @@
+% Copyright 2007 by Till Tantau
+%
+% This file may be distributed and/or modified
+%
+% 1. under the LaTeX Project Public License and/or
+% 2. under the GNU Public License.
+%
+% See the file doc/licenses/LICENSE for more details.
+
+\mode<presentation>
+
+\setbeamerfont{title}{series=\bfseries}
+\setbeamerfont{subtitle}{series=\bfseries}
+\setbeamerfont{author}{series=\bfseries}
+\setbeamerfont{institute}{series=\bfseries}
+\setbeamerfont{date}{series=\bfseries}
+
+\setbeamerfont{title in head/foot}{series=\itshape}
+
+\mode
+<all>

+ 95 - 0
tutorial/beamerouterthemeal.sty

@@ -0,0 +1,95 @@
+% File created by Daniel Haase based on 'beamerouterthemeinfolines.sty' by Till Tantau
+%
+% This file may be distributed and/or modified
+%
+% 1. under the LaTeX Project Public License and/or
+% 2. under the GNU Public License.
+%
+% See the file doc/licenses/LICENSE for more details.
+
+% translation for slide numbers shown in the footer, e.g. "7 of 18"
+\usepackage{translator}
+\deftranslation[to=English]{of}{of}
+\deftranslation[to=German]{of}{von}
+
+\setbeamertemplate{navigation symbols}{}
+
+\mode<presentation>
+
+% set some colors
+% \setbeamercolor*{author in head/foot}{parent=palette tertiary}
+% \setbeamercolor*{title in head/foot}{parent=palette secondary}
+% \setbeamercolor*{date in head/foot}{parent=palette primary}
+% 
+% \setbeamercolor*{section in head/foot}{parent=palette tertiary}
+% \setbeamercolor*{subsection in head/foot}{parent=palette primary}
+
+% headline
+\defbeamertemplate*{headline}{al theme}{%
+   \insertlogo%
+}
+
+% frame title
+\defbeamertemplate*{frametitle}{al theme}[1][center]{%
+   \vspace{2ex}%
+   \hbox{%
+      \begin{beamercolorbox}[wd=0.09\paperwidth]{}%
+      \end{beamercolorbox}%
+      \begin{beamercolorbox}[wd=0.55\paperwidth]{palette secondary}%
+         \usebeamerfont{headline}\insertsectionhead%
+         \ifx\insertsubsectionhead\@empty%
+         \else%
+         :~\insertsubsectionhead%
+         \fi%
+         \\[-1ex]%
+         {\usebeamerfont{frametitle}\insertframetitle\strut\par}%
+         \ifx\insertframesubtitle\@empty%
+         \else%
+         {\usebeamerfont{framesubtitle}\usebeamercolor{framesubtitle}\insertframesubtitle\\[-2.5em]\strut\par}%
+         \fi%
+      \end{beamercolorbox}
+   }%
+   \leavevmode\\[-0.5em]%
+}
+
+% right sidebar
+% is necessary, as this is the original position of the logo (see default outer theme)
+\defbeamertemplate*{sidebar right}{al theme}{}
+
+% footline
+\defbeamertemplate*{footline}{al theme}{%
+  \leavevmode%
+  \hbox{%
+  \begin{beamercolorbox}[wd=.4\paperwidth,ht=2.25ex,dp=1ex,center]{author in head/foot}%
+    \usebeamerfont{author in head/foot}\insertshortauthor~~%(\insertshortinstitute)
+  \end{beamercolorbox}%
+  \begin{beamercolorbox}[wd=.535\paperwidth,ht=2.25ex,dp=1ex,center]{title in head/foot}%
+    \usebeamerfont{title in head/foot}\insertshorttitle
+  \end{beamercolorbox}%
+% 
+  \begin{picture}(0,0)
+% 	  \@ifundefined{maskottchen}{}{
+%       \put(0,7){\includegraphics[width=.065\paperwidth]{\maskottchen}}}
+      \put(-2,0){
+	\begin{beamercolorbox}[wd=.065\paperwidth,ht=2.25ex,dp=1ex,right]{date in head/foot}%
+	%SHOW CURRENT DATE
+	% \usebeamerfont{date in head/foot}\insertshortdate{}\hspace*{2em}
+	%SHOW CURRENT NUMBER OF FRAME
+	\insertframenumber{}
+	%SHOW TOTAL NUMBER OF FRAMES 
+	    \hspace*{2ex} 
+	\end{beamercolorbox}%
+  	}
+  \end{picture}
+  }
+  \vskip0pt%
+}
+
+\setbeamersize{text margin left=1em,text margin right=1em}
+
+% we don't want an icon bar
+\defbeamertemplate*{sidebar right}{masl theme} {}
+\beamertemplatenavigationsymbolsempty      % disable the navigation bar
+
+\mode
+<all>

+ 22 - 0
tutorial/beamerthemeJena.sty

@@ -0,0 +1,22 @@
+% Copyright 2007 by Till Tantau
+%
+% This file may be distributed and/or modified
+%
+% 1. under the LaTeX Project Public License and/or
+% 2. under the GNU Public License.
+%
+% See the file doc/licenses/LICENSE for more details.
+
+%\DeclareOptionBeamer{compress}{\beamer@compresstrue}
+%\ProcessOptionsBeamer
+
+\mode<presentation>
+
+\usecolortheme{fsu-blue}
+\useoutertheme{al}
+\usefonttheme{structurebold}
+\usefonttheme{al}
+\useinnertheme{rectangles}
+
+\mode
+<all>

BIN
tutorial/img/fsuText-en.pdf


BIN
tutorial/img/hanfried-en-blue.pdf


BIN
tutorial/img/logoV1.pdf


BIN
tutorial/img/logoV1blue.pdf


BIN
tutorial/img/logoV2.pdf


BIN
tutorial/img/logoV2blue.pdf


+ 1124 - 0
tutorial/latex12.bst

@@ -0,0 +1,1124 @@
+
+% ---------------------------------------------------------------
+%
+% $Id: latex8.bst,v 1.1 1995/09/15 15:13:49 ienne Exp $
+%
+% by Paolo.Ienne@di.epfl.ch
+%
+
+% ---------------------------------------------------------------
+%
+% no guarantee is given that the format corresponds perfectly to 
+% IEEE 8.5" x 11" Proceedings, but most features should be ok.
+%
+% ---------------------------------------------------------------
+%
+% `latex8' from BibTeX standard bibliography style `abbrv'
+% version 0.99a for BibTeX versions 0.99a or later, LaTeX version 2.09.
+% Copyright (C) 1985, all rights reserved.
+% Copying of this file is authorized only if either
+% (1) you make absolutely no changes to your copy, including name, or
+% (2) if you do make changes, you name it something other than
+% btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst.
+% This restriction helps ensure that all standard styles are identical.
+% The file btxbst.doc has the documentation for this style.
+
+ENTRY
+  { address
+    author
+    booktitle
+    chapter
+    edition
+    editor
+    howpublished
+    institution
+    journal
+    key
+    month
+    note
+    number
+    organization
+    pages
+    publisher
+    school
+    series
+    title
+    type
+    volume
+    year
+  }
+  {}
+  { label }
+
+INTEGERS { output.state before.all mid.sentence after.sentence after.block }
+
+FUNCTION {init.state.consts}
+{ #0 'before.all :=
+  #1 'mid.sentence :=
+  #2 'after.sentence :=
+  #3 'after.block :=
+}
+
+STRINGS { s t }
+
+FUNCTION {output.nonnull}
+{ 's :=
+  output.state mid.sentence =
+    { ", " * write$ }
+    { output.state after.block =
+ { add.period$ write$
+   newline$
+   "\newblock " write$
+ }
+ { output.state before.all =
+     'write$
+     { add.period$ " " * write$ }
+   if$
+ }
+      if$
+      mid.sentence 'output.state :=
+    }
+  if$
+  s
+}
+
+FUNCTION {output}
+{ duplicate$ empty$
+    'pop$
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.check}
+{ 't :=
+  duplicate$ empty$
+    { pop$ "empty " t * " in " * cite$ * warning$ }
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.bibitem}
+{ newline$
+  "\bibitem{" write$
+  cite$ write$
+  "}" write$
+  newline$
+  ""
+  before.all 'output.state :=
+}
+
+FUNCTION {fin.entry}
+{ add.period$
+  write$
+  newline$
+}
+
+FUNCTION {new.block}
+{ output.state before.all =
+    'skip$
+    { after.block 'output.state := }
+  if$
+}
+
+FUNCTION {new.sentence}
+{ output.state after.block =
+    'skip$
+    { output.state before.all =
+ 'skip$
+ { after.sentence 'output.state := }
+      if$
+    }
+  if$
+}
+
+FUNCTION {not}
+{   { #0 }
+    { #1 }
+  if$
+}
+
+FUNCTION {and}
+{   'skip$
+    { pop$ #0 }
+  if$
+}
+
+FUNCTION {or}
+{   { pop$ #1 }
+    'skip$
+  if$
+}
+
+FUNCTION {new.block.checka}
+{ empty$
+    'skip$
+    'new.block
+  if$
+}
+
+FUNCTION {new.block.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.block
+  if$
+}
+
+FUNCTION {new.sentence.checka}
+{ empty$
+    'skip$
+    'new.sentence
+  if$
+}
+
+FUNCTION {new.sentence.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.sentence
+  if$
+}
+
+FUNCTION {field.or.null}
+{ duplicate$ empty$
+    { pop$ "" }
+    'skip$
+  if$
+}
+
+FUNCTION {emphasize}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "{\em " swap$ * "}" * }
+  if$
+}
+
+INTEGERS { nameptr namesleft numnames }
+
+FUNCTION {format.names}
+{ 's :=
+  #1 'nameptr :=
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't :=
+      nameptr #1 >
+ { namesleft #1 >
+     { ", " * t * }
+     { numnames #2 >
+  { "," * }
+  'skip$
+       if$
+       t "others" =
+  { " et~al." * }
+  { " and " * t * }
+       if$
+     }
+   if$
+ }
+ 't
+      if$
+      nameptr #1 + 'nameptr :=
+
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+}
+
+FUNCTION {format.authors}
+{ author empty$
+    { "" }
+    { author format.names }
+  if$
+}
+
+FUNCTION {format.editors}
+{ editor empty$
+    { "" }
+    { editor format.names
+      editor num.names$ #1 >
+ { ", editors" * }
+ { ", editor" * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.title}
+{ title empty$
+    { "" }
+    { title "t" change.case$ }
+  if$
+}
+
+FUNCTION {n.dashify}
+{ 't :=
+  ""
+    { t empty$ not }
+    { t #1 #1 substring$ "-" =
+ { t #1 #2 substring$ "--" = not
+     { "--" *
+       t #2 global.max$ substring$ 't :=
+     }
+     {   { t #1 #1 substring$ "-" = }
+  { "-" *
+    t #2 global.max$ substring$ 't :=
+  }
+       while$
+     }
+   if$
+ }
+ { t #1 #1 substring$ *
+   t #2 global.max$ substring$ 't :=
+ }
+      if$
+    }
+  while$
+}
+
+FUNCTION {format.date}
+{ year empty$
+    { month empty$
+ { "" }
+ { "there's a month but no year in " cite$ * warning$
+   month
+ }
+      if$
+    }
+    { month empty$
+ 'year
+ { month " " * year * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.btitle}
+{ title emphasize
+}
+
+FUNCTION {tie.or.space.connect}
+{ duplicate$ text.length$ #3 <
+    { "~" }
+    { " " }
+  if$
+  swap$ * *
+}
+
+FUNCTION {either.or.check}
+{ empty$
+    'pop$
+    { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+  if$
+}
+
+FUNCTION {format.bvolume}
+{ volume empty$
+    { "" }
+    { "volume" volume tie.or.space.connect
+      series empty$
+ 'skip$
+ { " of " * series emphasize * }
+      if$
+      "volume and number" number either.or.check
+    }
+  if$
+}
+
+FUNCTION {format.number.series}
+{ volume empty$
+    { number empty$
+ { series field.or.null }
+ { output.state mid.sentence =
+     { "number" }
+     { "Number" }
+   if$
+   number tie.or.space.connect
+   series empty$
+     { "there's a number but no series in " cite$ * warning$ }
+     { " in " * series * }
+   if$
+ }
+      if$
+    }
+    { "" }
+  if$
+}
+
+FUNCTION {format.edition}
+{ edition empty$
+    { "" }
+    { output.state mid.sentence =
+ { edition "l" change.case$ " edition" * }
+ { edition "t" change.case$ " edition" * }
+      if$
+    }
+  if$
+}
+
+INTEGERS { multiresult }
+
+FUNCTION {multi.page.check}
+{ 't :=
+  #0 'multiresult :=
+    { multiresult not
+      t empty$ not
+      and
+    }
+    { t #1 #1 substring$
+      duplicate$ "-" =
+      swap$ duplicate$ "," =
+      swap$ "+" =
+      or or
+ { #1 'multiresult := }
+ { t #2 global.max$ substring$ 't := }
+      if$
+    }
+  while$
+  multiresult
+}
+
+FUNCTION {format.pages}
+{ pages empty$
+    { "" }
+    { pages multi.page.check
+ { "pages" pages n.dashify tie.or.space.connect }
+ { "page" pages tie.or.space.connect }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.vol.num.pages}
+{ volume field.or.null
+  number empty$
+    'skip$
+    { "(" number * ")" * *
+      volume empty$
+ { "there's a number but no volume in " cite$ * warning$ }
+ 'skip$
+      if$
+    }
+  if$
+  pages empty$
+    'skip$
+    { duplicate$ empty$
+ { pop$ format.pages }
+ { ":" * pages n.dashify * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.chapter.pages}
+{ chapter empty$
+    'format.pages
+    { type empty$
+ { "chapter" }
+ { type "l" change.case$ }
+      if$
+      chapter tie.or.space.connect
+      pages empty$
+ 'skip$
+ { ", " * format.pages * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.in.ed.booktitle}
+{ booktitle empty$
+    { "" }
+    { editor empty$
+ { "In " booktitle emphasize * }
+ { "In " format.editors * ", " * booktitle emphasize * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {empty.misc.check}
+
+{ author empty$ title empty$ howpublished empty$
+  month empty$ year empty$ note empty$
+  and and and and and
+  key empty$ not and
+    { "all relevant fields are empty in " cite$ * warning$ }
+    'skip$
+  if$
+}
+
+FUNCTION {format.thesis.type}
+{ type empty$
+    'skip$
+    { pop$
+      type "t" change.case$
+    }
+  if$
+}
+
+FUNCTION {format.tr.number}
+{ type empty$
+    { "Technical Report" }
+    'type
+  if$
+  number empty$
+    { "t" change.case$ }
+    { number tie.or.space.connect }
+  if$
+}
+
+FUNCTION {format.article.crossref}
+{ key empty$
+    { journal empty$
+ { "need key or journal for " cite$ * " to crossref " * crossref *
+   warning$
+   ""
+ }
+ { "In {\em " journal * "\/}" * }
+      if$
+    }
+    { "In " key * }
+  if$
+  " \cite{" * crossref * "}" *
+}
+
+FUNCTION {format.crossref.editor}
+{ editor #1 "{vv~}{ll}" format.name$
+  editor num.names$ duplicate$
+  #2 >
+    { pop$ " et~al." * }
+    { #2 <
+ 'skip$
+ { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+     { " et~al." * }
+     { " and " * editor #2 "{vv~}{ll}" format.name$ * }
+   if$
+ }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.book.crossref}
+{ volume empty$
+    { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
+      "In "
+    }
+    { "Volume" volume tie.or.space.connect
+      " of " *
+    }
+  if$
+  editor empty$
+  editor field.or.null author field.or.null =
+  or
+    { key empty$
+ { series empty$
+     { "need editor, key, or series for " cite$ * " to crossref " *
+       crossref * warning$
+       "" *
+     }
+     { "{\em " * series * "\/}" * }
+   if$
+ }
+ { key * }
+      if$
+    }
+    { format.crossref.editor * }
+  if$
+  " \cite{" * crossref * "}" *
+}
+
+FUNCTION {format.incoll.inproc.crossref}
+{ editor empty$
+  editor field.or.null author field.or.null =
+  or
+    { key empty$
+ { booktitle empty$
+     { "need editor, key, or booktitle for " cite$ * " to crossref " *
+       crossref * warning$
+       ""
+     }
+     { "In {\em " booktitle * "\/}" * }
+   if$
+ }
+ { "In " key * }
+      if$
+    }
+    { "In " format.crossref.editor * }
+  if$
+  " \cite{" * crossref * "}" *
+}
+
+FUNCTION {article}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title "title" output.check
+  new.block
+  crossref missing$
+    { journal emphasize "journal" output.check
+      format.vol.num.pages output
+      format.date "year" output.check
+    }
+    { format.article.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {book}
+{ output.bibitem
+  author empty$
+    { format.editors "author and editor" output.check }
+    { format.authors output.nonnull
+      crossref missing$
+ { "author and editor" editor either.or.check }
+ 'skip$
+      if$
+    }
+  if$
+  new.block
+  format.btitle "title" output.check
+  crossref missing$
+    { format.bvolume output
+      new.block
+      format.number.series output
+      new.sentence
+      publisher "publisher" output.check
+      address output
+    }
+    { new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  format.edition output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {booklet}
+{ output.bibitem
+  format.authors output
+  new.block
+  format.title "title" output.check
+  howpublished address new.block.checkb
+  howpublished output
+  address output
+  format.date output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {inbook}
+{ output.bibitem
+  author empty$
+    { format.editors "author and editor" output.check }
+    { format.authors output.nonnull
+
+      crossref missing$
+ { "author and editor" editor either.or.check }
+ 'skip$
+      if$
+    }
+  if$
+  new.block
+  format.btitle "title" output.check
+  crossref missing$
+    { format.bvolume output
+      format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.number.series output
+      new.sentence
+      publisher "publisher" output.check
+      address output
+    }
+    { format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  format.edition output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {incollection}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title "title" output.check
+  new.block
+  crossref missing$
+    { format.in.ed.booktitle "booktitle" output.check
+      format.bvolume output
+      format.number.series output
+      format.chapter.pages output
+      new.sentence
+      publisher "publisher" output.check
+      address output
+      format.edition output
+      format.date "year" output.check
+    }
+    { format.incoll.inproc.crossref output.nonnull
+      format.chapter.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {inproceedings}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title "title" output.check
+  new.block
+  crossref missing$
+    { format.in.ed.booktitle "booktitle" output.check
+      format.bvolume output
+      format.number.series output
+      format.pages output
+      address empty$
+ { organization publisher new.sentence.checkb
+   organization output
+   publisher output
+   format.date "year" output.check
+ }
+ { address output.nonnull
+   format.date "year" output.check
+   new.sentence
+   organization output
+   publisher output
+ }
+      if$
+    }
+    { format.incoll.inproc.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {conference} { inproceedings }
+
+FUNCTION {manual}
+{ output.bibitem
+  author empty$
+    { organization empty$
+ 'skip$
+ { organization output.nonnull
+   address output
+ }
+      if$
+    }
+    { format.authors output.nonnull }
+  if$
+  new.block
+  format.btitle "title" output.check
+  author empty$
+    { organization empty$
+ { address new.block.checka
+   address output
+ }
+ 'skip$
+      if$
+    }
+    { organization address new.block.checkb
+      organization output
+      address output
+    }
+  if$
+  format.edition output
+  format.date output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {mastersthesis}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title "title" output.check
+  new.block
+  "Master's thesis" format.thesis.type output.nonnull
+  school "school" output.check
+  address output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {misc}
+{ output.bibitem
+  format.authors output
+  title howpublished new.block.checkb
+  format.title output
+  howpublished new.block.checka
+  howpublished output
+  format.date output
+  new.block
+  note output
+  fin.entry
+  empty.misc.check
+}
+
+FUNCTION {phdthesis}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.btitle "title" output.check
+  new.block
+  "PhD thesis" format.thesis.type output.nonnull
+  school "school" output.check
+  address output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {proceedings}
+{ output.bibitem
+  editor empty$
+    { organization output }
+    { format.editors output.nonnull }
+
+  if$
+  new.block
+  format.btitle "title" output.check
+  format.bvolume output
+  format.number.series output
+  address empty$
+    { editor empty$
+ { publisher new.sentence.checka }
+ { organization publisher new.sentence.checkb
+   organization output
+ }
+      if$
+      publisher output
+      format.date "year" output.check
+    }
+    { address output.nonnull
+      format.date "year" output.check
+      new.sentence
+      editor empty$
+ 'skip$
+ { organization output }
+      if$
+      publisher output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {techreport}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title "title" output.check
+  new.block
+  format.tr.number output.nonnull
+  institution "institution" output.check
+  address output
+  format.date "year" output.check
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {unpublished}
+{ output.bibitem
+  format.authors "author" output.check
+  new.block
+  format.title "title" output.check
+  new.block
+  note "note" output.check
+  format.date output
+  fin.entry
+}
+
+FUNCTION {default.type} { misc }
+
+MACRO {jan} {"Jan."}
+
+MACRO {feb} {"Feb."}
+
+MACRO {mar} {"Mar."}
+
+MACRO {apr} {"Apr."}
+
+MACRO {may} {"May"}
+
+MACRO {jun} {"June"}
+
+MACRO {jul} {"July"}
+
+MACRO {aug} {"Aug."}
+
+MACRO {sep} {"Sept."}
+
+MACRO {oct} {"Oct."}
+
+MACRO {nov} {"Nov."}
+
+MACRO {dec} {"Dec."}
+
+MACRO {acmcs} {"ACM Comput. Surv."}
+
+MACRO {acta} {"Acta Inf."}
+
+MACRO {cacm} {"Commun. ACM"}
+
+MACRO {ibmjrd} {"IBM J. Res. Dev."}
+
+MACRO {ibmsj} {"IBM Syst.~J."}
+
+MACRO {ieeese} {"IEEE Trans. Softw. Eng."}
+
+MACRO {ieeetc} {"IEEE Trans. Comput."}
+
+MACRO {ieeetcad}
+ {"IEEE Trans. Comput.-Aided Design Integrated Circuits"}
+
+MACRO {ipl} {"Inf. Process. Lett."}
+
+MACRO {jacm} {"J.~ACM"}
+
+MACRO {jcss} {"J.~Comput. Syst. Sci."}
+
+MACRO {scp} {"Sci. Comput. Programming"}
+
+MACRO {sicomp} {"SIAM J. Comput."}
+
+MACRO {tocs} {"ACM Trans. Comput. Syst."}
+
+MACRO {tods} {"ACM Trans. Database Syst."}
+
+MACRO {tog} {"ACM Trans. Gr."}
+
+MACRO {toms} {"ACM Trans. Math. Softw."}
+
+MACRO {toois} {"ACM Trans. Office Inf. Syst."}
+
+MACRO {toplas} {"ACM Trans. Prog. Lang. Syst."}
+
+MACRO {tcs} {"Theoretical Comput. Sci."}
+
+READ
+
+FUNCTION {sortify}
+{ purify$
+  "l" change.case$
+}
+
+INTEGERS { len }
+
+FUNCTION {chop.word}
+{ 's :=
+  'len :=
+  s #1 len substring$ =
+    { s len #1 + global.max$ substring$ }
+    's
+  if$
+}
+
+FUNCTION {sort.format.names}
+{ 's :=
+  #1 'nameptr :=
+  ""
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { nameptr #1 >
+ { "   " * }
+ 'skip$
+      if$
+      s nameptr "{vv{ } }{ll{ }}{  f{ }}{  jj{ }}" format.name$ 't :=
+      nameptr numnames = t "others" = and
+ { "et al" * }
+ { t sortify * }
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+}
+
+FUNCTION {sort.format.title}
+{ 't :=
+  "A " #2
+    "An " #3
+      "The " #4 t chop.word
+    chop.word
+  chop.word
+  sortify
+  #1 global.max$ substring$
+}
+
+FUNCTION {author.sort}
+{ author empty$
+    { key empty$
+ { "to sort, need author or key in " cite$ * warning$
+   ""
+ }
+ { key sortify }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+
+FUNCTION {author.editor.sort}
+{ author empty$
+    { editor empty$
+ { key empty$
+     { "to sort, need author, editor, or key in " cite$ * warning$
+       ""
+     }
+     { key sortify }
+   if$
+ }
+ { editor sort.format.names }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+
+FUNCTION {author.organization.sort}
+{ author empty$
+
+    { organization empty$
+ { key empty$
+     { "to sort, need author, organization, or key in " cite$ * warning$
+       ""
+     }
+     { key sortify }
+   if$
+ }
+ { "The " #4 organization chop.word sortify }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+
+FUNCTION {editor.organization.sort}
+{ editor empty$
+    { organization empty$
+ { key empty$
+     { "to sort, need editor, organization, or key in " cite$ * warning$
+       ""
+     }
+     { key sortify }
+   if$
+ }
+ { "The " #4 organization chop.word sortify }
+      if$
+    }
+    { editor sort.format.names }
+  if$
+}
+
+FUNCTION {presort}
+{ type$ "book" =
+  type$ "inbook" =
+  or
+    'author.editor.sort
+    { type$ "proceedings" =
+ 'editor.organization.sort
+ { type$ "manual" =
+     'author.organization.sort
+     'author.sort
+   if$
+ }
+      if$
+    }
+  if$
+  "    "
+  *
+  year field.or.null sortify
+  *
+  "    "
+  *
+  title field.or.null
+  sort.format.title
+  *
+  #1 entry.max$ substring$
+  'sort.key$ :=
+}
+
+ITERATE {presort}
+
+SORT
+
+STRINGS { longest.label }
+
+INTEGERS { number.label longest.label.width }
+
+FUNCTION {initialize.longest.label}
+{ "" 'longest.label :=
+  #1 'number.label :=
+  #0 'longest.label.width :=
+}
+
+FUNCTION {longest.label.pass}
+{ number.label int.to.str$ 'label :=
+  number.label #1 + 'number.label :=
+  label width$ longest.label.width >
+    { label 'longest.label :=
+      label width$ 'longest.label.width :=
+    }
+    'skip$
+  if$
+}
+
+EXECUTE {initialize.longest.label}
+
+ITERATE {longest.label.pass}
+
+FUNCTION {begin.bib}
+{ preamble$ empty$
+    'skip$
+    { preamble$ write$ newline$ }
+  if$
+  "\begin{thebibliography}{"  longest.label  * 
+  "}\setlength{\itemsep}{-1ex}\small" * write$ newline$
+}
+
+EXECUTE {begin.bib}
+
+EXECUTE {init.state.consts}
+
+ITERATE {call.type$}
+
+FUNCTION {end.bib}
+{ newline$
+  "\end{thebibliography}" write$ newline$
+}
+
+EXECUTE {end.bib}
+
+% end of file latex8.bst
+% ---------------------------------------------------------------
+
+
+

+ 180 - 0
tutorial/latex12.sty

@@ -0,0 +1,180 @@
+
+% ---------------------------------------------------------------
+%
+% $Id: latex8.sty,v 1.2 1995/09/15 15:31:13 ienne Exp $
+%
+% by Paolo.Ienne@di.epfl.ch
+%
+% ---------------------------------------------------------------
+%
+% no guarantee is given that the format corresponds perfectly to
+% IEEE 8.5" x 11" Proceedings, but most features should be ok.
+%
+% ---------------------------------------------------------------
+% with LaTeX2e:
+% =============
+%
+% use as
+%   \documentclass[times,10pt,twocolumn]{article}
+%   \usepackage{latex8}
+%   \usepackage{times}
+%
+% ---------------------------------------------------------------
+
+% with LaTeX 2.09:
+% ================
+%
+% use as
+%   \documentstyle[times,art10,twocolumn,latex8]{article}
+%
+% ---------------------------------------------------------------
+% with both versions:
+% ===================
+%
+% specify \pagestyle{empty} to omit page numbers in the final
+% version
+%
+% specify references as
+%   \bibliographystyle{latex8}
+%   \bibliography{...your files...}
+%
+% use Section{} and SubSection{} instead of standard section{}
+%    and subsection{} to obtain headings in the form
+%    "1.3. My heading"
+%
+% ---------------------------------------------------------------
+
+\typeout{IEEE 8.5 x 11-Inch Proceedings Style `latex8.sty'.}
+
+% ten point helvetica bold required for captions
+% in some sites the name of the helvetica bold font may differ,
+% change the name here:
+%\font\tenhv  = phvb at 10pt
+\font\tenhv  = phvb7t at 10pt
+
+% eleven point times bold required for second-order headings
+\font\elvbf  = cmbx10 scaled 1100
+%\font\elvbf  = ptmb scaled 1100
+
+% set dimensions of columns, gap between columns, and paragraph indent
+%\setlength{\textheight}{8.875in}
+%\setlength{\textwidth}{6.875in}
+%\setlength{\columnsep}{0.3125in}
+%\setlength{\topmargin}{0in}
+%\setlength{\headheight}{0in}
+%\setlength{\headsep}{0in}
+%\setlength{\parindent}{1pc}
+%\setlength{\oddsidemargin}{-.304in}
+%\setlength{\evensidemargin}{-.304in}
+
+% dimensions adjusted by Pradeep Misra 2008-02-03
+\setlength{\textheight}{8.875in}
+\setlength{\textwidth}{6.5in}
+\setlength{\columnsep}{0.375in}
+\setlength{\topmargin}{0in}
+\setlength{\headheight}{0in}
+\setlength{\headsep}{0in}
+\setlength{\parindent}{1pc}
+%\setlength{\oddsidemargin}{-.1875in}
+%\setlength{\evensidemargin}{-.1875in}
+
+% memento from size10.clo
+% \normalsize{\@setfontsize\normalsize\@xpt\@xiipt}
+% \small{\@setfontsize\small\@ixpt{11}}
+% \footnotesize{\@setfontsize\footnotesize\@viiipt{9.5}}
+% \scriptsize{\@setfontsize\scriptsize\@viipt\@viiipt}
+% \tiny{\@setfontsize\tiny\@vpt\@vipt}
+% \large{\@setfontsize\large\@xiipt{14}}
+% \Large{\@setfontsize\Large\@xivpt{18}}
+% \LARGE{\@setfontsize\LARGE\@xviipt{22}}
+% \huge{\@setfontsize\huge\@xxpt{25}}
+% \Huge{\@setfontsize\Huge\@xxvpt{30}}
+
+\def\@maketitle
+   {
+   \newpage
+   \null
+   \vskip .375in
+   \begin{center}
+      {\Large \bf \@title \par}
+      % additional two empty lines at the end of the title
+      \vspace*{24pt}
+      {
+      \large
+      \lineskip .5em
+      \begin{tabular}[t]{c}
+         \@author
+      \end{tabular}
+      \par
+      }
+      % additional small space at the end of the author name
+      \vskip .5em
+      {
+       \large
+      \begin{tabular}[t]{c}
+         \@affiliation
+      \end{tabular}
+      \par
+      \ifx \@empty \@email
+      \else
+         \begin{tabular}{r@{~}l}
+            E-mail: & {\tt \@email}
+         \end{tabular}
+         \par
+      \fi
+      }
+      % additional empty line at the end of the title block
+      \vspace*{12pt}
+   \end{center}
+   }
+
+\def\abstract
+   {%
+   \centerline{\large\bf Abstract}%
+   \vspace*{12pt}%
+   \it%
+   }
+
+\def\endabstract
+   {
+   % additional empty line at the end of the abstract
+   \vspace*{12pt}
+   }
+
+\def\affiliation#1{\gdef\@affiliation{#1}} \gdef\@affiliation{}
+
+\def\email#1{\gdef\@email{#1}}
+\gdef\@email{}
+
+\newlength{\@ctmp}
+\newlength{\@figindent}
+\setlength{\@figindent}{1pc}
+
+\long\def\@makecaption#1#2{
+   \vskip 10pt
+   \setbox\@tempboxa\hbox{\tenhv\noindent #1.~#2}
+   \setlength{\@ctmp}{\hsize}
+   \addtolength{\@ctmp}{-\@figindent}\addtolength{\@ctmp}{-\@figindent}
+   % IF longer than one indented paragraph line
+   \ifdim \wd\@tempboxa >\@ctmp
+      % THEN set as an indented paragraph
+      \begin{list}{}{\leftmargin\@figindent \rightmargin\leftmargin}
+         \item[]\tenhv #1.~#2\par
+      \end{list}
+   \else
+      % ELSE center
+      \hbox to\hsize{\hfil\box\@tempboxa\hfil}
+   \fi}
+%\newfont{\fnt11}{times at 11pt}
+% correct heading spacing and type
+\def\section{\@startsection {section}{1}{\z@}
+   {14pt plus 2pt minus 2pt}{14pt plus 2pt minus 2pt} {\large\bf}}
+\def\subsection{\@startsection {subsection}{2}{\z@}
+   {13pt plus 2pt minus 2pt}{13pt plus 2pt minus 2pt} {\fontsize{11}{\f@baselineskip}\bf}}%\elvbf}}
+
+% add the period after section numbers
+\newcommand{\Section}[1]{\section{\hskip -1em.~#1}}
+\newcommand{\SubSection}[1]{\subsection{\hskip -1em.~#1}}
+
+% end of file latex8.sty
+% ---------------------------------------------------------------

+ 409 - 0
tutorial/notations.tex

@@ -0,0 +1,409 @@
+\usepackage{xspace}
+\usepackage{amsmath}  % used below: \boldsymbol, \text, \operatorname, \DeclareMathOperator, \underset
+\usepackage{amssymb}  % used below: \mathbb, \mathfrak
+\usepackage{xcolor}   % used below: \textcolor in the depgp macros
+\renewcommand{\vec}[1]{\mathbf{\boldsymbol{#1}}}
+\newcommand{\mat}[1]{\mathbf{#1}}
+%\newcommand{\diag}{\mathrm{diag}}
+%\newcommand{\trace}{\mathrm{trace}}
+\newcommand\equationname{Eq.}
+\newcommand\eg{\textit{e.g.},\xspace}
+\newcommand\ie{\textit{i.e.},\xspace}
+\newcommand\cf{\textit{cf.}\xspace}
+\newcommand{\pderiv}[2]{\frac{\partial #1}{\partial #2}}
+\newcommand{\pderivk}[3]{\frac{\partial^{#3} #1}{\partial #2^{#3}}}
+\newcommand{\deriv}[2]{\frac{\operatorname{d} #1}{\operatorname{d} #2}}
+\newcommand{\derivk}[3]{\frac{\operatorname{d}^{#3} #1}{\operatorname{d} #2^{#3}}}
+\newcommand{\integral}[4]{\int_{#3}^{#4} #1 \operatorname{d}#2}
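+% Usage sketch (illustrative): \pderiv{f}{x} gives \frac{\partial f}{\partial x},
+% \pderivk{f}{x}{2} and \derivk{f}{x}{2} the corresponding second derivatives,
+% and \integral{f(x)}{x}{0}{1} the definite integral \int_0^1 f(x)\,dx.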
+%\newcommand{\argmax}{\operatorname{argmax}}
+%\newcommand{\argmin}{\operatorname{argmin}}
+%\newcommand{\sign}{\operatorname{sign}}
+\newcommand{\smalleq}{{\scriptstyle =}}
+\newcommand{\quotes}[1]{``#1''}
+\newcommand\landau{\mathcal{O}}
+\newcommand\CONDON{\,|\,}
+\newcommand{\vectornorm}[1]{\left|\left|#1\right|\right|}
+\newcommand\kernelFunctionHIK{\kernelFunction^{\text{\scriptsize HIK}}}
+\newcommand\kernelFunctionGHIK{\kernelFunction^{\text{\scriptsize GHIK}}}
+
+\newcommand\mattwo[4]{\left[\begin{array}{cc} #1 & #2\\ #3 & #4 \end{array} \right]}
+\newcommand\matthree[9]{\left[\begin{array}{ccc} #1 & #2 & #3\\ #4 & #5 & #6\\ #7 & #8 & #9 \end{array} \right]}
+\newcommand\vectwo[2]{\left[\begin{array}{c} #1 \\ #2 \end{array} \right]}
+\newcommand\vecthree[3]{\left[\begin{array}{c} #1 \\ #2 \\ #3 \end{array} \right]}
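+% Usage sketch (illustrative): \mattwo{1}{0}{0}{1} typesets the 2x2 identity
+% matrix in square brackets, \vectwo{x_1}{x_2} a two-component column vector,
+% and \matthree / \vecthree the 3x3 and three-component analogues.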
+
+% notations
+
+% plain math-mode shorthands; \DeclareMathOperator would add operator spacing
+\newcommand{\x}{\boldsymbol{x}}
+\newcommand{\y}{\boldsymbol{y}}
+
+\newcommand\labelspace{\mathcal{Y}}
+\newcommand\inputspace{\mathcal{X}}
+\newcommand\inputsingle{\vec{x}}
+\newcommand\inputsinglecomp{x}
+\newcommand\labelsingle{y}
+\newcommand\labelspecific{k}
+\newcommand\labelrandom{\labelsingle}
+\newcommand\inputrandom{\inputsingle}
+\newcommand\dataset{\mathcal{D}}
+
+\newcommand\dimension{D}
+\newcommand\noe{n}
+\newcommand\numberOfExamples{\noe}
+
+\newcommand\inputnew{\inputsingle^*}
+\newcommand\labelnew{\labelsingle_*}
+\newcommand\inputmatrix{\mat{X}}
+\newcommand\expectation{\mathbb{E}}
+\newcommand\cfunction{\tilde{h}}
+\newcommand\cestimate{\hat{h}}
+\newcommand\impuls[1]{\delta\left[#1\right]}
+
+\usepackage{upgreek}
+\newcommand\impulsDiscrete[1]{\updelta \left( {#1} \right)}
+
+\newcommand\numberOfClasses{M}
+\newcommand\error{\mbox{\textit{err}}}
+
+
+% model selection
+\newcommand\cparameters{\vec{\theta}}
+\newcommand\parameterspace{\Theta}
+
+\newcommand\labelvector{\vec{y}}
+\newcommand\inputdataset{\mat{X}}
+
+% kernel stuff
+\newcommand\meanFunction{\mu}
+\newcommand\kernelFunction{K}
+\newcommand\kernelMatrix{\mat{K}}
+\newcommand\kernelMatrixValue{K}
+\newcommand\kernelVector{\vec{k}_{*}}
+%\newcommand\kernelVector{\kernelFunction(\inputdataset, \inputnew)}
+\newcommand\kernelSelf{\kernelFunction(\inputnew, \inputnew)}
+
+% latent functions
+\newcommand\latentFunction{f}
+\newcommand\latentvector{\vec{f}}
+\newcommand\latentfunctionvalue{f}
+\newcommand\latentnew{f_*}
+\newcommand\ftransform{\phi}
+\newcommand\distance{d}
+\newcommand\rbfparameter{\gamma}
+\newcommand\featureSpace{\mathcal{H}}
+\newcommand\gpsymbol{\mathcal{GP}}
+\newcommand\meanfunction{\mathcal{M}}
+
+% model and modelspace / hypothesis, hypothesispace
+\newcommand\hypothesis{h}
+\newcommand\hypothesisSpace{\mathbb{H}}
+
+\newcommand\mapestimate[1]{ {\hat{#1}}^{\text{MAP}} }
+\newcommand\mlestimate[1]{ {\hat{#1}}^{\text{ML}} }
+\newcommand\bayesestimate[1]{ {\hat{#1}}^{\text{Bayes}} }
+\newcommand\mmseestimate[1]{ {\hat{#1}}^{\text{MMSE}} }
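+% Usage sketch (illustrative): \mapestimate{\theta} typesets \hat{\theta} with
+% a "MAP" superscript; the ML, Bayes, and MMSE variants decorate their
+% argument in the same way.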
+\newcommand\mss[1]{\mbox{\scriptsize #1}}
+\newcommand\baggingFraction{r_{\mss{B}}}
+
+\newcommand\assumeeq{\stackrel{*}{=}}
+\newcommand\assumepropto{\stackrel{*}{\propto}}
+
+\newcommand\ensembleSize{T}
+\newcommand\ensembleIndex{t}
+
+\newcommand\dtthreshold{\zeta}
+\newcommand\thresholdSet{Q}
+\newcommand\featureindex{r}
+\newcommand\rFeatureSet{\mathcal{R}}
+
+\newcommand\leafNode{\vartheta}
+\newcommand\numberOfLeaves{m_\ell}
+\newcommand\node{v}
+
+\newcommand\splitCriterion{\Gamma}
+\newcommand\impurityMeasure{\mathcal{J}}
+\newcommand\entropy{\mathcal{E}}
+
+\newcommand\impurityThreshold{\xi_\impurityMeasure}
+\newcommand\minexamplesThreshold{\xi_{\mss{n}}}
+\newcommand\maxdepthThreshold{\xi_{\mss{d}}}
+
+\newcommand\hyperplane{\vec{w}}
+%\newcommand\stepFunction[1]{\delta^s\left[ #1 \right]}
+\newcommand\stepFunction[1]{\mbox{sign}\left(#1\right)}
+
+\newcommand\numberOfKernels{R}
+\newcommand\bias{b}
+\newcommand\margin{\mbox{mg}}
+\DeclareMathOperator\maximize{maximize}
+\DeclareMathOperator\minimize{minimize}
+
+\newcommand\hingeLoss{H}
+\newcommand\lagrangeDual{g}
+
+\newcommand\hyperparameters{\vec{\eta}}
+\newcommand\hyperparameter{\eta}
+\newcommand\kernelweight{\beta}
+\newcommand\kernelweights{\vec{\beta}}
+\newcommand\variance{\sigma^2}
+\newcommand\stddev{\sigma}
+\newcommand\eigmax{\lambda_{\text{max}}}
+\newcommand\eigmin{\lambda_{\text{min}}}
+
+% GP related stuff
+\newcommand\gpregmean{\mu_*}
+\newcommand\gpregvariance{\sigma^2_*}
+\newcommand\gpregstddev{\sigma_*}
+
+% differential symbol for integrals
+\newcommand\diffd{d}
+
+\newcommand\kernelscaling{v_0}
+\newcommand\kernelbias{v_1}
+\newcommand\qexpgrad{g}
+%\newcommand\gpnoise{\sigma_{\varepsilon}^2}
+\newcommand\gpnoise{\sigma^2}
+\newcommand\gpnoisestddev{\sigma_{\varepsilon}}
+
+
+%\newcommand\identityMatrix[1]{\mat{I}_{(#1)}}
+\newcommand\identityMatrix[1]{\mat{I}}
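+% Usage sketch (illustrative): with the macros above, the standard GP
+% regression predictive equations can be written compactly, e.g.
+%   \gpregmean = \kernelVector^T \left( \kernelMatrix + \gpnoise \identityMatrix{\noe} \right)^{-1} \labelvector
+%   \gpregvariance = \kernelSelf - \kernelVector^T \left( \kernelMatrix + \gpnoise \identityMatrix{\noe} \right)^{-1} \kernelVector
+% i.e. the textbook \mu_* = k_*^T (K + \sigma^2 I)^{-1} y and its variance
+% counterpart.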
+
+\newcommand\kernelStuff{\zeta_\kernelFunction}
+
+% gp classification
+\newcommand\cumgauss{\Phi}
+\newcommand\cumgaussLoss{L_{\cumgauss}}
+\DeclareMathOperator\sigmoid{sig}
+\newcommand\sigmoidLoss{L_{\text{sig}}}
+\DeclareMathOperator\erf{erf}
+% gp classification scaling factor
+\newcommand\gpnoiseC{\sigma_{c}^2}
+\newcommand\gpnoisestddevC{\sigma_{c}}
+
+% laplace methods
+\newcommand\laplaceMode{\vec{\hat{f}}}
+\newcommand\laplaceModeValue{\hat{f}}
+\newcommand\laplaceLog{L}
+\newcommand\approxP{q}
+\newcommand\constTerm{\text{\textit{const.}}}
+\newcommand\nhessianLikelihood{\mat{W}}
+\newcommand\nhessianLikelihoodValue{W}
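+% Usage sketch (illustrative): in Laplace-approximation derivations the
+% posterior mode satisfies the self-consistency condition
+%   \laplaceMode = \kernelMatrix \nabla \log p(\labelvector \CONDON \laplaceMode)
+% and \nhessianLikelihood denotes the negative Hessian W of the log
+% likelihood, evaluated at \laplaceMode.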
+
+% gp multi
+\newcommand\ymulti{y_*^{\text{multi}}}
+\newcommand\ymultip{y_*^{\text{multi}}}
+
+% gp hyperparameter estimation
+\newcommand\kernelMatrixHyper{\mat{\tilde{K}}_\hyperparameters}
+
+% optimization problems
+
+\newcommand\optimizationProblem[5]{
+	\begin{equation}
+	\label{#1}
+	\begin{aligned}
+	& \underset{#3}{#2}
+	& & #4 \\
+	& \text{subject to}
+	& & #5 \enspace.
+	\end{aligned}
+	\end{equation}
+}
+
+\newcommand\optimizationProblemUnconstrained[4]{	
+	\begin{equation}
+	\label{#1}
+	\begin{aligned}
+	& \underset{#3}{#2}
+	& & #4 \enspace.\\
+	\end{aligned}
+	\end{equation}
+}
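+% Usage sketch (illustrative, placeholder label): an SVM-style primal problem
+% could be typeset as
+%   \optimizationProblem{eq:svm-primal}{\minimize}{\hyperplane, \bias}
+%     {\frac{1}{2} \vectornorm{\hyperplane}^2}
+%     {\labelsingle_i (\hyperplane^T \inputsingle_i + \bias) \geq 1, \; i = 1, \ldots, \noe}
+% which expands to a labeled, aligned equation with a "subject to" row;
+% \optimizationProblemUnconstrained takes the same first four arguments and
+% omits that row.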
+% transfer learning framework
+%\newcommand\targetTask{\mathcal{T}}
+\newcommand\targetTask{\tau}
+\newcommand\supportTag{\mathcal{S}}
+\newcommand\datasetSupport[1]{{\dataset}^{\supportTag}_{#1}}
+\newcommand\datasetSupportSingle{{\dataset}^{\supportTask}}
+\newcommand\datasetTarget{{\dataset}^{\targetTask}}
+\newcommand\supportCollection{\mathfrak{D}^{\supportTag}}
+\newcommand\numberOfTasks{J}
+\newcommand\numberOfTasksMT{P}
+\newcommand\noeTarget{\tilde{\noe}}
+\newcommand\noeTotal{\noe}
+\newcommand\noePositive{\noe_1}
+\newcommand\noeSupport{\noe^{\supportTag}}
+\newcommand\supportClasses{\supportTag}
+\newcommand\supportClass{s}
+\newcommand\backgroundClass{\mathcal{B}}
+\newcommand\transferParameter{\vec{\theta}}
+\newcommand\tpSpace{\Theta}
+
+% regularized trees
+
+\newcommand\targetClass{\targetTask}
+\newcommand\rtPara{\vec{\theta}}
+\newcommand\rtParaValue{\theta}
+\newcommand\rtHyperMu{\vec{\mu}}
+\newcommand\rtHyperMuValue{\mu}
+%\newcommand\rtHyperSigma{\sigma_{\supportTag}}
+\newcommand\rtHyperVariance{\sigma^2}
+\newcommand\leafIndex{i}
+\newcommand\datasetLeaf[1]{\omega_{#1}}
+%\newcommand\datasetLeaf[1]{\dataset^{\ell}_{#1}}
+%\newcommand\rtLeafProbs[1]{\vec{t}_{\supportTag}^{#1}}
+\newcommand\rtLeafProbs[1]{\vec{t}^{(#1)}}
+\newcommand\rtLeafProb[2]{t^{(#1)}_{#2}}
+\newcommand\lagrange{L}
+%\newcommand\mcdata[1]{\dataset^{#1}}
+\newcommand\mcdata[1]{\dataset^{#1}}
+%\newcommand\rtML[2]{\hat{\theta}^{\mss{(ML)},#2}_{#1}}
+\newcommand\rtML[2]{t^{(#1)}_{#2}}
+\newcommand\rtMLv[1]{\vec{t}^{(#1)}}
+\newcommand\rtParaSpace{\Theta}
+
+% only for the target task
+\newcommand\rtMAPv{\vec{\hat{\theta}}^{\mss{MAP}}}
+
+\newcommand\leafCounts{\vec{c}}
+\newcommand\leafCount{c}
+\newcommand\leafNodeBinaryV{\ftransform}
+\newcommand\rtPostProbsV[1]{w^{\left(#1\right)}}
+\newcommand\leafNodeBinary{\ftransform}
+\newcommand\rtPostProbs[1]{\vec{w}^{\left(#1\right)}}
+
+% feature relevance
+\newcommand\featureSet{\mathcal{F}}
+\newcommand\featureFunction{g}
+\newcommand\frPara{\vec{\theta}}
+\newcommand\frParaValue{\theta}
+\newcommand\frBaseModel{h}
+\newcommand\frBaseModelSpace{H}
+\newcommand\frHyper{\vec{\beta}}
+\newcommand\frHyperValue{\beta}
+
+% depgp
+\newcommand\depgpcorr{\rho}
+\newcommand\tT{\textcolor{green}{\targetTask}}
+\newcommand\tS{\textcolor{blue}{\supportTask}}
+%\newcommand\supportTask{\supportTag}
+\newcommand\supportTask{s}
+
+\newcommand\depgpKNoColor{
+	\left(\begin{array}{cc} \kernelMatrix_{\targetTask \targetTask} & \depgpcorr \kernelMatrix_{\targetTask \supportTask}\\ \depgpcorr \kernelMatrix_{\targetTask \supportTask}^T & \kernelMatrix_{\supportTask \supportTask}\end{array}\right)
+}
+
+\newcommand\depgpK{
+		\LARGE\left(\begin{array}{cc} \kernelMatrix_{\tT \tT} & \depgpcorr \kernelMatrix_{\tT \tS}\\ \depgpcorr \kernelMatrix_{\tT \tS}^T & \kernelMatrix_{\tS \tS}\end{array}\right)
+}
+\newcommand\depgpKNoColorInd{
+	\left(\begin{array}{cc} \kernelMatrix_{\targetTask \targetTask} & \mat{0}\\ \mat{0} & \kernelMatrix_{\supportTask \supportTask}\end{array}\right)
+}
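+% Usage sketch (illustrative): in display math, $\depgpKNoColor$ typesets the
+% coupled 2x2 block kernel matrix of the dependent-GP model, with the task
+% correlation \depgpcorr scaling the cross-task blocks; \depgpKNoColorInd is
+% the block-diagonal (independent tasks) special case, and \depgpK the
+% colored variant (target task green, support task blue).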
+
+\newcommand\kernelFunctionX{\kernelFunction^{\inputspace}}
+\newcommand\kernelMatrixX{\kernelMatrix^{\inputspace}}
+\newcommand\kron{\otimes}
+\newcommand\taskIndex{j}
+
+\newcommand\kernelVectorTarget{\vec{k}_{\targetTask*}}
+\newcommand\kernelVectorSupport{\vec{k}_{\supportTask*}}
+\newcommand\labelvectorTarget{\labelvector_{\targetTask}}
+\newcommand\labelvectorSupport{\labelvector_{\supportTask}}
+\newcommand\inputdatasetTarget{\inputdataset_{\targetTask}}
+\newcommand\inputdatasetSupport{\inputdataset_{\supportTask}}
+\newcommand\loovariance{\tilde{\sigma}^2}
+\newcommand\loomean{\tilde{\mu}}
+
+\newcommand\kF{\mat{K}^{\latentFunction}}
+\newcommand\kFTasks[2]{K^{\latentFunction}_{#1 #2}}
+\newcommand\latentfunctionS{\tilde{\latentFunction}}
+\newcommand\kernelFunctionS{\tilde{\kernelFunction}}
+\newcommand\latentfunctionB{\bar{\latentFunction}}
+\newcommand\kernelFunctionB{\bar{\kernelFunction}}
+\newcommand\pilonettoWeight{\alpha}
+
+\newcommand\wnsim{d}
+
+% ------ tommasi
+\newcommand\tommasiBeta{\beta}
+\newcommand\hyperplaneTarget{\hyperplane^{(\targetTask)}}
+\newcommand\hyperplaneSupport{\hyperplane^{(\supportTask)}}
+\newcommand\alphaSupport{\vec{\alpha}^{(\supportTask)}}
+\newcommand\alphaTarget{\vec{\alpha}^{(\targetTask)}}
+\newcommand\alphaTargetValue{\alpha^{(\targetTask)}}
+
+% ------ occ
+\newcommand\occScore{\nu}
+\newcommand\occThreshold{\xi}
+
+\newcommand\ftMatrix{\mat{\Phi}}
+\newcommand\kernelMatrixReg{\kernelMatrix_{\mss{reg}}}
+\newcommand\covarianceReg{\mat{C}_{\mss{reg}}}
+\newcommand\covarianceMatrix{\mat{C}}
+\newcommand\ftmean{\vec{\mu}_{\ftMatrix}}
+\newcommand\ftransformC{\tilde{\ftransform}}
+\newcommand\squashFunction{\Phi}
+
+\newcommand\radiusBall{R}
+\newcommand\meanBall{\vec{m}}
+
+% ------- local features
+
+\newcommand\dimensionLF{S}
+\newcommand\localfeature{\vec{l}}
+\newcommand\lfposition{\vec{p}}
+\newcommand\numberOfLFeat{W}
+\newcommand\lfSet{\mathcal{L}}
+
+% -------------- comparing histograms
+
+\newcommand\setA{\mathcal{A}}
+\newcommand\setB{\mathcal{B}}
+\newcommand\baseSet{\mathcal{U}}
+\newcommand\powerSet[1]{\mathcal{P}\left(#1\right)}
+\newcommand\distSet{d}
+\newcommand\clusterq{q}
+\newcommand\numberOfClusters{n_q}
+
+% -------------- BoF
+
+\newcommand\bofHist{\vec{h}}
+\newcommand\bofHistValue{h}
+\newcommand\bofIndex{j}
+
+% -------------- SIFT
+\newcommand\aimg{\mathfrak{g}}
+\newcommand\apoint{\vec{p}}
+\newcommand\gaussianFilter[1]{\mathfrak{h}_{#1}}
+\newcommand\gaussianScale{\sigma}
+\newcommand\illuminationFunction{u}
+\newcommand\greyValue{g}
+\newcommand\conv{*}
+
+% --------- pyramid matching
+\newcommand\matchingError{\error_{\pi}}
+\newcommand\pmkLevel{\ell}
+\newcommand\numPMKLevels{L}
+\newcommand\pmkHist{\vec{h}}
+\newcommand\pmkHistValue{h}
+\newcommand\pmkData{\mat{H}}
+\newcommand\pmkSimilarity{\kernelFunction^{\mss{PMK}}}
+\newcommand\pmkSimilarityNormalized{\tilde{\kernelFunction}^{\mss{PMK}}}
+\newcommand\pmkMatches{I}
+
+% --------- experiments
+\newcommand\numRuns{Z}
+\newcommand\confusionMatrix{\mat{C}}
+\newcommand\confusionMatrixValue{C}
+\newcommand\noeTest{\noe^{\mss{t}}}
+%\newcommand\recogRate{\error^{\mss{ov}}}
+%\newcommand\avgRecogRate{\error^{\mss{avg}}}
+\newcommand\recogRate{\text{err-ov}}
+\newcommand\avgRecogRate{\text{err-avg}}
+
+\newcommand\ctp{\text{TP}}
+\newcommand\cfp{\text{FP}}
+\newcommand\cfn{\text{FN}}
+\newcommand\ctn{\text{TN}}
+\newcommand\numPositives{\noe_{\mss{pos}}}
+\newcommand\numNegatives{\noe_{\mss{neg}}}
+\newcommand\tprate{\text{TPR}}
+\newcommand\fprate{\text{FPR}}

Too many files were changed in this diff, so some files are not shown.