Browse Source

new indep. module as interface to vislearning

Alexander Freytag 12 năm trước cách đây
commit
894a1f2675
50 tập tin đã thay đổi với 12672 bổ sung và 0 xóa
  1. 242 0
      GPHIKClassifierNICE.cpp
  2. 95 0
      GPHIKClassifierNICE.h
  3. 8 0
      Makefile
  4. 103 0
      Makefile.inc
  5. 148 0
      configs/AL_predVar_fine.conf
  6. 48 0
      configs/AwA.conf
  7. 52 0
      configs/GP_IL_New_Examples.conf
  8. 41 0
      configs/ImagenetBinaryGP.conf
  9. 6 0
      configs/computeNormHistFeat.conf
  10. 19 0
      configs/createSIFTFeatures.conf
  11. 18 0
      configs/createSIFTFeaturesHSG.conf
  12. 9 0
      configs/scenes.reclassification.conf
  13. 9 0
      configs/scenes.smalltest.conf
  14. 9 0
      configs/scenes.std.conf
  15. 1679 0
      fast-hikDoxyConfig.txt
  16. 10 0
      libdepend.inc
  17. 694 0
      progs/IL_AL.cpp
  18. 783 0
      progs/IL_AL_Binary.cpp
  19. 902 0
      progs/IL_AL_Binary_GPBaseline.cpp
  20. 530 0
      progs/IL_NewExamples.cpp
  21. 571 0
      progs/IL_NewExamples_Comparison.cpp
  22. 88 0
      progs/Makefile.inc
  23. 921 0
      progs/activeLearningCheckerBoard.cpp
  24. 99 0
      progs/bovizeObjectBankFeatures.cpp
  25. 172 0
      progs/compressObjectBankFeatures.cpp
  26. 51 0
      progs/computeLocalFeatures.cpp
  27. 117 0
      progs/computeNormalizedHistogramFeatures.cpp
  28. 184 0
      progs/computeSIFTFeatures.cpp
  29. 102 0
      progs/datatools.h
  30. 117 0
      progs/eccv2012-15scenes-fasthik.cpp
  31. 147 0
      progs/eccv2012-15scenes.cpp
  32. 162 0
      progs/eccv2012-AwA.cpp
  33. 444 0
      progs/eccv2012-synthetic.cpp
  34. 149 0
      progs/saveImageNetBinary.cpp
  35. 165 0
      progs/testFPClassifier.cpp
  36. 141 0
      progs/testImageNetBinary.cpp
  37. 229 0
      progs/testImageNetBinaryGPBaseline.cpp
  38. 103 0
      progs/testImageNetMedian.cpp
  39. 266 0
      progs/testLinsolvers.cpp
  40. 131 0
      progs/testLogDetApproximation.cpp
  41. 95 0
      progs/testWackerOptimization.cpp
  42. 89 0
      tests/Makefile.inc
  43. 536 0
      tests/TestGPHIKClassifier.cpp
  44. 31 0
      tests/TestGPHIKClassifier.h
  45. BIN
      tests/sparse20x30matrixM.mat
  46. BIN
      tests/sparse3x3matrixA.mat
  47. 42 0
      tests/toyExample1.data
  48. 9 0
      tests/toyExample2.data
  49. 1502 0
      tests/toyExampleLargeLargeScale.data
  50. 604 0
      tests/toyExampleLargeScale.data

+ 242 - 0
GPHIKClassifierNICE.cpp

@@ -0,0 +1,242 @@
+/** 
+* @file GPHIKClassifierNICE.cpp
+* @brief feature pool interface for our GP HIK classifier
+* @author Alexander Freytag
+* @date 02/01/2012
+
+*/
+#include <iostream>
+
+#include "core/basics/numerictools.h"
+#include <core/basics/Timer.h>
+
+#include "GPHIKClassifierNICE.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+
+/**
+ * @brief Read wrapper-level settings from the config and create the underlying GPHIKClassifier.
+ * All remaining options are parsed by GPHIKClassifier itself from the same config section.
+ */
+GPHIKClassifierNICE::GPHIKClassifierNICE( const Config *conf, const string & confSection ) 
+{
+  this->verbose = conf->gB(confSection, "verbose", false);
+  this->useSimpleBalancing = conf->gB(confSection, "use_simple_balancing", false);
+  // -1 means "derive minSamples from the smallest class later in train()"
+  this->minSamples = conf->gI(confSection, "min_samples", -1);  
+  
+  classifier = new GPHIKClassifier(conf, confSection);
+}
+
+/** @brief Destructor: releases the owned GPHIKClassifier instance. */
+GPHIKClassifierNICE::~GPHIKClassifierNICE()
+{
+  if ( classifier != NULL )
+    delete classifier;
+}
+
+/**
+ * @brief Classify an Example: unpack its sparse feature vector and delegate to
+ *        the SparseVector overload. Throws if the example carries no svec.
+ */
+ClassificationResult GPHIKClassifierNICE::classify ( Example & pe )
+{
+  const SparseVector *svec = pe.svec;
+
+  if ( svec == NULL )
+    fthrow(Exception, "GPHIKClassifierNICE requires example.svec (SparseVector stored in an Example struct)");
+ return this->classify( svec ); 
+}
+
+/**
+ * @brief Classify a sparse example with the previously learnt model.
+ * Converts the sparse per-class score vector of GPHIKClassifier into the dense
+ * FullVector expected by ClassificationResult; classes without a score get
+ * -DBL_MAX so they can never win maxElement().
+ */
+ClassificationResult GPHIKClassifierNICE::classify ( const NICE::SparseVector * example )
+{
+  NICE::SparseVector scores;
+  int result;
+  
+  double uncertainty;
+ 
+  classifier->classify ( example,  result, scores, uncertainty);
+  
+  if ( scores.size() == 0 ) {
+    fthrow(Exception, "Zero scores, something is likely to be wrong here: svec.size() = " << example->size() );
+  }
+  int classes = scores.getDim();
+  FullVector fvscores(classes);
+  
+  // densify: sparse entries carry real scores, missing classes are "impossible"
+  NICE::SparseVector::const_iterator it;
+  for(int c = 0; c < classes; c++)
+  {
+    it = scores.find(c);
+    if ( it == scores.end() )
+      fvscores[c] = -std::numeric_limits<double>::max();
+    else
+      fvscores[c] = it->second;
+  }
+
+  ClassificationResult r ( fvscores.maxElement(), fvscores );
+  r.uncertainty = uncertainty;
+  
+  if (verbose)
+  {
+    std::cerr << " GPHIKClassifierNICE::classify scores" << std::endl;
+    scores.store(std::cerr);
+    std::cerr << " GPHIKClassifierNICE::classify fvscores" << std::endl;
+    fvscores.store(std::cerr);
+  }
+
+  return r;
+}
+
+/** training process */
+void GPHIKClassifierNICE::train ( FeaturePool & fp, Examples & examples )
+{
+  // we completely ignore the feature pool :)
+  //
+  initRand(0);
+  Vector classCounts;
+  int minClass = -1;
+  
+  if (verbose) 
+    std::cerr << "GPHIKClassifierNICE::train" << std::endl;
+
+  if ( useSimpleBalancing)
+  {
+    classCounts.resize( examples.getMaxClassNo()+1 );
+    classCounts.set( 0.0 );
+    for ( uint i = 0 ; i < examples.size() ; i++ )
+      classCounts[ examples[i].first ]++;
+    // we need a probability distribution
+    //classCounts.normalizeL1();
+    // we need the class index of the class with the least non-zero examples
+    for ( uint i = 0 ; i < classCounts.size(); i++ )
+      if ( (classCounts[i] > 0) && ((minClass < 0) || (classCounts[i] < classCounts[minClass])) )
+        minClass = i;
+    if (verbose)
+    {
+      cerr << "Class distribution: " << classCounts << endl;
+      cerr << "Class with the least number of examples: " << minClass << endl;
+    }
+    if(minSamples < 0)
+      minSamples = classCounts[minClass];
+  }
+
+  // (multi-class) label vector
+  Vector y ( examples.size() /* maximum size */ );
+
+  // flat structure of our training data
+  std::vector< SparseVector * > sparseExamples;
+
+  if (verbose)
+    cerr << "Converting (and sampling) feature vectors" << endl;
+  for ( uint i = 0 ; i < examples.size() ; i++ )
+  {
+    const Example & example = examples[i].second;
+    int classno = examples[i].first;
+    
+    // simple weird balancing method
+    if ( useSimpleBalancing ) 
+    {
+      double t = randDouble() * classCounts[classno];
+      if ( t >= minSamples ) continue;
+    }
+
+    y[ sparseExamples.size() ] = classno;
+    if ( example.svec == NULL )
+      fthrow(Exception, "GPHIKClassifierNICE requires example.svec (SparseVector stored in an Example struct)");
+    sparseExamples.push_back( example.svec );    
+  }
+
+  // we only use a subset for training
+  y.resize( sparseExamples.size() );
+  
+  classifier->train(sparseExamples, y);
+}
+
+/** training process */
+void GPHIKClassifierNICE::train ( const std::vector< SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels )
+{
+  classifier->train(examples, binLabels);
+}
+
+/** @brief Drop the learnt model entirely.
+ *  NOTE(review): afterwards 'classifier' is NULL and a new instance is only
+ *  created in the constructor; a subsequent classify()/restore() would
+ *  dereference NULL — confirm intended usage. */
+void GPHIKClassifierNICE::clear ()
+{
+  if ( classifier != NULL )
+    delete classifier;
+  classifier = NULL;
+}
+
+/** @brief Cloning is not supported yet; always throws. */
+FeaturePoolClassifier *GPHIKClassifierNICE::clone () const
+{
+  fthrow(Exception, "GPHIKClassifierNICE: clone() not yet implemented" );
+
+  return NULL;
+}
+
+/** @brief Predict classification uncertainty for an Example; unpacks the
+ *  sparse vector and delegates to the wrapped classifier. */
+void GPHIKClassifierNICE::predictUncertainty( Example & pe, NICE::Vector & uncertainties )
+{
+  const SparseVector *svec = pe.svec;  
+  if ( svec == NULL )
+    fthrow(Exception, "GPHIKClassifierNICE requires example.svec (SparseVector stored in an Example struct)");
+  classifier->predictUncertainty(svec, uncertainties);
+}
+   
+/** @brief Predict classification uncertainty — direct delegation. */
+void GPHIKClassifierNICE::predictUncertainty( const NICE::SparseVector * example, NICE::Vector & uncertainties )
+{  
+  classifier->predictUncertainty(example, uncertainties);
+}
+
+//---------------------------------------------------------------------
+//                           protected methods
+//---------------------------------------------------------------------
+/** @brief Restore the wrapped classifier from a stream (Persistent interface).
+ *  NOTE(review): assumes 'classifier' is non-NULL; after clear() this would
+ *  dereference NULL — confirm. */
+void GPHIKClassifierNICE::restore ( std::istream & is, int format )
+{
+  if (is.good())
+  {
+    classifier->restore(is, format);  
+  }
+  else
+  {
+    std::cerr << "GPHIKClassifierNICE::restore -- InStream not initialized - restoring not possible!" << std::endl;
+  }
+}
+
+/** @brief Store the wrapped classifier to a stream (Persistent interface).
+ *  Sets full double precision on the stream so values round-trip exactly. */
+void GPHIKClassifierNICE::store ( std::ostream & os, int format ) const
+{
+  if (os.good())
+  {
+    os.precision (numeric_limits<double>::digits10 + 1);
+    
+    classifier->store(os, format);
+  }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }
+}
+
+/** @brief Incrementally add a single labelled example.
+ *  NOTE(review): unlike classify()/predictUncertainty(), pe.svec is passed on
+ *  without a NULL check — confirm whether the same fthrow guard is wanted. */
+void GPHIKClassifierNICE::addExample( const Example & pe, const double & label, const bool & performOptimizationAfterIncrement)
+{
+  const SparseVector *svec = pe.svec;
+  classifier->addExample(svec, label, performOptimizationAfterIncrement);
+}
+
+/** @brief Incrementally add a batch of labelled examples.
+ *  Flattens the Examples container into sparse vectors plus a label vector
+ *  (no balancing/subsampling here, in contrast to train()) and delegates. */
+void GPHIKClassifierNICE::addMultipleExamples( Examples & newExamples, const bool & performOptimizationAfterIncrement)
+{
+  //are new examples available? If not, nothing has to be done
+  if ( newExamples.size() < 1)
+    return;
+  
+  // (multi-class) label vector
+  Vector y ( newExamples.size() );
+
+  // flat structure of our training data
+  std::vector< const SparseVector * > sparseExamples;
+
+  if (verbose)
+    cerr << "Converting (and sampling) feature vectors" << endl;
+  for ( uint i = 0 ; i < newExamples.size() ; i++ )
+  {
+    const Example & example = newExamples[i].second;
+    int classno = newExamples[i].first;
+
+    y[ i ] = classno;
+    if ( example.svec == NULL )
+      fthrow(Exception, "GPHIKClassifierNICE requires example.svec (SparseVector stored in an Example struct)");
+    sparseExamples.push_back( example.svec );    
+  }  
+  
+  classifier->addMultipleExamples(sparseExamples, y, performOptimizationAfterIncrement);  
+}

+ 95 - 0
GPHIKClassifierNICE.h

@@ -0,0 +1,95 @@
+/** 
+* @file GPHIKClassifierNICE.h
+* @author Alexander Freytag, Erik Rodner
+* @date 02/01/2012
+
+*/
+#ifndef _NICE_GPHIKCLASSIFIERNICEINCLUDE
+#define _NICE_GPHIKCLASSIFIERNICEINCLUDE
+
+#include <string>
+#include "core/basics/Config.h"
+#include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
+
+#include <gp-hik-core/GPHIKClassifier.h>
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
+
+namespace OBJREC {
+  
+/** @class GPHIKClassifierNICE
+ * Wrapper class (feature pool interface) for our GP HIK classifier 
+ *
+ * @author Alexander Freytag, Erik Rodner
+ */
+class GPHIKClassifierNICE : public FeaturePoolClassifier
+{
+
+  protected:
+    
+    /** the wrapped GP-HIK classifier; owned by this object and deleted in the destructor */
+    OBJREC::GPHIKClassifier * classifier;
+    
+    /** verbose flag for useful output*/
+    bool verbose;
+    
+    /** a simple balancing strategy: use only that many examples of each class, as the smallest class provides*/
+    bool useSimpleBalancing; 
+    /** per-class example cap for balancing; -1 lets train() derive it from the smallest class */
+    int minSamples;
+
+  public:
+
+    /** simple constructor */
+    GPHIKClassifierNICE( const NICE::Config *conf, const std::string & confSection = "GPHIKClassifier" );
+      
+    /** simple destructor */
+    virtual ~GPHIKClassifierNICE();
+   
+    /** 
+    * @brief classify a given example with the previously learnt model
+    * @param pe example to be classified given in a sparse representation
+    */
+    virtual ClassificationResult classify ( OBJREC::Example & pe );
+    /** 
+     * @brief classify a given example with the previously learnt model
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param examples example to be classified given in a sparse representation
+     */    
+    ClassificationResult classify ( const NICE::SparseVector * example );
+
+    /** training process */
+    virtual void train ( OBJREC::FeaturePool & fp, OBJREC::Examples & examples );
+    /** 
+     * @brief train this classifier using a given set of examples and a given set of binary label vectors 
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param examples examples to use given in a sparse data structure
+     * @param binLabels corresponding binary labels with class no. There is no need that every example has only one positive entry in this set (1,-1)
+     */
+    void train ( const std::vector< NICE::SparseVector *> & examples, std::map<int, NICE::Vector> & binLabels );
+    
+    /** Persistent interface */
+    virtual void restore ( std::istream & is, int format = 0 );
+    virtual void store ( std::ostream & os, int format = 0 ) const;
+    virtual void clear ();
+
+    virtual FeaturePoolClassifier *clone () const;
+    
+    /** prediction of classification uncertainty */
+    void predictUncertainty( OBJREC::Example & pe, NICE::Vector & uncertainties );
+    /** 
+     * @brief prediction of classification uncertainty
+     * @date 19-06-2012 (dd-mm-yyyy)
+     * @author Alexander Freytag
+     * @param examples example for which the classification uncertainty shall be predicted, given in a sparse representation
+     * @param uncertainties contains the resulting classification uncertainties (1 entry for standard setting, m entries for binary-balanced setting)
+     */       
+    void predictUncertainty( const NICE::SparseVector * example, NICE::Vector & uncertainties );
+    
+    /** incrementally add a single example / a batch of examples to the learnt model */
+    void addExample( const OBJREC::Example & pe, const double & label, const bool & performOptimizationAfterIncrement = true);
+    void addMultipleExamples( OBJREC::Examples & newExamples, const bool & performOptimizationAfterIncrement = true);
+};
+
+}
+
+#endif

+ 8 - 0
Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursively going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusively use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 148 - 0
configs/AL_predVar_fine.conf

@@ -0,0 +1,148 @@
+[train0]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run0.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test0]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run0.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train1]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run1.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test1]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run1.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train2]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run2.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test2]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run2.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train3]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run3.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test3]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run3.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train4]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run4.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test4]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run4.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train5]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run5.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test5]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run5.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train6]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run6.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test6]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run6.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train7]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run7.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test7]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run7.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train8]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run8.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test8]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run8.test
+classselection_test = "*"
+examples_test = seq * 50
+
+[train9]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run9.train
+classselection_train = "*"
+examples_train = seq * 100
+
+[test9]
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/run9.test
+classselection_test = "*"
+examples_test = seq * 50
+
+
+[cache]
+#root = "/home/rodner/3rdparty/imagenetBOF/niceFeatures/"
+root = "/home/luetz/data/feature-storage/15Scenes/niceFeatures/"
+
+[GP_IL]
+trainExPerClass = 1
+num_runs = 10
+do_classification = true
+incrementalAddSize = 3
+nrOfIncrements = 30
+
+[main]
+# extension of all files in the cache
+ext = ".feat"
+queryStrategy = gpPredVar
+
+[GPHIKClassifier]
+noise =  0.0000001
+# no uncertainty for standard classification
+uncertaintyPredictionForClassification = false
+#--define the uncertainty prediction scheme--
+# standard predictive variance
+#uncertaintyPrediction = pred_variance
+# use the heuristic as proposed by Kapoor et al.
+#uncertaintyPrediction = heuristic
+# no classification uncertainty at all?
+#uncertaintyPrediction = none
+
+#--define the computation scheme for the predictive variance, if needed--
+#if we do not need any predictive variance for this experiment
+#varianceApproximation = none
+# predictive variance approximation useful for sparse features - really fast
+#varianceApproximation = approximate_rough 
+# predictive variance approximation with eigenvectors (finer)
+varianceApproximation = approximate_fine
+nrOfEigenvaluesToConsiderForVarApprox = 2
+#exact computation of predictive variance
+#varianceApproximation = exact
+
+#--define the optimization method--
+optimization_method = none
+#optimization_method = downhillsimplex
+parameter_lower_bound = 1.0
+parameter_upper_bound = 1.0 
+
+#--stuff for the IterativeLinearSolver--
+#ils_verbose = true

+ 48 - 0
configs/AwA.conf

@@ -0,0 +1,48 @@
+[traintest]
+#the original images are currently not available
+dataset = /home/luetz/data/Animals_with_Attributes/features/rgsift-hist/
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/phog-hist/
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/cq-hist/
+#classselection_train = "*"
+#classselection_test = "*"
+classselection_train = "pig, giant+panda, seal, raccoon, rat, hippopotamus, leopard, persian+cat, chimpanzee, humpback+whale"
+classselection_test = "pig, giant+panda, seal, raccoon, rat, hippopotamus, leopard, persian+cat, chimpanzee, humpback+whale"
+examples_train = random * 46
+#examples_train = random * 260
+examples_test = random * 46
+#examples_test = random * 40
+#examples_test = random pig 311, random giant+panda 922, random seal 489, random raccoon 613, random rat 283, random hippopotamus 703, random leopard 588, random persian+cat 694, random chimpanzee 
+#681, random humpback+whale 696
+#examples_train = random pig 165, random giant+panda 922, random seal 489, random raccoon 613, random rat 283, random hippopotamus 703, random leopard 588, random persian+cat 694, random chimpanzee
+#681, random humpback+whale 696
+
+
+#[train]
+##the original images are currently not available
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/
+#classselection_train = "*"
+#examples_train = random * 1
+
+#[test]
+##the original images are currently not awailable
+#dataset = /home/dbv/bilder/Animals_with_Attributes/features/
+#classselection_test = "*"
+#examples_test = random * 15
+
+[cache]
+root = "/home/luetz/data/Animals_with_Attributes/features/rgsift-hist/"
+#root = "/home/dbv/bilder/Animals_with_Attributes/features/phog-hist/"
+#root = "/home/dbv/bilder/Animals_with_Attributes/features/cq-hist/"
+
+[HIKGP]
+#parameter_lower_bound = 0.5
+#parameter_upper_bound = 2.5
+ils_max_iterations = 50
+ils_method = MINRES
+optimization_method = downhillsimplex
+optimize_noise = true
+
+[main]
+nrRuns = 10
+transform = absexp
+#transform = exp

+ 52 - 0
configs/GP_IL_New_Examples.conf

@@ -0,0 +1,52 @@
+[traintest]
+#dataset = /home/dbv/bilder/15Scenes/imagesScaled/
+dataset = /home/luetz/data/images/15Scenes/imagesScaled/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = random * 100
+examples_test = random * 50
+
+[cache]
+#root = "/home/rodner/3rdparty/imagenetBOF/niceFeatures/"
+root = "/home/luetz/data/feature-storage/15Scenes/niceFeatures/"
+
+[GP_IL]
+trainExPerClass = 10
+num_runs = 10
+do_classification = true
+incrementalAddSize = 1
+nrOfIncrements = 50
+
+[main]
+# extension of all files in the cache
+ext = ".feat"
+
+[GPHIKClassifier]
+noise =  0.01
+parameter_lower_bound = 0.5
+parameter_upper_bound = 2.0 
+#--define the uncertainty prediction scheme--
+# standard predictive variance
+#uncertaintyPrediction = pred_variance
+# use the heuristic as proposed by Kapoor et al.
+uncertaintyPrediction = heuristic
+# no classification uncertainty at all?
+#uncertaintyPrediction = none
+
+#--define the computation scheme for the predictive variance, if needed--
+#if we do not need any predictive variance for this experiment
+#varianceApproximation = none
+# predictive variance approximation useful for sparse features - really fast
+varianceApproximation = approximate_rough 
+# predictive variance approximation with eigenvectors (finer)
+#varianceApproximation = approximate_fine
+#nrOfEigenvaluesToConsiderForVarApprox = 2
+#exact computation of predictive variance
+#varianceApproximation = exact
+
+#--define the optimization method--
+#optimization_method = none
+optimization_method = downhillsimplex
+
+#--stuff for the IterativeLinearSolver--
+#ils_verbose = true

+ 41 - 0
configs/ImagenetBinaryGP.conf

@@ -0,0 +1,41 @@
+#[HIKGP]
+[GPHIKClassifier]
+
+#optimization_method = "downhillsimplex"
+optimization_method = "none"
+parameter_upper_bound = 5.0
+ils_max_iterations = 500
+ils_verbose = true
+noise = 10.0
+verbose = true
+ils_min_residual = 1e-2
+learn_balanced = true
+
+[main]
+positive_class = 1
+
+# whether to use eriks folder (only works on dionysos)
+imageNetLocal = false
+
+# standard setting with one negative example for each category
+nneg = 50
+
+
+# with 20 iterations
+# This standard config should lead to ...  classification performance
+# With quantization we get: 0.891481 (with only 100 bins :)
+
+# Additional quantization
+
+#[HIKGP]
+[GPHIKClassifier]
+use_quantization = true
+num_bins = 100
+
+[RegGaussianProcess]
+noise = 10.0
+optimize_parameters = false
+
+[Kernel]
+robust_cholesky = "static"
+rchol_noise_variance = 10.0

+ 6 - 0
configs/computeNormHistFeat.conf

@@ -0,0 +1,6 @@
+[main]
+nrOfExamplesPerClass = 500
+nrOfDimensions = 50
+nrOfClasses = 3
+destination = /home/luetz/tmp/features.data
+saveLastDimension = false

+ 19 - 0
configs/createSIFTFeatures.conf

@@ -0,0 +1,19 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = seq * 100
+examples_test = all *
+
+[HSG]
+sample_scaling = 8
+localfeature_type = NICE_SIFT
+
+[main]
+destForFeat = /home/dbv/bilder/15Scenes/features/
+percentageOfPatchesForKMeans = 0.05
+nrOfClusters = 200
+verbose = false
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 18 - 0
configs/createSIFTFeaturesHSG.conf

@@ -0,0 +1,18 @@
+[all]
+dataset = /home/dbv/bilder/15Scenes/
+classselection = "*"
+examples = all *
+
+[HSG]
+sample_scaling = 8
+localfeature_type = NICE_SIFT
+
+[main]
+destForFeat = /home/dbv/bilder/15Scenes/features/
+verbose = false
+
+[cache]
+#root = "/home/dbv/bilder/15Scenes/features/featuresRaw/"
+root = "/home/luetz/tmp/lf/"
+descriptor_format = "binary_double"
+

+ 9 - 0
configs/scenes.reclassification.conf

@@ -0,0 +1,9 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = seq * 100
+examples_test = reclassification
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 9 - 0
configs/scenes.smalltest.conf

@@ -0,0 +1,9 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = random * 10
+examples_test = all *
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 9 - 0
configs/scenes.std.conf

@@ -0,0 +1,9 @@
+[traintest]
+dataset = /home/dbv/bilder/15Scenes/imagesScaled/
+classselection_train = "*"
+classselection_test = "*"
+examples_train = seq * 100
+examples_test = all *
+
+[cache]
+root = "/home/dbv/bilder/15Scenes/features/"

+ 1679 - 0
fast-hikDoxyConfig.txt

@@ -0,0 +1,1679 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = Fast HIK stuff
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER         =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = Provides methods needed to efficiently evaluate the Histogram Intersection Kernel and compute various derived variables
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = ./doxygen/
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF       =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES       = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE            =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT                  =
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS          =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS       =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET        =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS     = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET        = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE               =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING     =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+#  will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [0,1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES       = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH        = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE      = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA             =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD                =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED             =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS        = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, gif or svg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the number
+# of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP            = YES

+ 10 - 0
libdepend.inc

@@ -0,0 +1,10 @@
+$(call PKG_DEPEND_INT,core/)
+$(call PKG_DEPEND_INT,gp-hik-core/)
+$(call PKG_DEPEND_INT,vislearning/cbaselib/)
+$(call PKG_DEPEND_INT,vislearning/features/localfeatures/)
+$(call PKG_DEPEND_INT,vislearning/features/simplefeatures/)
+$(call PKG_DEPEND_INT,vislearning/classifier/classifierbase/)
+$(call PKG_DEPEND_INT,vislearning/classifier/kernelclassifier/)
+$(call PKG_DEPEND_INT,vislearning/matlabAccessHighLevel/)
+$(call PKG_DEPEND_INT,optimization/)
+$(call PKG_DEPEND_EXT,LIBHIK)

+ 694 - 0
progs/IL_AL.cpp

@@ -0,0 +1,694 @@
+/**
+* @file IL_AL.cpp
+* @brief Incrementally train the GP HIK classifier using the predictive variance and its approximations to select new samples
+* @author Alexander Freytag
+* @date 09-05-2012
+*/
+#include <vector>
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+
+
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+//----------
+
+#include "vislearning/baselib/ProgressBar.h"
+
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+#include "vislearning/cbaselib/MultiDataset.h"
+#include <vislearning/cbaselib/LabeledSet.h>
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include <vislearning/baselib/Globals.h>
+
+#include <vislearning/math/kernels/KernelData.h>
+
+//----------
+
+#include "gp-hik-exp/progs/datatools.h"
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+// #include <incrementallearning/IL_Framework_Generic.h>
+
+
+//
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+// Verbosity levels for the amount of progress / statistics output printed below.
+enum verbose_level {NONE = 0, LOW = 1,  MEDIUM = 2, EVERYTHING = 3};
+// Active-learning query strategies used to select the next unlabeled examples.
+enum QueryStrategy{
+      RANDOM = 0,  // pick unlabeled examples uniformly at random
+      GPMEAN,      // smallest absolute GP predictive mean score (min-margin)
+      GPPREDVAR,   // largest GP predictive variance
+      GPHEURISTIC  // smallest |mean| / sqrt(noise^2 + variance) ratio
+    }; 
+    
/**
 * @brief Render an integer as its decimal string representation.
 * @param number value to convert
 * @return string containing the decimal digits (with sign) of number
 */
std::string convertInt(int number)
{
   // Stream the value through an output string stream and hand back the buffer.
   std::ostringstream formatter;
   formatter << number;
   return formatter.str();
}
+
+/**
+    Computes kernel matrices from randomly or deterministically chosen training images and evaluates their performance, using ROI optimization
+*/
+int main ( int argc, char **argv )
+{
+  std::cout.precision ( 10 );
+  std::cerr.precision ( 10 );
+
+  NICE::Config conf ( argc, argv );
+  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
+  int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);
+  int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);
+  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );  
+  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );
+  
+  double squaredNoise = pow(  conf.gD("GPHIKClassifierNICE", "noise", 0.01) , 2);
+
+  string queryStrategyString = conf.gS( "main", "queryStrategy", "random");
+  QueryStrategy queryStrategy;
+  if (queryStrategyString.compare("gpMean") == 0)
+  {
+    queryStrategy = GPMEAN;
+  }
+  else if (queryStrategyString.compare("gpPredVar") == 0)
+  {
+    queryStrategy = GPPREDVAR;
+  }
+  else if (queryStrategyString.compare("gpHeuristic") == 0)
+  {
+    queryStrategy = GPHEURISTIC;
+  }  
+  else
+  {
+    queryStrategy = RANDOM;
+  }
+ 
+  
+  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
+  verbose_level verbose ( NONE );
+  switch ( verbose_int )
+  {
+    case 0:
+      verbose = NONE;
+      break;
+    case 1:
+      verbose = LOW;
+      break;
+    case 2:
+      verbose = MEDIUM;
+      break;
+    case 3:
+      verbose = EVERYTHING;
+      break;
+  }
+
+
+
+  /* initialize random seed: */
+  srand ( time ( NULL ) ); //with 0 for reproductive results
+//    srand ( 0 ); //with 0 for reproductive results
+
+  // ===========================  INIT =========================== 
+  
+  std::vector<std::vector<double> > recognitions_rates(nrOfIncrements+1);
+  std::vector<std::vector<float> > classification_times(nrOfIncrements+1);
+  std::vector<std::vector<float> > IL_training_times(nrOfIncrements);
+  
+ int nrOfClassesUsed;
+  
+  for ( int run = 0; run < num_runs; run++ )
+  {
+    std::cerr << "run: " << run << std::endl;    
+    
+    //15-scenes settings
+    std::string ext = conf.gS("main", "ext", ".txt");
+    std::cerr << "Using cache extension: " << ext << std::endl;
+
+    OBJREC::MultiDataset md ( &conf );
+    
+    std::cerr << "now read the dataset" << std::endl;
+   
+    // read training set
+    vector< NICE::Vector > trainDataOrig;
+    Vector y;
+    string trainRun ( "train" + convertInt( run ) );
+    std::cerr << "look for " << trainRun << std::endl;
+    const LabeledSet *train = md[ trainRun ]; //previously, we only selected "train", no we select the permutation for this run
+    
+    LabeledSet::Permutation orderTrain;
+    train->getPermutation(orderTrain); 
+    std::vector<string> filenamesTraining;
+    for ( LabeledSet::Permutation::const_iterator i = orderTrain.begin(); i != orderTrain.end(); i++)
+    {
+      string filename((i->second)->img());
+      filenamesTraining.push_back(filename);
+    }    
+
+    readData< std::vector< NICE::Vector >, NICE::Vector >  ( conf, *train, trainDataOrig, y, ext );
+    
+    std::set<int> classesAvailable;
+    for ( uint i = 0; i < y.size(); i++)
+    {
+      //automatically check for duplicates
+      classesAvailable.insert( y[i] );
+    }
+    
+    int numberOfClasses =  classesAvailable.size();
+    
+    std::map<int,int> nrExamplesPerClassInDataset;
+    std::map<int,std::vector<int> > examplesPerClassInDataset;
+    
+    for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
+    {
+      nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
+      examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
+    }
+
+    for ( uint i = 0; i < y.size(); i++ )
+    {
+      (examplesPerClassInDataset.find( y[i] )->second).push_back(i);
+    }
+    
+    for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
+    {
+      nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
+    }
+    
+    for ( std::map<int,int>::const_iterator it =  nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
+    {
+      cerr << it->first << ": " << it->second << endl;
+    }    
+       
+    Examples examples;   
+    
+    //count how many examples of every class we have while actively selecting new examples
+    //NOTE works only if we have subsequent class numbers
+    NICE::Vector pickedExamplesPerClass( classesAvailable.size(), trainExPerClass);
+    
+    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
+    
+    //chose examples for every class used for training
+    //we will always use the first examples from each class, since the dataset comes already randomly ordered
+    for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++)
+    {
+      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+      std::cerr << "pick training examples for class " << *clIt << std::endl;
+      
+      for (int i = 0; i < trainExPerClass; i++)
+      {
+        std::cerr << "i: " << i << std::endl;
+         int exampleIndex ( 0 ); //old: rand() % ( exIt->second.size() ) );
+         std::cerr << "pick example " << exIt->second[exampleIndex] << " - " << y[exIt->second[exampleIndex] ] << " -- " << filenamesTraining[exIt->second[exampleIndex]] <<  std::endl;
+         
+        Example example;
+        NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
+        example.svec = new SparseVector(xTrain);
+        //let's take this example and its corresponding label (which should be *clIt)
+        examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) ); 
+        //
+        exIt->second.erase(exIt->second.begin()+exampleIndex);
+      }
+    }    
+     
+    
+    std::vector<string> filenamesUnlabeled;
+    filenamesUnlabeled.clear();
+    //which examples are left to be actively chosen lateron?
+    std::vector<int> unlabeledExamples( y.size() - trainExPerClass*classesAvailable.size() );
+    int exCnt( 0 );
+    for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
+    {
+      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+      //list all examples of this specific class
+      for (std::vector<int>::const_iterator it = exIt->second.begin(); it != exIt->second.end(); it++)
+      {
+        unlabeledExamples[exCnt] = *it;
+        exCnt++;
+        filenamesUnlabeled.push_back( filenamesTraining[*it] );
+      }
+    }
+    
+    time_t  prep_start_time = clock();
+
+    GPHIKClassifierNICE * classifier  = new GPHIKClassifierNICE( &conf );
+    
+    FeaturePool fp; // will be ignored
+    classifier->train ( fp, examples );
+
+    float time_preparation = ( float ) ( clock() - prep_start_time ) ;
+    std::cerr << "Time for initial training: " << time_preparation / CLOCKS_PER_SEC << std::endl;
+    
+    nrOfClassesUsed = classesAvailable.size();
+    
+      // ------------------ TESTING
+    string testRun ( "test" + convertInt( run ) );
+    const LabeledSet *test = md[ testRun ]; //previously, we only selected "test", no we select the permutation for this run
+    VVector testData;
+    Vector yTest;
+    readData< VVector, Vector > ( conf, *test, testData, yTest, ext );
+    
+
+    NICE::Matrix confusionMatrix ( numberOfClasses, numberOfClasses );
+    confusionMatrix.set ( 0.0 );
+
+    time_t  start_time = clock();
+
+    std::vector<int> chosen_examples_per_class ( numberOfClasses );
+    
+    std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+
+    if ( do_classification )
+    {
+      for ( uint i = 0 ; i < testData.size(); i++ )
+      {
+        Example example;
+        const Vector & xstar = testData[i];
+        SparseVector xstar_sparse ( xstar );
+        OBJREC::ClassificationResult result;
+        example.svec = &xstar_sparse;
+        
+        result = classifier->classify( example );
+//         cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;
+        
+        result.classno_groundtruth = yTest[i];
+        confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+      }
+
+      float time_classification = ( float ) ( clock() - start_time ) ;
+      if ( verbose >= LOW )
+        cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
+      ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );
+
+      confusionMatrix.normalizeRowsL1();
+      double avg_recognition_rate = 0.0;
+      for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+      {
+        if ( verbose >= MEDIUM )
+        {
+          std::cerr << "Class no: " <<  i  << " : " << confusionMatrix ( i, i ) << std::endl;
+        }
+        avg_recognition_rate += confusionMatrix ( i, i );
+      }
+
+      avg_recognition_rate /= confusionMatrix.rows();
+
+      std::cerr << confusionMatrix;
+      std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " % -- " << examples.size() << " training examples used" << std::endl << std::endl;
+
+      recognitions_rates[0].push_back ( avg_recognition_rate*100 );
+    }
+
+    //Now start the Incremental-Learning-Part
+    
+    for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
+    {
+      //chose examples for every class used for training
+      Examples newExamples;
+      
+      
+      //simply count how many possible example we have 
+      int nrOfPossibleExamples(  unlabeledExamples.size() );
+      
+      if (queryStrategy == RANDOM)
+      {
+        std::cerr << "print chosen examples: " << std::endl;        
+        for (int i = 0; i < incrementalAddSize; i++)
+        {        
+          int exampleIndex ( rand() % ( unlabeledExamples.size() ) );
+          
+          Example newExample;
+          NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exampleIndex] ];
+          newExample.svec = new SparseVector( xTrain ); 
+          int label( y[ unlabeledExamples[exampleIndex] ] );
+          newExamples.push_back ( pair<int, Example> ( label, newExample ) );
+          unlabeledExamples.erase( unlabeledExamples.begin()+exampleIndex );
+          std::cerr << exampleIndex+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ exampleIndex ] << std::endl;          
+          filenamesUnlabeled.erase( filenamesUnlabeled.begin()+exampleIndex );
+          pickedExamplesPerClass[label]++;
+        }
+      }// end computation for RANDOM
+      else if ( (queryStrategy == GPMEAN) || (queryStrategy == GPPREDVAR) || (queryStrategy == GPHEURISTIC) )
+      {
+        //compute uncertainty values for all examples according to the query strategy
+        std::vector<std::pair<int,double> > scores;
+        scores.clear();
+        time_t  unc_pred_start_time = clock();
+//         std::cerr << "possible examples to query: " << unlabeledExamples.size() << std::endl;
+        for (uint exIndex = 0; exIndex < unlabeledExamples.size(); exIndex++)
+        {
+            Example example;    
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exIndex] ];
+            SparseVector xTrainSparse ( xTrain );
+            example.svec = &xTrainSparse;
+            
+            if (queryStrategy == GPMEAN)
+            {
+              ClassificationResult r = classifier->classify( example );
+              double bestScore( numeric_limits<double>::max() );
+              for( int clCnt = 0; clCnt < nrOfClassesUsed; clCnt++)
+              {
+                if ( fabs(r.scores[clCnt]) < bestScore )
+                  bestScore = fabs(r.scores[clCnt]);
+              }
+              scores.push_back( std::pair<int,double> ( exIndex, bestScore ) );
+            }
+            else if (queryStrategy == GPPREDVAR)
+            {
+              NICE::Vector singleUncertainties;
+              //use the pred variance computation specified in the config file
+              classifier->predictUncertainty( example, singleUncertainties );
+              //take the maximum of the scores for the predictive variance
+              scores.push_back( std::pair<int,double> ( exIndex, singleUncertainties.Max()) );
+            }
+            else if (queryStrategy == GPHEURISTIC)
+            {
+              NICE::Vector singleUncertainties;
+              //use the pred variance computation specified in the config file
+              classifier->predictUncertainty( example, singleUncertainties );
+              //compute the mean values for every class
+              ClassificationResult r = classifier->classify( example );
+              for ( int tmp = 0; tmp < singleUncertainties.size(); tmp++ )
+              {
+                singleUncertainties[tmp] = fabs(r.scores[tmp]) / sqrt( squaredNoise + singleUncertainties[tmp] );
+              }              
+              //take the minimum of the scores for the heuristic measure
+              scores.push_back( std::pair<int,double> ( exIndex, singleUncertainties.Min()) );
+            }
+        }
+        float time_score_computation = ( float ) ( clock() - unc_pred_start_time ) ;
+          
+        //pick the ones with best score
+        //we could speed this up using a more sophisticated search method
+        
+        if (queryStrategy == GPPREDVAR) //take the maximum of the scores for the predictive variance
+        {
+          std::set<int> chosenExamplesForThisRun;
+          chosenExamplesForThisRun.clear();          
+          for (int i = 0; i < incrementalAddSize; i++)
+          {
+            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+            
+            for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+            {
+              if (jIt->second > bestExample->second)
+                bestExample = jIt;
+            }
+            Example newExample;    
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ]; 
+            newExample.svec = new SparseVector( xTrain ); 
+            //actually this is the ACTIVE LEARNING step (query a label)
+            int label( y[ unlabeledExamples[bestExample->first] ] );
+            newExamples.push_back ( pair<int, Example> ( label, newExample ) );    
+            //remember the index, to safely remove this example afterwards from unlabeledExamples
+            chosenExamplesForThisRun.insert(bestExample->first);
+            scores.erase(bestExample);
+            pickedExamplesPerClass[label]++;
+          }
+          
+          std::cerr << "print chosen examples: " << std::endl;
+          int tmpCnt(0);
+          for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
+          {
+            std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ *it ] << std::endl;
+          }   
+          
+          //delete the queried examples from the set of unlabeled ones
+          //do this in an decreasing order in terms of indices to ensure valid access
+          for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+          {
+            unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+          }          
+        }
+        else //take the minimum of the scores for the heuristic and the gp mean (minimum margin)
+        {
+          std::set<int> chosenExamplesForThisRun;
+          chosenExamplesForThisRun.clear();
+          for (int i = 0; i < incrementalAddSize; i++)
+          {
+            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+            
+            for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+            {
+              if (jIt->second < bestExample->second)
+                bestExample = jIt;
+            }
+            Example newExample;    
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ];
+            newExample.svec = new SparseVector( xTrain ); 
+            //actually this is the ACTIVE LEARNING step (query a label)
+            int label( y[ unlabeledExamples[bestExample->first] ] );
+            newExamples.push_back ( pair<int, Example> ( label, newExample ) );           
+            //remember the index, to safely remove this example afterwards from unlabeledExamples
+            chosenExamplesForThisRun.insert(bestExample->first);
+            scores.erase(bestExample);
+            pickedExamplesPerClass[label]++;
+          }    
+          
+          std::cerr << "print chosen examples: " << std::endl;
+          int tmpCnt(0);
+          for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
+          {
+            std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ *it ] << std::endl;
+          }           
+          
+          //delete the queried example from the set of unlabeled ones
+          //do this in an decreasing order in terms of indices to ensure valid access
+          for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+          {
+            unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );  
+            filenamesUnlabeled.erase( filenamesUnlabeled.begin()+(*it) );
+          }
+
+        }
+      
+        std::cerr << "Time used to compute query-scores for " <<  nrOfPossibleExamples << " examples: " << time_score_computation / CLOCKS_PER_SEC << " [s]" << std::endl;
+      } // end computation for GPMEAN, GPPREDVAR, or GPHEURISTIC
+           
+      
+      std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+          
+      time_t  IL_add_start_time = clock();
+
+      classifier->addMultipleExamples( newExamples );
+      
+      //remove the memory used in newExamples
+      for ( uint tmp = 0; tmp < newExamples.size(); tmp++ )
+      {
+        delete newExamples[tmp].second.svec;
+        newExamples[tmp].second.svec = NULL;
+      }
+      
+      float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
+      std::cerr << "Time for IL-adding of " << incrementalAddSize << " examples to already " <<  nrOfClassesUsed*trainExPerClass+incrementalAddSize*incrementationStep << "  training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
+      IL_training_times[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);
+
+           
+      //do the classification for evaluating the benefit of new examples
+      if ( do_classification )
+      {
+        confusionMatrix.set( 0.0 );
+        for ( uint i = 0 ; i < testData.size(); i++ )
+        {
+          Example example;
+          const Vector & xstar = testData[i];
+          SparseVector xstar_sparse ( xstar );
+          example.svec = &xstar_sparse;
+          OBJREC::ClassificationResult result;
+          
+          result = classifier->classify( example );
+          
+          result.classno_groundtruth = yTest[i];
+          confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+        }     
+
+
+        float time_classification = ( float ) ( clock() - start_time ) ;
+        if ( verbose >= LOW )
+          std::cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
+        ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );
+
+        confusionMatrix.normalizeRowsL1();
+        double avg_recognition_rate = 0.0;
+        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+        {
+          if ( verbose >= MEDIUM )
+          {
+            std::cerr << "Class no: " <<  i  << " : " << confusionMatrix ( i, i ) << std::endl;
+          }
+          avg_recognition_rate += confusionMatrix ( i, i );
+        }
+
+        avg_recognition_rate /= confusionMatrix.rows();
+
+        std::cerr << confusionMatrix;
+        std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl  << std::endl;
+
+        recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );
+      } //classification after IL adding
+    } //IL adding of different classes
+    std::cerr << "Final statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+    
+    //don't waste memory!
+    delete classifier;
+    for ( int tmp = 0; tmp < examples.size(); tmp++ )
+    {
+      delete examples[tmp].second.svec;
+      examples[tmp].second.svec = NULL;
+    }
+  }//runs 
+
+
+  std::cerr << "no of classes used: " << nrOfClassesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;
+  
+  // ================= EVALUATION ========================0
+
+  if ( do_classification )
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of classification_times: " << std::endl;
+    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+    {
+      for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::vector<float> mean_classification_times;
+    std::vector<float> std_dev_classification_times;
+    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+    {
+      float mean_classification_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_classification_time += *itRun;
+      }
+      mean_classification_time /= it->size();
+      mean_classification_times.push_back ( mean_classification_time );
+
+      double std_dev_classification_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_classification_time += pow ( *itRun - mean_classification_time, 2 );
+      }
+      std_dev_classification_time /= it->size();
+      std_dev_classification_time = sqrt ( std_dev_classification_time );
+      std_dev_classification_times.push_back ( std_dev_classification_time );
+    }
+    
+    int datasize ( nrOfClassesUsed*trainExPerClass );
+    for ( uint i = 0; i < mean_classification_times.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " mean classification time: " << mean_classification_times[i] << " std_dev classification time: " << std_dev_classification_times[i] << std::endl;
+      datasize += incrementalAddSize ;
+    }
+  }
+  else
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "No classification done therefor no classification times available." << std::endl;
+  }
+
+  std::cerr << "========================" << std::endl;
+  std::cerr << "content of IL_training_times: " << std::endl;
+  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+  {
+    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+    {
+      std::cerr << *jt << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  std::vector<float> mean_IL_training_times;
+  std::vector<float> std_dev_IL_training_times;
+  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+  {  
+    float mean_IL_training_time ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      mean_IL_training_time += *itRun;
+    }
+    mean_IL_training_time /= it->size();
+    mean_IL_training_times.push_back ( mean_IL_training_time );
+
+    double std_dev_IL_training_time ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      std_dev_IL_training_time += pow ( *itRun - mean_IL_training_time, 2 );
+    }
+    std_dev_IL_training_time /= it->size();
+    std_dev_IL_training_time = sqrt ( std_dev_IL_training_time );
+    std_dev_IL_training_times.push_back ( std_dev_IL_training_time );
+  }
+
+  int datasize ( nrOfClassesUsed*trainExPerClass );
+  for ( uint i = 0; i < mean_IL_training_times.size(); i++)
+  {
+    cerr << "size: " << datasize << " and adding " << incrementalAddSize << " mean IL_training time: " << mean_IL_training_times[i] << " std_dev IL_training time: " << std_dev_IL_training_times[i] << endl;
+    datasize += incrementalAddSize ;
+  }
+
+  if ( do_classification )
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of recognition_rates: " << std::endl;
+    for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+    {
+      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::cerr << "calculating final results " << std::endl;
+    std::vector<double> mean_recs;
+    std::vector<double> std_dev_recs;
+    for (std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+    {
+      double mean_rec ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_rec += *itRun;
+      }
+      mean_rec /= it->size();
+      mean_recs.push_back ( mean_rec );
+
+      double std_dev_rec ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_rec += pow ( *itRun - mean_rec, 2 );
+      }
+      std_dev_rec /= it->size();
+      std_dev_rec = sqrt ( std_dev_rec );
+      std_dev_recs.push_back ( std_dev_rec );
+    }
+
+    int datasize ( nrOfClassesUsed*trainExPerClass );
+    for ( uint i = 0; i < recognitions_rates.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " mean_IL: " << mean_recs[i] << " std_dev_IL: " << std_dev_recs[i] << std::endl;
+      datasize += incrementalAddSize ;
+    }
+  }
+  else
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "No classification done therefor no classification times available." << std::endl;
+  }
+
+  return 0;
+}

+ 783 - 0
progs/IL_AL_Binary.cpp

@@ -0,0 +1,783 @@
+/**
+* @file IL_AL_Binary.cpp
+* @brief Incrementally train the GP HIK classifier using the predictive variance and its approximations to select new samples, perform binary tests
+* @author Alexander Freytag
+* @date 11-06-2012
+*/
+#include <vector>
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+
+
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+//----------
+
+#include "vislearning/baselib/ProgressBar.h"
+#include <vislearning/baselib/Globals.h>
+
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+#include "vislearning/cbaselib/MultiDataset.h"
+#include <vislearning/cbaselib/LabeledSet.h>
+#include "vislearning/cbaselib/ClassificationResults.h"
+
+
+#include <vislearning/math/kernels/KernelData.h>
+
+//----------
+
+#include "gp-hik-exp/progs/datatools.h"
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+//
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
// Verbosity levels controlling how much diagnostic output is printed to std::cerr.
enum verbose_level {NONE = 0, LOW = 1,  MEDIUM = 2, EVERYTHING = 3};
// Active-learning strategies for selecting the next unlabeled example(s) to query.
enum QueryStrategy{
      RANDOM = 0, // pick unlabeled examples uniformly at random
      GPMEAN,     // pick the example with the smallest absolute GP mean score (minimum margin)
      GPPREDVAR,  // pick the example with the largest GP predictive variance
      GPHEURISTIC // pick the smallest |mean| / sqrt(noise^2 + variance) ratio (uncertainty heuristic)
    }; 
+    
/**
 * Converts an integer into its decimal string representation.
 * @param number value to convert
 * @return the value formatted as a std::string
 */
std::string convertInt(int number)
{
   std::ostringstream buffer;   // output-only stream suffices for formatting
   buffer << number;
   return buffer.str();
}
+
+/**
+    Computes kernel matrices from randomly or deterministically chosen training images and evaluates their performance, using ROI optimization
+*/
+int main ( int argc, char **argv )
+{
+  std::cout.precision ( 10 );
+  std::cerr.precision ( 10 );
+
+  NICE::Config conf ( argc, argv );
+  int trainExPerClass = conf.gI ( "main", "trainExPerClass", 10 );
+  int incrementalAddSize = conf.gI("main", "incrementalAddSize", 1);
+  int nrOfIncrements = conf.gI("main", "nrOfIncrements", 9);
+  int num_runs = conf.gI ( "main", "num_runs", 10 );  
+  bool do_classification = conf.gB ( "main", "do_classification", true );
+  
+  double squaredNoise = pow(  conf.gD("GPHIKClassifier", "noise", 0.01) , 2);
+  
+  int minClass = conf.gI( "main", "minClass", 0);
+  int maxClass = conf.gI( "main", "maxClass", 15);
+
+  string queryStrategyString = conf.gS( "main", "queryStrategy", "random");
+  QueryStrategy queryStrategy;
+  if (queryStrategyString.compare("gpMean") == 0)
+  {
+    queryStrategy = GPMEAN;
+  }
+  else if (queryStrategyString.compare("gpPredVar") == 0)
+  {
+    queryStrategy = GPPREDVAR;
+  }
+  else if (queryStrategyString.compare("gpHeuristic") == 0)
+  {
+    queryStrategy = GPHEURISTIC;
+  }  
+  else
+  {
+    queryStrategy = RANDOM;
+  }
+ 
+  
+  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
+  verbose_level verbose ( NONE );
+  switch ( verbose_int )
+  {
+    case 0:
+      verbose = NONE;
+      break;
+    case 1:
+      verbose = LOW;
+      break;
+    case 2:
+      verbose = MEDIUM;
+      break;
+    case 3:
+      verbose = EVERYTHING;
+      break;
+  }
+
+  std::string locationOfPermutations = conf.gS( "main", "locationOfPermutations", "/home/luetz/data/images/caltech-101/" );
+  std::string classselection_train = conf.gS( "main", "classselection_train", "*" );
+  std::string classselection_test = conf.gS( "main", "classselection_test", "*" );
+  std::string examples_train = conf.gS( "main", "examples_train", "seq * 100" );
+  std::string examples_test = conf.gS( "main", "examples_test", "seq * 50" );
+
+  /* initialize random seed: */
+  srand ( time ( NULL ) ); //with 0 for reproductive results
+//    srand ( 0 ); //with 0 for reproductive results
+  
+  for (int currentClass = minClass; currentClass <= maxClass; currentClass++)
+  {
+    std::cerr << "start binary experiments for class " << currentClass <<  std::endl;
+    
+    // ===========================  INIT =========================== 
+    
+    std::vector<std::vector<double> > recognitions_rates(nrOfIncrements+1);
+    std::vector<std::vector<double> > AUC_scores(nrOfIncrements+1);
+    std::vector<std::vector<float> > classification_times(nrOfIncrements+1);
+    std::vector<std::vector<float> > IL_training_times(nrOfIncrements);
+    
+    for ( int run = 0; run < num_runs; run++ )
+    {
+      std::cerr << "run: " << run << std::endl;    
+      
+      NICE::Config confCurrentRun ( conf );
+      confCurrentRun.sS( "train"+convertInt(run), "dataset", locationOfPermutations+"run"+convertInt(run)+".train" );
+      confCurrentRun.sS( "train"+convertInt(run), "classselection_train", classselection_train );
+      confCurrentRun.sS( "train"+convertInt(run), "examples_train", examples_train );
+      confCurrentRun.sS( "test"+convertInt(run), "dataset", locationOfPermutations+"run"+convertInt(run)+".test" );
+      confCurrentRun.sS( "test"+convertInt(run), "classselection_test", classselection_test );
+      confCurrentRun.sS( "train"+convertInt(run), "examples_test", examples_test );
+     
+      
+      //15-scenes settings
+      std::string ext = confCurrentRun.gS("main", "ext", ".txt");
+      std::cerr << "Using cache extension: " << ext << std::endl;
+
+      OBJREC::MultiDataset md ( &confCurrentRun );
+      
+      std::cerr << "now read the dataset" << std::endl;
+    
+      // read training set
+      vector< NICE::Vector > trainDataOrig;
+      Vector y;
+      string trainRun ( "train" + convertInt( run ) );
+      std::cerr << "look for " << trainRun << std::endl;
+      const LabeledSet *train = md[ trainRun ]; //previously, we only selected "train", no we select the permutation for this run
+            
+      //we just store the filenames to have a look which image we picked in every step
+      std::vector<std::string> filenamesTraining;
+      readData< std::vector< NICE::Vector >, NICE::Vector >  ( confCurrentRun, *train, trainDataOrig, y, filenamesTraining, ext );
+      std::cerr << "dimension: "<< trainDataOrig[0].size() << std::endl;
+      std::cerr << "length L1: " << trainDataOrig[0].normL1() << " length L2: " << trainDataOrig[0].normL2() <<std::endl;
+      
+      std::cerr << "label vector after reading: " << y << std::endl;
+      
+
+      bool firstPositivePrinted( false );
+      //assure the binary setting
+      for ( uint i = 0; i < y.size(); i++ )
+      {
+        if ( y[i] == currentClass)
+        {
+          if ( !firstPositivePrinted )
+          {
+            std::cerr << "first positive example: " << filenamesTraining[i] << std::endl;
+            firstPositivePrinted = true;
+          }
+          y[i] = 1;
+        }
+        else 
+          y[i] = 0;//-1;        
+      }
+      
+      std::cerr << "resulting binary label vector:" << y << std::endl;
+      
+      std::set<int> classesAvailable;
+      classesAvailable.insert( 0 ); //we have a single negative class
+      classesAvailable.insert( 1 ); //and we have a single positive class
+      
+      std::map<int,int> nrExamplesPerClassInDataset; //simply count how many examples for every class are available
+      std::map<int,std::vector<int> > examplesPerClassInDataset;  //as well as their corresponding indices in the dataset
+      
+      //initialize this storage
+      for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
+      {
+        nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
+        examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
+      }
+
+      //store the indices of the examples
+      for ( uint i = 0; i < y.size(); i++ )
+      {
+        (examplesPerClassInDataset.find( y[i] )->second).push_back(i);
+      }
+      
+      //and count how many examples are in every class
+      for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
+      {
+        nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
+      }
+      
+      //simple output to tell how many examples we have for every class
+      for ( std::map<int,int>::const_iterator it =  nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
+      {
+        cerr << it->first << ": " << it->second << endl;
+      }    
+        
+      Examples examples;   
+      
+      //count how many examples of every class we have while actively selecting new examples
+      //NOTE works only if we have subsequent class numbers
+      NICE::Vector pickedExamplesPerClass( classesAvailable.size(), trainExPerClass);
+      
+      std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
+      
+      //chose examples for every class used for training
+      //we will always use the first examples from each class, since the dataset comes already randomly ordered
+      for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++)
+      {
+        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+        std::cerr << "pick training examples for class " << *clIt << std::endl;
+        
+        for (int i = 0; i < trainExPerClass; i++)
+        {
+          std::cerr << "i: " << i << std::endl;
+          int exampleIndex ( 0 ); //old: rand() % ( exIt->second.size() ) );
+          std::cerr << "pick example " << exIt->second[exampleIndex] << " - " << y[exIt->second[exampleIndex] ] << " -- " << filenamesTraining[exIt->second[exampleIndex]] << std::endl;
+          
+          Example example;
+          NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
+          example.svec = new SparseVector(xTrain);
+          //let's take this example and its corresponding label (which should be *clIt)
+          examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) ); 
+          //
+          exIt->second.erase(exIt->second.begin()+exampleIndex);
+        }
+      }    
+      
+      std::vector<std::string> filenamesUnlabeled;
+      filenamesUnlabeled.clear();      
+      
+      //which examples are left to be actively chosen lateron?
+      std::vector<int> unlabeledExamples( y.size() - trainExPerClass*classesAvailable.size() );
+      int exCnt( 0 );
+      for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
+      {
+        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+        //list all examples of this specific class
+        for (std::vector<int>::const_iterator it = exIt->second.begin(); it != exIt->second.end(); it++)
+        {
+          unlabeledExamples[exCnt] = *it;
+          exCnt++;
+        filenamesUnlabeled.push_back( filenamesTraining[*it] );          
+        }
+      }
+      
+      time_t  prep_start_time = clock();
+
+      //TODO balancing?
+      //this should decrease the random suffering, so we will not do this ;)
+      GPHIKClassifierNICE * classifier  = new GPHIKClassifierNICE( &confCurrentRun );
+      
+      FeaturePool fp; // will be ignored
+      classifier->train ( fp, examples );
+
+      float time_preparation = ( float ) ( clock() - prep_start_time ) ;
+      std::cerr << "Time for initial training: " << time_preparation / CLOCKS_PER_SEC << std::endl;
+      
+      int nrOfClassesUsed = classesAvailable.size();
+      
+        // ------------------ TESTING
+      string testRun ( "test" + convertInt( run ) );
+      const LabeledSet *test = md[ testRun ]; //previously, we only selected "test", now we select the permutation for this run
+      VVector testData;
+      Vector yTest;
+      readData< VVector, Vector > ( confCurrentRun, *test, testData, yTest, ext );
+      
+      NICE::Matrix confusionMatrix ( 2, 2 );
+      confusionMatrix.set ( 0.0 );      
+      
+      time_t  start_time = clock();
+
+      std::vector<int> chosen_examples_per_class ( nrOfClassesUsed );
+      
+      std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+
+      if ( do_classification  )
+      {
+        ClassificationResults results;
+        for ( uint i = 0 ; i < testData.size(); i++ )
+        {
+          Example example;
+          const Vector & xstar = testData[i];
+          SparseVector xstar_sparse ( xstar );
+          OBJREC::ClassificationResult result;
+          example.svec = &xstar_sparse;
+          
+          result = classifier->classify( example );
+          
+          result.classno_groundtruth = ( yTest[i] == 1 ) ? 1 : 0;
+//           std::cerr << "gt: " << result.classno_groundtruth << " -- " << result.classno << std::endl;
+//           (result.scores).store( std::cerr );
+          confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+          results.push_back( result );
+        }
+
+        float time_classification = ( float ) ( clock() - start_time ) ;
+        if ( verbose >= LOW )
+          cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
+        ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );
+        
+        confusionMatrix.normalizeRowsL1();
+        std::cerr << confusionMatrix;
+        double avg_recognition_rate = 0.0;
+        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+        {
+          avg_recognition_rate += confusionMatrix ( i, i );
+        }        
+        avg_recognition_rate /= confusionMatrix.rows();
+        std::cerr << "class: " << currentClass << " run: " << run << " avg recognition rate: " <<  avg_recognition_rate*100 << " % -- " << examples.size() << " training examples used" << std::endl;
+
+        recognitions_rates[0].push_back ( avg_recognition_rate*100 );        
+
+        std::cerr << "perform auc evaluation "<< std::endl;
+        double score = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+        
+        std::cerr << "class: " << currentClass << " run: " << run << " AUC-score: " <<  score << " % -- " << examples.size() << " training examples used" << std::endl << std::endl;
+
+        AUC_scores[0].push_back ( score*100 );
+      }
+
+      //Now start the Incremental-Learning-Part
+      
+      for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
+      {
+        //chose examples for every class used for training
+        Examples newExamples;
+        
+        
+        //simply count how many possible example we have 
+        int nrOfPossibleExamples(  unlabeledExamples.size() );
+        
+        if (queryStrategy == RANDOM)
+        {
+          std::cerr << "print chosen examples: " << std::endl;           
+          for (int i = 0; i < incrementalAddSize; i++)
+          {        
+            int exampleIndex ( rand() % ( unlabeledExamples.size() ) );
+            
+            Example newExample;
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exampleIndex] ];
+            newExample.svec = new SparseVector( xTrain ); 
+            int label( y[ unlabeledExamples[exampleIndex] ] );
+            newExamples.push_back ( pair<int, Example> ( label, newExample ) );
+            unlabeledExamples.erase( unlabeledExamples.begin()+exampleIndex );
+            std::cerr << exampleIndex+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ exampleIndex ] << std::endl;          
+            filenamesUnlabeled.erase( filenamesUnlabeled.begin()+exampleIndex );            
+            pickedExamplesPerClass[label]++;
+          }
+        }// end computation for RANDOM
+        else if ( (queryStrategy == GPMEAN) || (queryStrategy == GPPREDVAR) || (queryStrategy == GPHEURISTIC) )
+        {
+          //compute uncertainty values for all examples according to the query strategy
+          std::vector<std::pair<int,double> > scores;
+          scores.clear();
+          time_t  unc_pred_start_time = clock();
+  //         std::cerr << "possible examples to query: " << unlabeledExamples.size() << std::endl;
+          for (uint exIndex = 0; exIndex < unlabeledExamples.size(); exIndex++)
+          {
+              Example example;    
+              NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exIndex] ];
+              SparseVector xTrainSparse ( xTrain );
+              example.svec = &xTrainSparse;
+              
+              if (queryStrategy == GPMEAN)
+              {
+                ClassificationResult r = classifier->classify( example );
+                double bestScore( numeric_limits<double>::max() );
+                for( int clCnt = 0; clCnt < nrOfClassesUsed; clCnt++)
+                {
+                  if ( fabs(r.scores[clCnt]) < bestScore )
+                    bestScore = fabs(r.scores[clCnt]);
+                }
+                scores.push_back( std::pair<int,double> ( exIndex, bestScore ) );
+              }
+              else if (queryStrategy == GPPREDVAR)
+              {
+                NICE::Vector singleUncertainties;
+                //use the pred variance computation specified in the config file
+                classifier->predictUncertainty( example, singleUncertainties );
+                //take the maximum of the scores for the predictive variance
+                scores.push_back( std::pair<int,double> ( exIndex, singleUncertainties.Max()) );
+              }
+              else if (queryStrategy == GPHEURISTIC)
+              {
+                NICE::Vector singleUncertainties;
+                //use the pred variance computation specified in the config file
+                classifier->predictUncertainty( example, singleUncertainties );
+                //compute the mean values for every class
+                ClassificationResult r = classifier->classify( example );
+                for ( int tmp = 0; tmp < singleUncertainties.size(); tmp++ )
+                {
+                  singleUncertainties[tmp] = fabs(r.scores[tmp]) / sqrt( squaredNoise + singleUncertainties[tmp] );
+                }              
+                //take the minimum of the scores for the heuristic measure
+                scores.push_back( std::pair<int,double> ( exIndex, singleUncertainties.Min()) );
+              }
+          }
+          float time_score_computation = ( float ) ( clock() - unc_pred_start_time ) ;
+            
+          //pick the ones with best score
+          //we could speed this up using a more sophisticated search method
+          
+          if (queryStrategy == GPPREDVAR) //take the maximum of the scores for the predictive variance
+          {
+            std::set<int> chosenExamplesForThisRun;
+            chosenExamplesForThisRun.clear();          
+            for (int i = 0; i < incrementalAddSize; i++)
+            {
+              std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+              std::vector<std::pair<int,double> >::iterator worstExample = scores.begin();
+              
+              for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+              {
+                if (jIt->second > bestExample->second)
+                  bestExample = jIt;
+                if (jIt->second < worstExample->second)
+                  worstExample = jIt;                
+              }
+              std::cerr << "i: " << i << " bestExample: " << bestExample->second << " worstExample: " << worstExample->second << std::endl;
+              
+              Example newExample;    
+              NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ]; 
+              newExample.svec = new SparseVector( xTrain ); 
+              //actually this is the ACTIVE LEARNING step (query a label)
+              int label( y[ unlabeledExamples[bestExample->first] ] );
+              newExamples.push_back ( pair<int, Example> ( label, newExample ) );    
+              //remember the index, to safely remove this example afterwards from unlabeledExamples
+              chosenExamplesForThisRun.insert(bestExample->first);
+              scores.erase(bestExample);
+              pickedExamplesPerClass[label]++;
+            }
+            
+//             std::cerr << "print chosen examples: " << std::endl; 
+/*            int tmpCnt(0);
+            for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
+            {
+              std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ *it ] << std::endl;
+            } */              
+            //delete the queried examples from the set of unlabeled ones
+            //do this in an decreasing order in terms of indices to ensure valid access
+            for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+            {
+              unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+            }          
+          }
+          else //take the minimum of the scores for the heuristic and the gp mean (minimum margin)
+          {
+            std::set<int> chosenExamplesForThisRun;
+            chosenExamplesForThisRun.clear();
+            for (int i = 0; i < incrementalAddSize; i++)
+            {
+              std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+              std::vector<std::pair<int,double> >::iterator worstExample = scores.begin();
+              
+              for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+              {
+                if (jIt->second < bestExample->second)
+                  bestExample = jIt;
+               if (jIt->second > worstExample->second)
+                  worstExample = jIt;               
+              }
+              std::cerr << "i: " << i << " bestExample: " << bestExample->second << " worstExample: " << worstExample->second << std::endl;
+              Example newExample;    
+              NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ];
+              newExample.svec = new SparseVector( xTrain ); 
+              //actually this is the ACTIVE LEARNING step (query a label)
+              int label( y[ unlabeledExamples[bestExample->first] ] );
+              newExamples.push_back ( pair<int, Example> ( label, newExample ) );           
+              //remember the index, to safely remove this example afterwards from unlabeledExamples
+              chosenExamplesForThisRun.insert(bestExample->first);
+              scores.erase(bestExample);
+              pickedExamplesPerClass[label]++;
+            }  
+            
+/*            std::cerr << "print chosen examples: " << std::endl;
+            int tmpCnt(0);
+            for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
+            {
+              std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ *it ] << std::endl;
+            }  */           
+            
+            //delete the queried example from the set of unlabeled ones
+            //do this in an decreasing order in terms of indices to ensure valid access
+            for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+            {
+              unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+            }
+
+          }
+        
+          std::cerr << "Time used to compute query-scores for " <<  nrOfPossibleExamples << " examples: " << time_score_computation / CLOCKS_PER_SEC << " [s]" << std::endl;
+        } // end computation for GPMEAN, GPPREDVAR, or GPHEURISTIC
+        
+        std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+            
+        time_t  IL_add_start_time = clock();
+
+        classifier->addMultipleExamples( newExamples );
+        
+        //remove the memory used in newExamples
+        for ( uint tmp = 0; tmp < newExamples.size(); tmp++ )
+        {
+          delete newExamples[tmp].second.svec;
+          newExamples[tmp].second.svec = NULL;
+        }
+        
+        float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
+        std::cerr << "Time for IL-adding of " << incrementalAddSize << " examples to already " <<  nrOfClassesUsed*trainExPerClass+incrementalAddSize*incrementationStep << "  training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
+        IL_training_times[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);    
+            
+        //do the classification for evaluating the benefit of new examples
+        if ( do_classification )
+        {
+          time_t  start_time = clock();
+          ClassificationResults results;
+          confusionMatrix.set( 0.0 );
+          for ( uint i = 0 ; i < testData.size(); i++ )
+          {
+            Example example;
+            const Vector & xstar = testData[i];
+            SparseVector xstar_sparse ( xstar );
+            example.svec = &xstar_sparse;
+            OBJREC::ClassificationResult result;
+            
+            result = classifier->classify( example );
+            
+            result.classno_groundtruth = ( yTest[i] == 1 ) ? 1 : 0;
+            results.push_back( result );      
+            confusionMatrix ( result.classno_groundtruth , result.classno ) ++;            
+          }     
+
+          float time_classification = ( float ) ( clock() - start_time ) ;
+          if ( verbose >= LOW )
+            std::cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
+          ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );
+          
+          confusionMatrix.normalizeRowsL1();
+          std::cerr << confusionMatrix;
+          double avg_recognition_rate ( 0.0 );
+          for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+          {
+            avg_recognition_rate += confusionMatrix ( i, i );
+          }
+          avg_recognition_rate /= confusionMatrix.rows();          
+          
+          std::cerr << "class: " << currentClass << " run: " << run << " avg recognition rate: " <<  avg_recognition_rate*100 << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl;
+
+          recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );           
+
+          
+          double score = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+          std::cerr << "class: " << currentClass << " run: " << run << " AUC-score: " <<  score << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl << std::endl;          
+
+          AUC_scores[incrementationStep+1].push_back ( score*100 );
+        } //classification after IL adding */
+      } //IL adding of different classes
+      std::cerr << "Final statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+      
+      //don't waste memory!
+      delete classifier;
+      for ( int tmp = 0; tmp < examples.size(); tmp++ )
+      {
+        delete examples[tmp].second.svec;
+        examples[tmp].second.svec = NULL;
+      }
+    }//runs 
+       
+    // ================= EVALUATION =========================
+    
+    int nrOfClassesUsed ( 2 ); //binary setting
+
+    if ( do_classification )
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << " final evaluation for class: " << currentClass << std::endl;
+      std::cerr << "content of classification_times: " << std::endl;
+      for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+      {
+        for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+        {
+          std::cerr << *jt << " ";
+        }
+        std::cerr << std::endl;
+      }
+
+      std::vector<float> mean_classification_times;
+      std::vector<float> std_dev_classification_times;
+      for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+      {
+        float mean_classification_time ( 0.0 );
+        for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          mean_classification_time += *itRun;
+        }
+        mean_classification_time /= it->size();
+        mean_classification_times.push_back ( mean_classification_time );
+
+        double std_dev_classification_time ( 0.0 );
+        for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          std_dev_classification_time += pow ( *itRun - mean_classification_time, 2 );
+        }
+        std_dev_classification_time /= it->size();
+        std_dev_classification_time = sqrt ( std_dev_classification_time );
+        std_dev_classification_times.push_back ( std_dev_classification_time );
+      }
+      
+      int datasize ( nrOfClassesUsed*trainExPerClass );
+      for ( uint i = 0; i < mean_classification_times.size(); i++)
+      {
+        std::cerr << "size: " << datasize << " mean classification time: " << mean_classification_times[i] << " std_dev classification time: " << std_dev_classification_times[i] << std::endl;
+        datasize += incrementalAddSize ;
+      }
+    }
+    else
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << "No classification done therefor no classification times available." << std::endl;
+    }
+
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of IL_training_times for class : "<< currentClass << std::endl;
+    for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+    {
+      for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::vector<float> mean_IL_training_times;
+    std::vector<float> std_dev_IL_training_times;
+    for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+    {  
+      float mean_IL_training_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_IL_training_time += *itRun;
+      }
+      mean_IL_training_time /= it->size();
+      mean_IL_training_times.push_back ( mean_IL_training_time );
+
+      double std_dev_IL_training_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_IL_training_time += pow ( *itRun - mean_IL_training_time, 2 );
+      }
+      std_dev_IL_training_time /= it->size();
+      std_dev_IL_training_time = sqrt ( std_dev_IL_training_time );
+      std_dev_IL_training_times.push_back ( std_dev_IL_training_time );
+    }
+
+    int datasize ( nrOfClassesUsed*trainExPerClass );
+    for ( uint i = 0; i < mean_IL_training_times.size(); i++)
+    {
+      cerr << "size: " << datasize << " and adding " << incrementalAddSize << " mean IL_training time: " << mean_IL_training_times[i] << " std_dev IL_training time: " << std_dev_IL_training_times[i] << endl;
+      datasize += incrementalAddSize ;
+    }
+
+    if ( do_classification )
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << "content of recognition_rates for class : "<< currentClass << std::endl;
+      for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+      {
+        for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+        {
+          std::cerr << *jt << " ";
+        }
+        std::cerr << std::endl;
+      }
+
+      std::cerr << "calculating final recognition_rates for class : "<< currentClass << std::endl;
+      std::vector<double> mean_recs;
+      std::vector<double> std_dev_recs;
+      for (std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+      {
+        double mean_rec ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          mean_rec += *itRun;
+        }
+        mean_rec /= it->size();
+        mean_recs.push_back ( mean_rec );
+
+        double std_dev_rec ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          std_dev_rec += pow ( *itRun - mean_rec, 2 );
+        }
+        std_dev_rec /= it->size();
+        std_dev_rec = sqrt ( std_dev_rec );
+        std_dev_recs.push_back ( std_dev_rec );
+      }
+
+      int datasize ( nrOfClassesUsed*trainExPerClass );
+      for ( uint i = 0; i < recognitions_rates.size(); i++)
+      {
+        std::cerr << "size: " << datasize << " mean_IL: " << mean_recs[i] << " std_dev_IL: " << std_dev_recs[i] << std::endl;
+        datasize += incrementalAddSize ;
+      }
+      
+      std::cerr << "========================" << std::endl;
+      std::cerr << "content of AUC_scores for class : "<< currentClass << std::endl;
+      for ( std::vector<std::vector<double> >::const_iterator it = AUC_scores.begin(); it != AUC_scores.end(); it++ )
+      {
+        for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+        {
+          std::cerr << *jt << " ";
+        }
+        std::cerr << std::endl;
+      }
+
+      std::cerr << "calculating final AUC_scores for class : "<< currentClass << std::endl;
+      std::vector<double> mean_aucs;
+      std::vector<double> std_dev_aucs;
+      for (std::vector<std::vector<double> >::const_iterator it = AUC_scores.begin(); it != AUC_scores.end(); it++ )
+      {
+        double mean_auc ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          mean_auc += *itRun;
+        }
+        mean_auc /= it->size();
+        mean_aucs.push_back ( mean_auc );
+
+        double std_dev_auc ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          std_dev_auc += pow ( *itRun - mean_auc, 2 );
+        }
+        std_dev_auc /= it->size();
+        std_dev_auc = sqrt ( std_dev_auc );
+        std_dev_aucs.push_back ( std_dev_auc );
+      }
+
+      datasize  = nrOfClassesUsed*trainExPerClass;
+      for ( uint i = 0; i < recognitions_rates.size(); i++)
+      {
+        std::cerr << "size: " << datasize << " mean_IL: " << mean_aucs[i] << " std_dev_IL: " << std_dev_aucs[i] << std::endl;
+        datasize += incrementalAddSize ;
+      }      
+    }
+    else
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << "No classification done therefor no classification times available." << std::endl;
+    } 
+    
+  } //for int currentClass...
+
+  return 0;
+}

+ 902 - 0
progs/IL_AL_Binary_GPBaseline.cpp

@@ -0,0 +1,902 @@
+/**
+* @file IL_AL_Binary_GPBaseline.cpp
+* @brief Incrementally train the GP HIK classifier using the predictive variance and its approximations to select new samples, perform binary tests. We do not use the fast-hik implementations but perform the computations manually
+* @author Alexander Freytag
+* @date 11-06-2012
+*/
+#include <vector>
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+
+
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+#include <core/basics/Timer.h>
+
+#include <core/algebra/CholeskyRobust.h>
+
+#include <core/vector/Algorithms.h>
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+//----------
+
+#include <vislearning/baselib/ProgressBar.h>
+#include <vislearning/baselib/Globals.h>
+
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/cbaselib/LabeledSet.h>
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/cbaselib/Example.h>
+
+#include <vislearning/math/kernels/KernelData.h>
+
+//----------
+
+#include "gp-hik-exp/progs/datatools.h"
+
+//
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+// Verbosity levels for console output; compared with >= (e.g. verbose >= LOW).
+enum verbose_level {NONE = 0, LOW = 1,  MEDIUM = 2, EVERYTHING = 3};
+// Active-learning query strategies:
+//   RANDOM      - pick unlabeled examples uniformly at random
+//   GPMEAN      - smallest absolute GP predictive mean (minimum margin)
+//   GPPREDVAR   - largest GP predictive variance
+//   GPHEURISTIC - smallest |mean| / sqrt(noise^2 + variance) ratio
+enum QueryStrategy{
+      RANDOM = 0,
+      GPMEAN,
+      GPPREDVAR,
+      GPHEURISTIC
+    }; 
+    
+/**
+* @brief Convert an integer to its decimal string representation
+*        (used below to build per-run config section names such as "train0").
+* @param number value to convert
+* @return string with the decimal digits of number
+*/
+std::string convertInt(int number)
+{
+   stringstream ss;//create a stringstream
+   ss << number;//add number to the stream
+   return ss.str();//return a string with the contents of the stream
+}
+
+/**
+* @brief Histogram-intersection similarity of two sparse vectors:
+*        sum over all dimensions of min(a_i, b_i). Implemented as a merge
+*        over the two sorted index sequences; dimensions present in only
+*        one vector contribute zero. Serves as the kernel function for the
+*        GP baseline in this program.
+* @param a first sparse feature vector
+* @param b second sparse feature vector
+* @return accumulated minimum intersection value
+*/
+double measureMinimumDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b)
+{
+  double sum(0.0);
+    
+  NICE::SparseVector::const_iterator aIt = a.begin();
+  NICE::SparseVector::const_iterator bIt = b.begin();
+   
+  while ( (aIt != a.end()) && (bIt != b.end()) )
+  {
+    if (aIt->first == bIt->first)
+    {
+      sum += std::min( aIt->second, bIt->second );      
+      aIt++;
+      bIt++;
+    }
+    else if ( aIt->first < bIt->first)
+    {
+      //minimum is zero
+      aIt++;      
+    }
+    else
+    {
+      //minimum is zero
+      bIt++;       
+    }
+  }
+  
+  //no need to process the remaining entries of either vector: the other
+  //vector is zero in those dimensions, so each min(a_i, b_i) would be zero
+  
+  return sum;
+}
+
+/**
+    Computes kernel matrices from randomly or deterministically chosen training images and evaluates their performance, using ROI-optimization
+*/
+int main ( int argc, char **argv )
+{
+  std::cout.precision ( 10 );
+  std::cerr.precision ( 10 );
+
+  NICE::Config conf ( argc, argv );
+  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
+  int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);
+  int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);
+  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );  
+  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );
+  
+  double noise = conf.gD("GPHIKClassifier", "noise", 0.01);
+  double squaredNoise = pow( noise, 2);
+  
+  int minClass = conf.gI( "main", "minClass", 0);
+  int maxClass = conf.gI( "main", "maxClass", 15);
+
+  string queryStrategyString = conf.gS( "main", "queryStrategy", "random");
+  QueryStrategy queryStrategy;
+  if (queryStrategyString.compare("gpMean") == 0)
+  {
+    queryStrategy = GPMEAN;
+  }
+  else if (queryStrategyString.compare("gpPredVar") == 0)
+  {
+    queryStrategy = GPPREDVAR;
+  }
+  else if (queryStrategyString.compare("gpHeuristic") == 0)
+  {
+    queryStrategy = GPHEURISTIC;
+  }  
+  else
+  {
+    queryStrategy = RANDOM;
+  }
+ 
+  
+  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
+  verbose_level verbose ( NONE );
+  switch ( verbose_int )
+  {
+    case 0:
+      verbose = NONE;
+      break;
+    case 1:
+      verbose = LOW;
+      break;
+    case 2:
+      verbose = MEDIUM;
+      break;
+    case 3:
+      verbose = EVERYTHING;
+      break;
+  }
+
+  std::string locationOfPermutations = conf.gS( "main", "locationOfPermutations", "/home/luetz/data/images/caltech-101/" );
+  std::string classselection_train = conf.gS( "main", "classselection_train", "*" );
+  std::string classselection_test = conf.gS( "main", "classselection_test", "*" );
+  std::string examples_train = conf.gS( "main", "examples_train", "seq * 100" );
+  std::string examples_test = conf.gS( "main", "examples_test", "seq * 50" );
+
+  /* initialize random seed: */
+  srand ( time ( NULL ) ); //with 0 for reproductive results
+//    srand ( 0 ); //with 0 for reproductive results
+  
+  for (int currentClass = minClass; currentClass <= maxClass; currentClass++)
+  {
+    std::cerr << "start binary experiments for class " << currentClass <<  std::endl;
+    
+    // ===========================  INIT =========================== 
+    
+    std::vector<std::vector<double> > recognitions_rates(nrOfIncrements+1);
+    std::vector<std::vector<double> > AUC_scores(nrOfIncrements+1);
+    std::vector<std::vector<float> > classification_times(nrOfIncrements+1);
+    std::vector<std::vector<float> > IL_training_times(nrOfIncrements);
+    
+    for ( int run = 0; run < num_runs; run++ )
+    {
+      std::cerr << "run: " << run << std::endl;    
+      
+      NICE::Config confCurrentRun ( conf );
+      confCurrentRun.sS( "train"+convertInt(run), "dataset", locationOfPermutations+"run"+convertInt(run)+".train" );
+      confCurrentRun.sS( "train"+convertInt(run), "classselection_train", classselection_train );
+      confCurrentRun.sS( "train"+convertInt(run), "examples_train", examples_train );
+      confCurrentRun.sS( "test"+convertInt(run), "dataset", locationOfPermutations+"run"+convertInt(run)+".test" );
+      confCurrentRun.sS( "test"+convertInt(run), "classselection_test", classselection_test );
+      confCurrentRun.sS( "train"+convertInt(run), "examples_test", examples_test );
+     
+      
+      //15-scenes settings
+      std::string ext = confCurrentRun.gS("main", "ext", ".txt");
+      std::cerr << "Using cache extension: " << ext << std::endl;
+
+      OBJREC::MultiDataset md ( &confCurrentRun );
+      
+      std::cerr << "now read the dataset" << std::endl;
+    
+      // read training set
+      vector< NICE::Vector > trainDataOrig;
+      Vector y;
+      string trainRun ( "train" + convertInt( run ) );
+      std::cerr << "look for " << trainRun << std::endl;
+      const LabeledSet *train = md[ trainRun ]; //previously, we only selected "train", now we select the permutation for this run
+            
+      //we just store the filenames to have a look which image we picked in every step
+      std::vector<std::string> filenamesTraining;
+      readData< std::vector< NICE::Vector >, NICE::Vector >  ( confCurrentRun, *train, trainDataOrig, y, filenamesTraining, ext );
+      
+      std::cerr << "label vector after reading: " << y << std::endl;
+      
+
+      bool firstPositivePrinted( false );
+      //assure the binary setting
+      for ( uint i = 0; i < y.size(); i++ )
+      {
+        if ( y[i] == currentClass)
+        {
+          if ( !firstPositivePrinted )
+          {
+            std::cerr << "first positive example: " << filenamesTraining[i] << std::endl;
+            firstPositivePrinted = true;
+          }
+          y[i] = 1;
+        }
+        else 
+          y[i] = 0;//-1;        
+      }
+           
+      std::cerr << "resulting binary label vector:" << y << std::endl;
+      
+      std::set<int> classesAvailable;
+      classesAvailable.insert( 0 ); //we have a single negative class
+      classesAvailable.insert( 1 ); //and we have a single positive class
+      
+      std::map<int,int> nrExamplesPerClassInDataset; //simply count how many examples for every class are available
+      std::map<int,std::vector<int> > examplesPerClassInDataset;  //as well as their corresponding indices in the dataset
+      
+      //initialize this storage
+      for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
+      {
+        nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
+        examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
+      }
+
+      //store the indices of the examples
+      for ( uint i = 0; i < y.size(); i++ )
+      {
+        (examplesPerClassInDataset.find( y[i] )->second).push_back(i);
+      }
+      
+      //and count how many examples are in every class
+      for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
+      {
+        nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
+      }
+      
+      //simple output to tell how many examples we have for every class
+      for ( std::map<int,int>::const_iterator it =  nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
+      {
+        cerr << it->first << ": " << it->second << endl;
+      }    
+        
+      Examples examples;   
+      
+      //count how many examples of every class we have while actively selecting new examples
+      //NOTE works only if we have subsequent class numbers
+      NICE::Vector pickedExamplesPerClass( classesAvailable.size(), trainExPerClass);
+      
+      std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
+      
+      //chose examples for every class used for training
+      //we will always use the first examples from each class, since the dataset comes already randomly ordered
+      for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++)
+      {
+        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+        std::cerr << "pick training examples for class " << *clIt << std::endl;
+        
+        for (int i = 0; i < trainExPerClass; i++)
+        {
+          std::cerr << "i: " << i << std::endl;
+          int exampleIndex ( 0 ); //old: rand() % ( exIt->second.size() ) );
+          std::cerr << "pick example " << exIt->second[exampleIndex] << " - " << y[exIt->second[exampleIndex] ] << " -- " << filenamesTraining[exIt->second[exampleIndex]] << std::endl;
+          
+          Example example;
+          NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
+          example.svec = new SparseVector(xTrain);
+          //let's take this example and its corresponding label (which should be *clIt)
+          examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) ); 
+          //
+          exIt->second.erase(exIt->second.begin()+exampleIndex);
+        }
+      }    
+      
+      std::vector<std::string> filenamesUnlabeled;
+      filenamesUnlabeled.clear();      
+      
+      //which examples are left to be actively chosen lateron?
+      std::vector<int> unlabeledExamples( y.size() - trainExPerClass*classesAvailable.size() );
+      int exCnt( 0 );
+      for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
+      {
+        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+        //list all examples of this specific class
+        for (std::vector<int>::const_iterator it = exIt->second.begin(); it != exIt->second.end(); it++)
+        {
+          unlabeledExamples[exCnt] = *it;
+          exCnt++;
+        filenamesUnlabeled.push_back( filenamesTraining[*it] );          
+        }
+      }
+
+      //brute-force GP regression training
+      Timer t;
+      t.start();
+      NICE::Matrix kernelMatrix (examples.size(), examples.size(), 0.0);
+        
+      //and set zero to minus one for the internal GP computations for expected mean
+      NICE::Vector yBinGP ( examples.size(), -1 );   
+      
+      //now compute the kernelScores for every element
+      double kernelScore(0.0);
+      for ( uint i = 0; i < examples.size(); i++ )
+      {
+        for ( uint j = i; j < examples.size(); j++ )
+        {
+          kernelScore = measureMinimumDistance(* examples[i].second.svec, * examples[j].second.svec);
+          kernelMatrix(i,j) = kernelScore;
+          if (i != j)
+            kernelMatrix(j,i) = kernelScore;
+        }
+        if ( examples[i].first == 1)
+          yBinGP[i] = 1;
+      }  
+      
+      //adding some noise, if necessary
+      if ( squaredNoise != 0.0 )
+      {
+        kernelMatrix.addIdentity( noise );
+      }
+      else
+      {
+        //zero was already set
+      }    
+      std::cerr << "noise: " << noise << std::endl;
+      std::cerr << "kernelMatrix: " << kernelMatrix << std::endl;
+    
+      //compute its inverse
+      //noise is already added :)
+      
+      CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+      
+      NICE::Matrix choleskyMatrix ( examples.size(), examples.size(), 0.0 );      
+      cr.robustChol ( kernelMatrix, choleskyMatrix );   
+      NICE::Vector GPrightPart ( examples.size() );
+      choleskySolveLargeScale ( choleskyMatrix, yBinGP, GPrightPart );          
+      
+      std::cerr << "choleskyMatrix: " << choleskyMatrix << std::endl;     
+      
+      t.stop();
+      cerr << "Time used for initial training: " << t.getLast() << endl;      
+      
+      int nrOfClassesUsed = classesAvailable.size();
+      
+        // ------------------ TESTING
+      string testRun ( "test" + convertInt( run ) );
+      const LabeledSet *test = md[ testRun ]; //previously, we only selected "test", now we select the permutation for this run
+      VVector testData;
+      Vector yTest;
+      readData< VVector, Vector > ( confCurrentRun, *test, testData, yTest, ext );
+      
+      NICE::Matrix confusionMatrix ( 2, 2 );
+      confusionMatrix.set ( 0.0 );      
+      
+      time_t  start_time = clock();
+
+      std::vector<int> chosen_examples_per_class ( nrOfClassesUsed );
+      
+      std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+
+      if ( do_classification  )
+      {
+        ClassificationResults results;
+        for ( uint i = 0 ; i < testData.size(); i++ )
+        {
+          const Vector & xstar = testData[i];
+          SparseVector xstar_sparse ( xstar );
+
+          //compute similarities
+          NICE::Vector kernelVector ( examples.size(), 0.0 );
+          for ( uint j = 0; j < examples.size(); j++ )
+          {
+            kernelVector[j] = measureMinimumDistance( * examples[j].second.svec, xstar_sparse );
+          }     
+          
+          //compute the resulting score
+          double score = kernelVector.scalarProduct( GPrightPart );
+          
+          //this is the standard score-object needed for the evaluation
+          FullVector scores ( 2 );  
+          scores[0] = -1.0*score;
+          scores[1] = score;
+
+          ClassificationResult result ( scores.maxElement(), scores );
+          
+          result.classno_groundtruth = ( yTest[i] == 1 ) ? 1 : 0;
+          result.classno = ( score >= 0.0 ) ? 1 : 0;
+
+          confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+          results.push_back( result );
+        }
+
+        float time_classification = ( float ) ( clock() - start_time ) ;
+        if ( verbose >= LOW )
+          cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
+        ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );
+        
+        confusionMatrix.normalizeRowsL1();
+        std::cerr << confusionMatrix;
+        double avg_recognition_rate = 0.0;
+        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+        {
+          avg_recognition_rate += confusionMatrix ( i, i );
+        }        
+        avg_recognition_rate /= confusionMatrix.rows();
+        std::cerr << "class: " << currentClass << " run: " << run << " avg recognition rate: " <<  avg_recognition_rate*100 << " % -- " << examples.size() << " training examples used" << std::endl;
+
+        recognitions_rates[0].push_back ( avg_recognition_rate*100 );        
+
+        std::cerr << "number of classified examples: " << results.size() << std::endl;
+
+        std::cerr << "perform auc evaluation "<< std::endl;
+        double aucScore = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+        
+        std::cerr << "class: " << currentClass << " run: " << run << " AUC-score: " <<  aucScore << " % -- " << examples.size() << " training examples used" << std::endl << std::endl;
+
+        AUC_scores[0].push_back ( aucScore*100 );
+      }
+
+      //Now start the Incremental-Learning-Part
+      
+      for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
+      {
+        //simply count how many possible example we have 
+        int nrOfPossibleExamples(  unlabeledExamples.size() );
+        
+        if (queryStrategy == RANDOM)
+        {
+          std::cerr << "print chosen examples: " << std::endl;           
+          for (int i = 0; i < incrementalAddSize; i++)
+          {        
+            int exampleIndex ( rand() % ( unlabeledExamples.size() ) );
+            
+            Example newExample;
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exampleIndex] ];
+            newExample.svec = new SparseVector( xTrain ); 
+            int label( y[ unlabeledExamples[exampleIndex] ] );
+            examples.push_back ( pair<int, Example> ( label, newExample ) );
+            unlabeledExamples.erase( unlabeledExamples.begin()+exampleIndex );
+            std::cerr << exampleIndex+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ exampleIndex ] << std::endl;          
+            filenamesUnlabeled.erase( filenamesUnlabeled.begin()+exampleIndex );            
+            pickedExamplesPerClass[label]++;
+          }
+        }// end computation for RANDOM
+        else if ( (queryStrategy == GPMEAN) || (queryStrategy == GPPREDVAR) || (queryStrategy == GPHEURISTIC) )
+        {
+          //compute uncertainty values for all examples according to the query strategy
+          std::vector<std::pair<int,double> > scores;
+          scores.clear();
+          time_t  unc_pred_start_time = clock();
+          for (uint exIndex = 0; exIndex < unlabeledExamples.size(); exIndex++)
+          {
+              NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exIndex] ];
+              SparseVector xTrainSparse ( xTrain );
+              //compute similarities
+              NICE::Vector kernelVector ( examples.size(), 0.0);
+              for ( uint j = 0; j < examples.size(); j++ )
+              {
+                kernelVector[j] = measureMinimumDistance( * examples[j].second.svec, xTrainSparse );
+              }     
+              
+              if (queryStrategy == GPMEAN)
+              {              
+                //compute the resulting score
+                double score = kernelVector.scalarProduct( GPrightPart );                 
+                scores.push_back( std::pair<int,double> ( exIndex, fabs(score) ) );
+              }
+              else if (queryStrategy == GPPREDVAR)
+              {
+                double kernelSelf ( measureMinimumDistance( xTrainSparse, xTrainSparse) ); 
+                NICE::Vector rightPart (examples.size());
+                choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );
+                double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );                
+                scores.push_back( std::pair<int,double> ( exIndex, uncertainty) );
+              }
+              else if (queryStrategy == GPHEURISTIC)
+              {
+                double kernelSelf ( measureMinimumDistance( xTrainSparse, xTrainSparse) ); 
+                NICE::Vector rightPart (examples.size());
+                choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );
+                //uncertainty
+                double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );                 
+                //mean
+                double score = kernelVector.scalarProduct( GPrightPart );                 
+                //compute the resulting score
+                scores.push_back( std::pair<int,double> ( exIndex, fabs(score) / sqrt( squaredNoise + uncertainty ) ) );
+              }
+          }
+          float time_score_computation = ( float ) ( clock() - unc_pred_start_time ) ;
+            
+          //pick the ones with best score
+          //we could speed this up using a more sophisticated search method
+          
+          if (queryStrategy == GPPREDVAR) //take the maximum of the scores for the predictive variance
+          {
+            std::set<int> chosenExamplesForThisRun;
+            chosenExamplesForThisRun.clear();          
+            for (int i = 0; i < incrementalAddSize; i++)
+            {
+              std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+              std::vector<std::pair<int,double> >::iterator worstExample = scores.begin();
+              
+              for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+              {
+                if (jIt->second > bestExample->second)
+                  bestExample = jIt;
+                if (jIt->second < worstExample->second)
+                  worstExample = jIt;                
+              }
+              std::cerr << "i: " << i << " bestExample: " << bestExample->second << " worstExample: " << worstExample->second << std::endl;
+              
+              Example newExample;    
+              NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ]; 
+              newExample.svec = new SparseVector( xTrain ); 
+              //actually this is the ACTIVE LEARNING step (query a label)
+              int label( y[ unlabeledExamples[bestExample->first] ] );
+              examples.push_back ( pair<int, Example> ( label, newExample ) );    
+              //remember the index, to safely remove this example afterwards from unlabeledExamples
+              chosenExamplesForThisRun.insert(bestExample->first);
+              scores.erase(bestExample);
+              pickedExamplesPerClass[label]++;
+            }
+            
+//             std::cerr << "print chosen examples: " << std::endl; 
+/*            int tmpCnt(0);
+            for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
+            {
+              std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " <<  filenamesUnlabeled[ *it ] << std::endl;
+            } */              
+            //delete the queried examples from the set of unlabeled ones
+              //do this in a decreasing order in terms of indices to ensure valid access
+            for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+            {
+              unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+            }          
+          }
+          else //take the minimum of the scores for the heuristic and the gp mean (minimum margin)
+          {
+            std::set<int> chosenExamplesForThisRun;
+            chosenExamplesForThisRun.clear();
+            for (int i = 0; i < incrementalAddSize; i++)
+            {
+              std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+              std::vector<std::pair<int,double> >::iterator worstExample = scores.begin();
+              
+              for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+              {
+                if (jIt->second < bestExample->second)
+                  bestExample = jIt;
+               if (jIt->second > worstExample->second)
+                  worstExample = jIt;               
+              }
+              std::cerr << "i: " << i << " bestExample: " << bestExample->second << " worstExample: " << worstExample->second << std::endl;
+              Example newExample;    
+              NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ];
+              newExample.svec = new SparseVector( xTrain ); 
+              //actually this is the ACTIVE LEARNING step (query a label)
+              int label( y[ unlabeledExamples[bestExample->first] ] );
+              examples.push_back ( pair<int, Example> ( label, newExample ) );           
+              //remember the index, to safely remove this example afterwards from unlabeledExamples
+              chosenExamplesForThisRun.insert(bestExample->first);
+              scores.erase(bestExample);
+              pickedExamplesPerClass[label]++;
+            }  
+                      
+            //delete the queried example from the set of unlabeled ones
+              //do this in a decreasing order in terms of indices to ensure valid access
+            for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+            {
+              unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+            }
+
+          }
+        
+          std::cerr << "Time used to compute query-scores for " <<  nrOfPossibleExamples << " examples: " << time_score_computation / CLOCKS_PER_SEC << " [s]" << std::endl;
+        } // end computation for GPMEAN, GPPREDVAR, or GPHEURISTIC
+        
+        std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+
+        //again: brute force GP regression training
+        Timer t;
+        t.start();
+        NICE::Matrix kernelMatrix (examples.size(), examples.size(), 0.0);
+          
+        //and set zero to minus one for the internal GP computations for expected mean
+        NICE::Vector yBinGP ( examples.size(), -1 );   
+        
+        //now compute the kernelScores for every element
+        double kernelScore(0.0);
+        for ( uint i = 0; i < examples.size(); i++ )
+        {
+          for ( uint j = i; j < examples.size(); j++ )
+          {
+            kernelScore = measureMinimumDistance(* examples[i].second.svec, * examples[j].second.svec);
+            kernelMatrix(i,j) = kernelScore;
+            if (i != j)
+              kernelMatrix(j,i) = kernelScore;
+          }
+          if ( examples[i].first == 1)
+            yBinGP[i] = 1;
+        }  
+        
+        //adding some noise, if necessary
+        if ( squaredNoise != 0.0 )
+        {
+          kernelMatrix.addIdentity( squaredNoise );
+        }
+        else
+        {
+          //zero was already set
+        }    
+      
+        //compute its inverse
+        //noise is already added :)
+
+        //update the cholesky decomposition
+        choleskyMatrix.resize ( examples.size(), examples.size() );      
+        choleskyMatrix.set( 0.0 );
+        cr.robustChol ( kernelMatrix, choleskyMatrix );   
+        
+        //and update the right part needed for the posterior mean
+        GPrightPart.resize ( examples.size() );
+        GPrightPart.set( 0.0 );
+
+        choleskySolveLargeScale ( choleskyMatrix, yBinGP, GPrightPart );   
+               
+        t.stop();
+        std::cerr << "Time for IL-adding of " << incrementalAddSize << " examples to already " <<  nrOfClassesUsed*trainExPerClass+incrementalAddSize*incrementationStep << "  training-examples: " << t.getLast() << " [s]" << std::endl;
+        IL_training_times[incrementationStep].push_back( t.getLast() );    
+            
+        //do the classification for evaluating the benefit of new examples
+        if ( do_classification )
+        {
+          time_t  start_time = clock();
+          ClassificationResults results;
+          confusionMatrix.set( 0.0 );
+          for ( uint i = 0 ; i < testData.size(); i++ )
+          {
+            const Vector & xstar = testData[i];
+            SparseVector xstar_sparse ( xstar );
+            
+            //compute similarities
+            NICE::Vector kernelVector ( examples.size(), 0.0 );
+            for ( uint j = 0; j < examples.size(); j++ )
+            {
+              kernelVector[j] = measureMinimumDistance( * examples[j].second.svec, xstar_sparse );
+            }     
+            
+            //compute the resulting score
+            double score = kernelVector.scalarProduct( GPrightPart );
+            
+            //this is the standard score-object needed for the evaluation
+            FullVector scores ( 2 );  
+            scores[0] = -1.0*score;
+            scores[1] = score;
+
+            ClassificationResult result ( scores.maxElement(), scores );
+            
+            result.classno_groundtruth = ( yTest[i] == 1 ) ? 1 : 0;
+
+            result.classno = ( score >= 0.0 ) ? 1 : 0;            
+
+            results.push_back( result );      
+            confusionMatrix ( result.classno_groundtruth , result.classno ) ++;            
+          }     
+
+          float time_classification = ( float ) ( clock() - start_time ) ;
+          if ( verbose >= LOW )
+            std::cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
+          ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );
+          
+          confusionMatrix.normalizeRowsL1();
+          std::cerr << confusionMatrix;
+          double avg_recognition_rate ( 0.0 );
+          for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+          {
+            avg_recognition_rate += confusionMatrix ( i, i );
+          }
+          avg_recognition_rate /= confusionMatrix.rows();          
+          
+          std::cerr << "class: " << currentClass << " run: " << run << " avg recognition rate: " <<  avg_recognition_rate*100 << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl;
+
+          recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );           
+
+          
+          double score = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+          std::cerr << "class: " << currentClass << " run: " << run << " AUC-score: " <<  score << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl << std::endl;          
+
+          AUC_scores[incrementationStep+1].push_back ( score*100 );
+        } //classification after IL adding */
+      } //IL adding of different classes
+      std::cerr << "Final statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+      
+      //don't waste memory!
+      for ( uint tmp = 0; tmp < examples.size(); tmp++ )
+      {
+        delete examples[tmp].second.svec;
+        examples[tmp].second.svec = NULL;
+      }
+    }//runs 
+       
+    // ================= EVALUATION =========================
+    
+    int nrOfClassesUsed ( 2 ); //binary setting
+
+    if ( do_classification )
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << " final evaluation for class: " << currentClass << std::endl;
+      std::cerr << "content of classification_times: " << std::endl;
+      for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+      {
+        for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+        {
+          std::cerr << *jt << " ";
+        }
+        std::cerr << std::endl;
+      }
+
+      std::vector<float> mean_classification_times;
+      std::vector<float> std_dev_classification_times;
+      for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+      {
+        float mean_classification_time ( 0.0 );
+        for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          mean_classification_time += *itRun;
+        }
+        mean_classification_time /= it->size();
+        mean_classification_times.push_back ( mean_classification_time );
+
+        double std_dev_classification_time ( 0.0 );
+        for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          std_dev_classification_time += pow ( *itRun - mean_classification_time, 2 );
+        }
+        std_dev_classification_time /= it->size();
+        std_dev_classification_time = sqrt ( std_dev_classification_time );
+        std_dev_classification_times.push_back ( std_dev_classification_time );
+      }
+      
+      int datasize ( nrOfClassesUsed*trainExPerClass );
+      for ( uint i = 0; i < mean_classification_times.size(); i++)
+      {
+        std::cerr << "size: " << datasize << " mean classification time: " << mean_classification_times[i] << " std_dev classification time: " << std_dev_classification_times[i] << std::endl;
+        datasize += incrementalAddSize ;
+      }
+    }
+    else
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << "No classification done therefor no classification times available." << std::endl;
+    }
+
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of IL_training_times for class : "<< currentClass << std::endl;
+    for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+    {
+      for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::vector<float> mean_IL_training_times;
+    std::vector<float> std_dev_IL_training_times;
+    for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+    {  
+      float mean_IL_training_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_IL_training_time += *itRun;
+      }
+      mean_IL_training_time /= it->size();
+      mean_IL_training_times.push_back ( mean_IL_training_time );
+
+      double std_dev_IL_training_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_IL_training_time += pow ( *itRun - mean_IL_training_time, 2 );
+      }
+      std_dev_IL_training_time /= it->size();
+      std_dev_IL_training_time = sqrt ( std_dev_IL_training_time );
+      std_dev_IL_training_times.push_back ( std_dev_IL_training_time );
+    }
+
+    int datasize ( nrOfClassesUsed*trainExPerClass );
+    for ( uint i = 0; i < mean_IL_training_times.size(); i++)
+    {
+      cerr << "size: " << datasize << " and adding " << incrementalAddSize << " mean IL_training time: " << mean_IL_training_times[i] << " std_dev IL_training time: " << std_dev_IL_training_times[i] << endl;
+      datasize += incrementalAddSize ;
+    }
+
+    if ( do_classification )
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << "content of recognition_rates for class : "<< currentClass << std::endl;
+      for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+      {
+        for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+        {
+          std::cerr << *jt << " ";
+        }
+        std::cerr << std::endl;
+      }
+
+      std::cerr << "calculating final recognition_rates for class : "<< currentClass << std::endl;
+      std::vector<double> mean_recs;
+      std::vector<double> std_dev_recs;
+      for (std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+      {
+        double mean_rec ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          mean_rec += *itRun;
+        }
+        mean_rec /= it->size();
+        mean_recs.push_back ( mean_rec );
+
+        double std_dev_rec ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          std_dev_rec += pow ( *itRun - mean_rec, 2 );
+        }
+        std_dev_rec /= it->size();
+        std_dev_rec = sqrt ( std_dev_rec );
+        std_dev_recs.push_back ( std_dev_rec );
+      }
+
+      int datasize ( nrOfClassesUsed*trainExPerClass );
+      for ( uint i = 0; i < recognitions_rates.size(); i++)
+      {
+        std::cerr << "size: " << datasize << " mean_IL: " << mean_recs[i] << " std_dev_IL: " << std_dev_recs[i] << std::endl;
+        datasize += incrementalAddSize ;
+      }
+      
+      std::cerr << "========================" << std::endl;
+      std::cerr << "content of AUC_scores for class : "<< currentClass << std::endl;
+      for ( std::vector<std::vector<double> >::const_iterator it = AUC_scores.begin(); it != AUC_scores.end(); it++ )
+      {
+        for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+        {
+          std::cerr << *jt << " ";
+        }
+        std::cerr << std::endl;
+      }
+
+      std::cerr << "calculating final AUC_scores for class : "<< currentClass << std::endl;
+      std::vector<double> mean_aucs;
+      std::vector<double> std_dev_aucs;
+      for (std::vector<std::vector<double> >::const_iterator it = AUC_scores.begin(); it != AUC_scores.end(); it++ )
+      {
+        double mean_auc ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          mean_auc += *itRun;
+        }
+        mean_auc /= it->size();
+        mean_aucs.push_back ( mean_auc );
+
+        double std_dev_auc ( 0.0 );
+        for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+        {
+          std_dev_auc += pow ( *itRun - mean_auc, 2 );
+        }
+        std_dev_auc /= it->size();
+        std_dev_auc = sqrt ( std_dev_auc );
+        std_dev_aucs.push_back ( std_dev_auc );
+      }
+
+      datasize  = nrOfClassesUsed*trainExPerClass;
+      for ( uint i = 0; i < recognitions_rates.size(); i++)
+      {
+        std::cerr << "size: " << datasize << " mean_IL: " << mean_aucs[i] << " std_dev_IL: " << std_dev_aucs[i] << std::endl;
+        datasize += incrementalAddSize ;
+      }      
+    }
+    else
+    {
+      std::cerr << "========================" << std::endl;
+      std::cerr << "No classification done therefor no classification times available." << std::endl;
+    } 
+    
+  } //for int currentClass...
+
+  return 0;
+}

+ 530 - 0
progs/IL_NewExamples.cpp

@@ -0,0 +1,530 @@
+/**
+* @file IL_NewExamples.cpp
+* @brief Large GP-IL-Testsetup
+* @author Alexander Freytag
+* @date 09-05-2012
+*/
+#include <vector>
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+
+
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+//----------
+
+#include "vislearning/baselib/ProgressBar.h"
+
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+#include "vislearning/cbaselib/MultiDataset.h"
+#include <vislearning/cbaselib/LabeledSet.h>
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include <vislearning/baselib/Globals.h>
+
+#include <vislearning/math/kernels/KernelData.h>
+
+//----------
+
+#include "gp-hik-exp/progs/datatools.h"
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+// #include <incrementallearning/IL_Framework_Generic.h>
+
+
+//
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+enum verbose_level {NONE = 0, LOW = 1,  MEDIUM = 2, EVERYTHING = 3};
+
+/**
+    Computes kernel matrices from randomly or deterministically chosen training images and evaluates their performance, using ROI-optimization
+*/
+int main ( int argc, char **argv )
+{
+  std::cout.precision ( 5 );
+  std::cerr.precision ( 5 );
+
+  NICE::Config conf ( argc, argv );
+  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
+  int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);
+  int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);
+  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );  
+  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );
+  bool incrementalNotBatch = conf.gB( "GP_IL", "incrementalNotBatch", true );
+  
+  string featureLocation = conf.gS( "GP_IL", "featureLocation", "toyExampleLargeLargeScale.data");
+  
+  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
+  verbose_level verbose ( NONE );
+  switch ( verbose_int )
+  {
+    case 0:
+      verbose = NONE;
+      break;
+    case 1:
+      verbose = LOW;
+      break;
+    case 2:
+      verbose = MEDIUM;
+      break;
+    case 3:
+      verbose = EVERYTHING;
+      break;
+  }
+
+
+
+  /* initialize random seed: */
+  srand ( time ( NULL ) ); //with 0 for reproductive results
+//    srand ( 0 ); //with 0 for reproductive results
+
+  // ===========================  INIT =========================== 
+  
+  //these classes are the basic knowledge we have at the beginning
+  set<int> classesForTraining;
+  classesForTraining.insert(0);
+  classesForTraining.insert(1);
+  classesForTraining.insert(2);
+  classesForTraining.insert(3);
+  classesForTraining.insert(4);
+  classesForTraining.insert(5);
+  classesForTraining.insert(6);
+  classesForTraining.insert(7);
+  classesForTraining.insert(8);
+  classesForTraining.insert(9);
+  classesForTraining.insert(10);
+  classesForTraining.insert(11);
+  classesForTraining.insert(12);
+  classesForTraining.insert(13);
+  classesForTraining.insert(14);
+  
+//   //these classes will be added iteratively to our training set
+//   std::set<int> classesForIncrementalTraining;
+  
+  std::vector<std::vector<double> > recognitions_rates(nrOfIncrements+1);
+  std::vector<std::vector<float> > classification_times(nrOfIncrements+1);
+  std::vector<std::vector<float> > IL_training_times(nrOfIncrements);
+  
+  for ( int run = 0; run < num_runs; run++ )
+  {
+    std::cerr << "run: " << run << std::endl;    
+    
+    //15-scenes settings
+    std::string ext = conf.gS("main", "ext", ".txt"); 
+    std::cerr << "Using cache extension: " << ext << std::endl;
+
+    OBJREC::MultiDataset md ( &conf );
+    const ClassNames & classNamesTrain = md.getClassNames("train");
+    
+    // read training set
+    vector< NICE::Vector > trainDataOrig;
+    Vector y;
+    const LabeledSet *train = md["train"];
+
+    readData< std::vector< NICE::Vector >, NICE::Vector >  ( conf, *train, trainDataOrig, y, ext );
+
+    std::vector<double> labelsStd;
+    int datasize_all ( trainDataOrig.size() );
+    
+    std::set<int> classesAvailable;
+    for ( uint i = 0; i < y.size(); i++)
+    {
+      //automatically check for duplicates
+      classesAvailable.insert(y[i]);
+    }
+    
+    int numberOfClasses =  classesAvailable.size();
+    
+    std::map<int,int> nrExamplesPerClassInDataset;
+    std::map<int,std::vector<int> > examplesPerClassInDataset;
+    
+    for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
+    {
+      nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
+      examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
+    }
+    
+    for ( uint i = 0; i < y.size(); i++ )
+    {
+      (examplesPerClassInDataset.find(y[i])->second).push_back(i);
+    }
+    
+    for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
+    {
+      nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
+    }
+    
+    for ( std::map<int,int>::const_iterator it =  nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
+    {
+      cerr << it->first << ": " << it->second << endl;
+    }    
+       
+    Examples examples;
+    
+    
+    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
+    //chose examples for every class used for training
+    for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
+    {
+      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+      std::cerr << "pick training examples for class " << *clIt << std::endl;
+      
+      for (int i = 0; i < trainExPerClass; i++)
+      {
+        std::cerr << "i: " << i << std::endl;
+         int exampleIndex ( rand() % ( exIt->second.size() ) );
+         std::cerr << "exampleIndex: " << exampleIndex << std::endl;
+         
+        Example example;
+        NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
+        example.svec = new SparseVector(xTrain);
+        examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );         
+         
+        exIt->second.erase(exIt->second.begin()+exampleIndex);
+      }
+    }    
+    std::cerr << "start training " << std::endl;
+    time_t  prep_start_time = clock();
+
+    GPHIKClassifierNICE * classifier  = new GPHIKClassifierNICE( &conf );
+    
+    FeaturePool fp; // will be ignored
+    classifier->train ( fp, examples );
+
+    float time_preparation = ( float ) ( clock() - prep_start_time ) ;
+    
+    int classesUsed(classesForTraining.size());
+    
+    std::cerr << "training done " << std::endl;
+    
+      // ------------------ TESTING
+    const LabeledSet *test = md["test"];
+    VVector testData;
+    Vector yTest;
+    readData< VVector, Vector > ( conf, *test, testData, yTest, ext );
+    
+
+    NICE::Matrix confusionMatrix ( numberOfClasses, numberOfClasses );
+    confusionMatrix.set ( 0.0 );
+
+    time_t  start_time = clock();
+
+    std::vector<int> chosen_examples_per_class ( numberOfClasses );
+
+    if ( do_classification )
+    {
+      for ( uint i = 0 ; i < testData.size(); i++ )
+      {
+        Example example;
+        const Vector & xstar = testData[i];
+        SparseVector xstar_sparse ( xstar );
+        OBJREC::ClassificationResult result;
+        example.svec = &xstar_sparse;
+        
+        result = classifier->classify( example );
+        cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;
+        
+        result.classno_groundtruth = yTest[i];
+        confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+      }
+
+
+      float time_classification = ( float ) ( clock() - start_time ) ;
+      if ( verbose >= LOW )
+        cerr << "Time for Classification with " << classesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
+      ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );
+
+      confusionMatrix.normalizeRowsL1();
+      double avg_recognition_rate = 0.0;
+      for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+      {
+        if ( verbose >= MEDIUM )
+        {
+          cerr << "Class no: " <<  i  << " : " << confusionMatrix ( i, i ) << endl;
+        }
+        avg_recognition_rate += confusionMatrix ( i, i );
+      }
+
+      avg_recognition_rate /= confusionMatrix.rows();
+
+      cerr << confusionMatrix << endl;
+      cerr << "avg recognition rate " << avg_recognition_rate*100 << " %" << endl;
+
+      recognitions_rates[0].push_back ( avg_recognition_rate*100 );
+    }
+
+    //Now start the Incremental-Learning-Part
+    
+    for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
+    {
+      
+      //iteratively add 1 example
+      if (incrementalNotBatch)
+      {
+      uint oldSize = examples.size();
+      //chose examples for every class used for training
+      int cnt(0);
+      Examples newExamples;
+      for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
+      {
+        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+        
+        for (int i = 0; i < incrementalAddSize; i++)
+        {
+          std::cerr << "i: " << cnt << std::endl;
+          Example example;    
+          
+          int exampleIndex ( rand() % ( exIt->second.size() ) );
+          NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex] ];
+          example.svec = new SparseVector(xTrain);
+//           examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );           
+          newExamples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
+          exIt->second.erase(exIt->second.begin()+exampleIndex);
+          cnt++;
+        }
+      }        
+           
+        std::cerr << "Incremental, but not batch" << std::endl;
+        time_t  IL_add_start_time = clock();
+
+        classifier->addMultipleExamples( newExamples );
+        float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
+        std::cerr << "Time for IL-adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " <<  classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << "  training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
+        IL_training_times[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);
+      }
+      else
+      {       
+        std::cerr << "batch retraining -- add new data to currently known training examples" << std::endl;
+        //chose examples for every class used for training
+        int cnt(0);
+        for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
+        {
+          std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+          
+          for (int i = 0; i < incrementalAddSize; i++)
+          {
+            std::cerr << "i: " << cnt << std::endl;
+            Example example;    
+            
+            int exampleIndex ( rand() % ( exIt->second.size() ) );
+            NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex] ];
+            example.svec = new SparseVector(xTrain);
+            examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );           
+
+            exIt->second.erase(exIt->second.begin()+exampleIndex);
+            cnt++;
+          }
+        }          
+
+        std::cerr <<  "start batch retraining" << std::endl;
+        time_t  batch_add_start_time = clock();        
+        //
+        if (classifier != NULL)
+          delete classifier;
+        classifier = new GPHIKClassifierNICE( &conf );
+        classifier->train( fp, examples );
+        //
+        float time_batch_add = ( float ) ( clock() - batch_add_start_time ) ;
+        std::cerr << "Time for batch relearning after adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " <<  classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << "  training-examples: " << time_batch_add / CLOCKS_PER_SEC << " [s]" << std::endl;
+        IL_training_times[incrementationStep].push_back(time_batch_add / CLOCKS_PER_SEC);        
+      }
+           
+      //do the classification for evaluating the benefit of new examples
+      if ( do_classification )
+      {
+        std::cerr << "do classification" << std::endl;
+        for ( uint i = 0 ; i < testData.size(); i++ )
+        {
+          Example example;
+          const Vector & xstar = testData[i];
+          SparseVector xstar_sparse ( xstar );
+          example.svec = &xstar_sparse;
+          OBJREC::ClassificationResult result;
+          
+          result = classifier->classify( example );
+          
+          std::cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl; 
+          
+          result.classno_groundtruth = yTest[i];
+          confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+        }     
+
+
+        float time_classification = ( float ) ( clock() - start_time ) ;
+        if ( verbose >= LOW )
+          std::cerr << "Time for Classification with " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
+        ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );
+
+        confusionMatrix.normalizeRowsL1();
+        double avg_recognition_rate = 0.0;
+        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+        {
+          if ( verbose >= MEDIUM )
+          {
+            std::cerr << "Class no: " <<  i  << " : " << confusionMatrix ( i, i ) << std::endl;
+          }
+          avg_recognition_rate += confusionMatrix ( i, i );
+        }
+
+        avg_recognition_rate /= confusionMatrix.rows();
+
+        cerr << confusionMatrix << endl;
+        cerr << "avg recognition rate " << avg_recognition_rate*100 << " %" << endl;
+
+        recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );
+      } //classification after IL adding
+    } //IL adding of different classes
+  }//runs 
+
+
+  int classesUsed(classesForTraining.size());
+  std::cerr << "classes used: " << classesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;
+
+  if ( do_classification )
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of classification_times: " << std::endl;
+    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+    {
+      for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::vector<float> mean_classification_times;
+    std::vector<float> std_dev_classification_times;
+    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
+    {
+      float mean_classification_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_classification_time += *itRun;
+      }
+      mean_classification_time /= it->size();
+      mean_classification_times.push_back ( mean_classification_time );
+
+      double std_dev_classification_time ( 0.0 );
+      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_classification_time += pow ( *itRun - mean_classification_time, 2 );
+      }
+      std_dev_classification_time /= it->size();
+      std_dev_classification_time = sqrt ( std_dev_classification_time );
+      std_dev_classification_times.push_back ( std_dev_classification_time );
+    }
+    
+    int datasize ( classesUsed*trainExPerClass );
+    for ( uint i = 0; i < mean_classification_times.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " mean classification time: " << mean_classification_times[i] << " std_dev classification time: " << std_dev_classification_times[i] << std::endl;
+      datasize += classesUsed*incrementalAddSize ;
+    }
+  }
+  else
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "No classification done therefor no classification times available." << std::endl;
+  }
+
+  std::cerr << "========================" << std::endl;
+  std::cerr << "content of IL_training_times: " << std::endl;
+  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+  {
+    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+    {
+      std::cerr << *jt << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  std::vector<float> mean_IL_training_times;
+  std::vector<float> std_dev_IL_training_times;
+  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
+  {  
+    float mean_IL_training_time ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      mean_IL_training_time += *itRun;
+    }
+    mean_IL_training_time /= it->size();
+    mean_IL_training_times.push_back ( mean_IL_training_time );
+
+    double std_dev_IL_training_time ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      std_dev_IL_training_time += pow ( *itRun - mean_IL_training_time, 2 );
+    }
+    std_dev_IL_training_time /= it->size();
+    std_dev_IL_training_time = sqrt ( std_dev_IL_training_time );
+    std_dev_IL_training_times.push_back ( std_dev_IL_training_time );
+  }
+
+  int datasize ( classesUsed*trainExPerClass );
+  for ( uint i = 0; i < mean_IL_training_times.size(); i++)
+  {
+    cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " mean IL_training time: " << mean_IL_training_times[i] << " std_dev IL_training time: " << std_dev_IL_training_times[i] << endl;
+    datasize += classesUsed*incrementalAddSize ;
+  }
+
+  if ( do_classification )
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of recognition_rates: " << std::endl;
+    for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+    {
+      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::cerr << "calculating final results " << std::endl;
+    std::vector<double> mean_recs;
+    std::vector<double> std_dev_recs;
+    for (std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+    {
+      double mean_rec ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_rec += *itRun;
+      }
+      mean_rec /= it->size();
+      mean_recs.push_back ( mean_rec );
+
+      double std_dev_rec ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_rec += pow ( *itRun - mean_rec, 2 );
+      }
+      std_dev_rec /= it->size();
+      std_dev_rec = sqrt ( std_dev_rec );
+      std_dev_recs.push_back ( std_dev_rec );
+    }
+
+    int datasize ( classesUsed*trainExPerClass + classesUsed*incrementalAddSize);
+    for ( uint i = 0; i < recognitions_rates.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " mean_IL: " << mean_recs[i] << " std_dev_IL: " << std_dev_recs[i] << std::endl;
+      datasize += classesUsed*incrementalAddSize ;
+    }
+  }
+  else
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "No classification done therefor no classification times available." << std::endl;
+  }
+
+  return 0;
+}

+ 571 - 0
progs/IL_NewExamples_Comparison.cpp

@@ -0,0 +1,571 @@
+/**
+* @file IL_NewExamples_Comparison.cpp
+* @brief Large GP-IL-Testsetup
+* @author Alexander Freytag
+* @date 09-05-2012
+*/
+#include <vector>
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+#include <iostream>
+#include <math.h>
+
+
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+//----------
+
+#include "vislearning/baselib/ProgressBar.h"
+
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+#include "vislearning/cbaselib/MultiDataset.h"
+#include <vislearning/cbaselib/LabeledSet.h>
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include <vislearning/baselib/Globals.h>
+
+#include <vislearning/math/kernels/KernelData.h>
+
+//----------
+
+#include "gp-hik-exp/progs/datatools.h"
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+// #include <incrementallearning/IL_Framework_Generic.h>
+
+
+//
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+enum verbose_level {NONE = 0, LOW = 1,  MEDIUM = 2, EVERYTHING = 3};
+
+/**
+    Computes from randomly or deterministically choosen trainimages kernelmatrizes and evaluates their performance, using ROI-optimization
+*/
+int main ( int argc, char **argv )
+{
+  std::cout.precision ( 5 );
+  std::cerr.precision ( 5 );
+
+  NICE::Config conf ( argc, argv );
+  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
+  int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);
+  int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);
+  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );  
+  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );
+  
+  string featureLocation = conf.gS( "GP_IL", "featureLocation", "toyExampleLargeLargeScale.data");
+  
+  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
+  verbose_level verbose ( NONE );
+  switch ( verbose_int )
+  {
+    case 0:
+      verbose = NONE;
+      break;
+    case 1:
+      verbose = LOW;
+      break;
+    case 2:
+      verbose = MEDIUM;
+      break;
+    case 3:
+      verbose = EVERYTHING;
+      break;
+  }
+  
+  /* initialize random seed: */
+  srand ( time ( NULL ) ); //with 0 for reproductive results
+//    srand ( 0 ); //with 0 for reproductive results
+
+  // ===========================  INIT =========================== 
+  
+  //these classes are the basic knowledge we have at the beginning
+  set<int> classesForTraining;
+  classesForTraining.insert(0);
+  classesForTraining.insert(1);
+  classesForTraining.insert(2);
+  classesForTraining.insert(3);
+  classesForTraining.insert(4);
+  classesForTraining.insert(5);
+  classesForTraining.insert(6);
+  classesForTraining.insert(7);
+  classesForTraining.insert(8);
+  classesForTraining.insert(9);
+  classesForTraining.insert(10);
+  classesForTraining.insert(11);
+  classesForTraining.insert(12);
+  classesForTraining.insert(13);
+  classesForTraining.insert(14);
+  
+//   //these classes will be added iteratively to our training set
+//   std::set<int> classesForIncrementalTraining;
+  
+  std::vector<std::vector<double> > recognitionsRatesBatch(nrOfIncrements+1);
+  std::vector<std::vector<double> > recognitionsRatesIL(nrOfIncrements+1);
+  
+  std::vector<std::vector<float> > trainingTimesBatch(nrOfIncrements+1);
+  std::vector<std::vector<float> > trainingTimesIL(nrOfIncrements+1);
+  
+  for ( int run = 0; run < num_runs; run++ )
+  {
+    std::cerr << "run: " << run << std::endl;    
+    
+    //15-scenes settings
+    std::string ext = conf.gS("main", "ext", ".txt"); 
+    std::cerr << "Using cache extension: " << ext << std::endl;
+
+    OBJREC::MultiDataset md ( &conf );
+    const ClassNames & classNamesTrain = md.getClassNames("train");    
+    
+    // read training set
+    vector< NICE::Vector > trainDataOrig;
+    Vector y;
+    const LabeledSet *train = md["train"];
+
+    readData< std::vector< NICE::Vector >, NICE::Vector >  ( conf, *train, trainDataOrig, y, ext );
+
+    std::vector<double> labelsStd;
+    int datasize_all ( trainDataOrig.size() );
+    
+    std::set<int> classesAvailable;
+    for ( uint i = 0; i < y.size(); i++)
+    {
+      //automatically check for duplicates
+      classesAvailable.insert(y[i]);
+    }
+    
+    int numberOfClasses =  classesAvailable.size();
+    
+    std::map<int,int> nrExamplesPerClassInDataset;
+    std::map<int,std::vector<int> > examplesPerClassInDataset;
+    
+    for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
+    {
+      nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
+      examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
+    }
+    
+    for ( uint i = 0; i < y.size(); i++ )
+    {
+      (examplesPerClassInDataset.find(y[i])->second).push_back(i);
+    }
+    
+    for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
+    {
+      nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
+    }
+    
+    for ( std::map<int,int>::const_iterator it =  nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
+    {
+      cerr << it->first << ": " << it->second << endl;
+    }    
+   
+    Examples examples;
+    
+    
+    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
+    //chose examples for every class used for training
+    for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
+    {
+      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+//       std::cerr << "pick training examples for class " << *clIt << std::endl;
+      
+      for (int i = 0; i < trainExPerClass; i++)
+      {
+//         std::cerr << "i: " << i << std::endl;
+         int exampleIndex ( rand() % ( exIt->second.size() ) );
+//          std::cerr << "exampleIndex: " << exampleIndex << std::endl;
+         
+        Example example;
+        NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
+        example.svec = new SparseVector(xTrain);
+        examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );         
+         
+        exIt->second.erase(exIt->second.begin()+exampleIndex);
+      }
+    }    
+
+    std::cerr << "start training " << std::endl;
+    time_t  prep_start_time = clock();
+
+    GPHIKClassifierNICE * classifierBatch = new GPHIKClassifierNICE( &conf ); //we don't need this one in the first round
+    GPHIKClassifierNICE * classifierIL  = new GPHIKClassifierNICE( &conf );
+    
+    FeaturePool fp; // will be ignored
+    classifierIL->train ( fp, examples );
+
+    float time_preparation = ( float ) ( clock() - prep_start_time ) ;
+    
+    int classesUsed(classesForTraining.size());
+    
+    std::cerr << "training done " << std::endl;
+    
+      // ------------------ TESTING
+    const LabeledSet *test = md["test"];
+    VVector testData;
+    Vector yTest;
+    readData< VVector, Vector > ( conf, *test, testData, yTest, ext );
+    
+
+    NICE::Matrix confusionMatrixBatch ( numberOfClasses, numberOfClasses );
+    NICE::Matrix confusionMatrixIL ( numberOfClasses, numberOfClasses );
+    confusionMatrixBatch.set ( 0.0 );
+    confusionMatrixIL.set ( 0.0 );
+
+    time_t  start_time = clock();
+
+    std::vector<int> chosen_examples_per_class ( numberOfClasses );
+
+    if ( do_classification )
+    {
+      for ( uint i = 0 ; i < testData.size(); i++ )
+      {
+        Example example;
+        const Vector & xstar = testData[i];
+        SparseVector xstar_sparse ( xstar );
+        OBJREC::ClassificationResult result;
+        example.svec = &xstar_sparse;
+        
+        result = classifierIL->classify( example );
+        cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;
+        
+        result.classno_groundtruth = yTest[i];
+        confusionMatrixIL ( result.classno_groundtruth , result.classno ) ++;
+      }
+
+
+      float time_classification = ( float ) ( clock() - start_time ) ;
+      if ( verbose >= LOW )
+        cerr << "Time for Classification with " << classesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
+
+      confusionMatrixIL.normalizeRowsL1();
+      double avg_recognition_rate = 0.0;
+      for ( int i = 0 ; i < ( int ) confusionMatrixIL.rows(); i++ )
+      {
+        if ( verbose >= MEDIUM )
+        {
+          cerr << "Class no: " <<  i  << " : " << confusionMatrixIL ( i, i ) << endl;
+        }
+        avg_recognition_rate += confusionMatrixIL ( i, i );
+      }
+
+      avg_recognition_rate /= confusionMatrixIL.rows();
+
+      std::cerr << confusionMatrixIL << std::endl;
+      std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " %" << std::endl;
+
+      recognitionsRatesBatch[0].push_back ( avg_recognition_rate*100 );
+      recognitionsRatesIL[0].push_back ( avg_recognition_rate*100 );
+    }
+
+    //Now start the Incremental-Learning-Part
+    
+    for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
+    {
+      uint oldSize = examples.size();
+      //chose examples for every class used for training
+      int cnt(0);
+      Examples newExamples;
+      for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
+      {
+        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+        
+        for (int i = 0; i < incrementalAddSize; i++)
+        {
+          std::cerr << "i: " << cnt << std::endl;
+          Example example;    
+          
+          int exampleIndex ( rand() % ( exIt->second.size() ) );
+          NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex] ];
+          example.svec = new SparseVector(xTrain);
+          examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );           
+          newExamples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
+          exIt->second.erase(exIt->second.begin()+exampleIndex);
+          cnt++;
+        }
+      }
+      
+      std::cerr << "Incremental, but not batch" << std::endl;
+      time_t  IL_add_start_time = clock();
+//       for ( uint i = oldSize ; i < examples.size() ; i++ )
+//       {
+//         Example & example = examples[i].second;
+//         int classno = examples[i].first;
+//   
+//         //skip the optimization for the first k examples
+//         classifierIL->addExample( example, (double) classno, true );
+//       }
+//       for ( uint i = examples.size()-1 ; i < examples.size() ; i++ )
+//       {
+//         Example & example = examples[i].second;
+//         int classno = examples[i].first;
+//         //perform the optimization
+//         classifierIL->addExample( example, (double) classno, true );
+//       }
+      classifierIL->addMultipleExamples( newExamples );
+      float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
+      std::cerr << "Time for IL-adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " <<  classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << "  training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
+      trainingTimesIL[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);        
+    
+    
+      std::cerr <<  "start batch retraining" << std::endl;
+      time_t  batch_add_start_time = clock();        
+      //
+      if (classifierBatch != NULL)
+        delete classifierBatch;
+      classifierBatch = new GPHIKClassifierNICE( &conf );
+      classifierBatch->train( fp, examples );
+      //
+      float time_batch_add = ( float ) ( clock() - batch_add_start_time ) ;
+      std::cerr << "Time for batch relearning after adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " <<  classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << "  training-examples: " << time_batch_add / CLOCKS_PER_SEC << " [s]" << std::endl;
+      trainingTimesBatch[incrementationStep].push_back(time_batch_add / CLOCKS_PER_SEC);        
+           
+      //do the classification for evaluating the benefit of new examples
+      if ( do_classification )
+      {
+        std::cerr << "do classification" << std::endl;
+        for ( uint i = 0 ; i < testData.size(); i++ )
+        {
+          Example example;
+          const Vector & xstar = testData[i];
+          SparseVector xstar_sparse ( xstar );
+          example.svec = &xstar_sparse;
+          OBJREC::ClassificationResult resultBatch;
+          OBJREC::ClassificationResult resultIL;
+          
+          resultBatch = classifierBatch->classify( example );
+          resultIL = classifierIL->classify( example );
+          
+          std::cerr << "Batch: [" << i << " / " << testData.size() << "] " << resultBatch.classno << " " << yTest[i] << std::endl; 
+          std::cerr << "IL:    [" << i << " / " << testData.size() << "] " << resultIL.classno << " " << yTest[i] << std::endl; 
+          
+          resultBatch.classno_groundtruth = yTest[i];
+          resultIL.classno_groundtruth = yTest[i];
+          
+          confusionMatrixBatch ( resultBatch.classno_groundtruth , resultBatch.classno ) ++;
+          confusionMatrixIL ( resultIL.classno_groundtruth , resultIL.classno ) ++;          
+        }     
+
+
+        float time_classification = ( float ) ( clock() - start_time ) ;
+        if ( verbose >= LOW )
+          std::cerr << "Time for Classification with " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
+
+        confusionMatrixBatch.normalizeRowsL1();
+        confusionMatrixIL.normalizeRowsL1();
+        
+        double ARRBatch = 0.0;
+        double ARRIL = 0.0;
+        for ( int i = 0 ; i < ( int ) confusionMatrixBatch.rows(); i++ )
+        {
+          if ( verbose >= MEDIUM )
+          {
+            std::cerr << "Batch Class no: " <<  i  << " : " << confusionMatrixBatch ( i, i ) << std::endl;
+            std::cerr << "IL    Class no: " <<  i  << " : " << confusionMatrixIL ( i, i ) << std::endl;
+          }
+          ARRBatch += confusionMatrixBatch ( i, i );
+          ARRIL += confusionMatrixIL ( i, i );
+        }
+
+        ARRBatch /= confusionMatrixBatch.rows();
+        ARRIL /= confusionMatrixIL.rows();
+
+        std::cerr << "Batch matrix and results: " << std::endl;
+        std::cerr << confusionMatrixBatch << std::endl;
+        std::cerr << "ARRBatch " << ARRBatch*100 << " %" << std::endl;
+        
+        std::cerr << "IL matrix and results: " << std::endl;
+        std::cerr << confusionMatrixIL << std::endl;
+        std::cerr << "ARRIL " << ARRIL*100 << " %" << std::endl;        
+
+        recognitionsRatesBatch[incrementationStep+1].push_back ( ARRBatch*100 );
+        recognitionsRatesIL[incrementationStep+1].push_back ( ARRIL*100 );
+      } //classification after IL adding
+    } //IL adding of different classes
+  }//runs 
+
+
+  int classesUsed(classesForTraining.size());
+  std::cerr << "classes used: " << classesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;
+
+  std::cerr << "========================" << std::endl;
+  std::cerr << "content of trainingTimesIL: " << std::endl;
+  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesIL.begin(); it != trainingTimesIL.end(); it++ )
+  {
+    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+    {
+      std::cerr << *jt << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  std::vector<float> trainingTimesILMean;
+  std::vector<float> trainingTimesILStdDev;
+  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesIL.begin(); it != trainingTimesIL.end(); it++ )
+  {  
+    float trainingTimeILMean ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      trainingTimeILMean += *itRun;
+    }
+    trainingTimeILMean /= it->size();
+    trainingTimesILMean.push_back ( trainingTimeILMean );
+
+    double trainingTimeILStdDev ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      trainingTimeILStdDev += pow ( *itRun - trainingTimeILMean, 2 );
+    }
+    trainingTimeILStdDev /= it->size();
+    trainingTimeILStdDev = sqrt ( trainingTimeILStdDev );
+    trainingTimesILStdDev.push_back ( trainingTimeILStdDev );
+  }
+
+  int datasize ( classesUsed*trainExPerClass );
+  for ( uint i = 0; i < trainingTimesILMean.size(); i++)
+  {
+    cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " trainingTimesILMean: " << trainingTimesILMean[i] << " trainingTimesILStdDev: " << trainingTimesILStdDev[i] << endl;
+    datasize += classesUsed*incrementalAddSize ;
+  }
+  
+  std::cerr << "========================" << std::endl;
+  std::cerr << "content of trainingTimesBatch: " << std::endl;
+  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesBatch.begin(); it != trainingTimesBatch.end(); it++ )
+  {
+    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+    {
+      std::cerr << *jt << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  std::vector<float> trainingTimesBatchMean;
+  std::vector<float> trainingTimesBatchStdDev;
+  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesBatch.begin(); it != trainingTimesBatch.end(); it++ )
+  {  
+    float trainingTimeBatchMean ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      trainingTimeBatchMean += *itRun;
+    }
+    trainingTimeBatchMean /= it->size();
+    trainingTimesBatchMean.push_back ( trainingTimeBatchMean );
+
+    double trainingTimeBatchStdDev ( 0.0 );
+    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+    {
+      trainingTimeBatchStdDev += pow ( *itRun - trainingTimeBatchMean, 2 );
+    }
+    trainingTimeBatchStdDev /= it->size();
+    trainingTimeBatchStdDev = sqrt ( trainingTimeBatchStdDev );
+    trainingTimesBatchStdDev.push_back ( trainingTimeBatchStdDev );
+  }
+
+  datasize = classesUsed*trainExPerClass;
+  for ( uint i = 0; i < trainingTimesBatchMean.size(); i++)
+  {
+    cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " trainingTimesBatchMean: " << trainingTimesBatchMean[i] << " trainingTimesBatchStdDev: " << trainingTimesBatchStdDev[i] << endl;
+    datasize += classesUsed*incrementalAddSize ;
+  }  
+
+  if ( do_classification )
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of recognitionsRatesIL: " << std::endl;
+    for ( std::vector<std::vector<double> >::const_iterator it = recognitionsRatesIL.begin(); it != recognitionsRatesIL.end(); it++ )
+    {
+      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::cerr << "calculating final IL results " << std::endl;
+    std::vector<double> recRatesILMean;
+    std::vector<double> recRatesILStdDev;
+    for (std::vector<std::vector<double> >::const_iterator it = recognitionsRatesIL.begin(); it != recognitionsRatesIL.end(); it++ )
+    {
+      double recRateILMean ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        recRateILMean += *itRun;
+      }
+      recRateILMean /= it->size();
+      recRatesILMean.push_back ( recRateILMean );
+
+      double recRateILStdDev ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        recRateILStdDev += pow ( *itRun - recRateILMean, 2 );
+      }
+      recRateILStdDev /= it->size();
+      recRateILStdDev = sqrt ( recRateILStdDev );
+      recRatesILStdDev.push_back ( recRateILStdDev );
+    }
+
+    int datasize ( classesUsed*trainExPerClass);
+    for ( uint i = 0; i < recRatesILMean.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " recRatesILMean: " << recRatesILMean[i] << " recRatesILStdDev: " << recRatesILStdDev[i] << std::endl;
+      datasize += classesUsed*incrementalAddSize ;
+    }
+    
+    std::cerr << "========================" << std::endl;
+    std::cerr << "content of recognitionsRatesBatch: " << std::endl;
+    for ( std::vector<std::vector<double> >::const_iterator it = recognitionsRatesBatch.begin(); it != recognitionsRatesBatch.end(); it++ )
+    {
+      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::cerr << "calculating final batch results " << std::endl;
+    std::vector<double> recRatesBatchMean;
+    std::vector<double> recRatesBatchStdDev;
+    for (std::vector<std::vector<double> >::const_iterator it = recognitionsRatesBatch.begin(); it != recognitionsRatesBatch.end(); it++ )
+    {
+      double recRateBatchMean ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        recRateBatchMean += *itRun;
+      }
+      recRateBatchMean /= it->size();
+      recRatesBatchMean.push_back ( recRateBatchMean );
+
+      double recRateBatchStdDev ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        recRateBatchStdDev += pow ( *itRun - recRateBatchMean, 2 );
+      }
+      recRateBatchStdDev /= it->size();
+      recRateBatchStdDev = sqrt ( recRateBatchStdDev );
+      recRatesBatchStdDev.push_back ( recRateBatchStdDev );
+    }
+
+    datasize = classesUsed*trainExPerClass;
+    for ( uint i = 0; i < recRatesBatchMean.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " recRatesBatchMean: " << recRatesBatchMean[i] << " recRatesBatchStdDev: " << recRatesBatchStdDev[i] << std::endl;
+      datasize += classesUsed*incrementalAddSize ;
+    }    
+  }
+  else
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "No classification done therefor no classification times available." << std::endl;
+  }
+
+  return 0;
+}

+ 88 - 0
progs/Makefile.inc

@@ -0,0 +1,88 @@
# BINARY-DIRECTORY-MAKEFILE
# conventions:
# - there are no subdirectories, they are ignored!
# - all ".C", ".cpp" and ".c" files in the current directory are considered
#   independent binaries, and linked as such.
# - the binaries depend on the library of the parent directory
# - the binary names are created with $(BINNAME), i.e. it will be more or less
#   the name of the .o file
# - all binaries will be added to the default build list ALL_BINARIES

# --------------------------------
# - remember the last subdirectory
#
# set the variable $(SUBDIR) correctly to the current subdirectory. this
# variable can be used throughout the current makefile.inc. The various
# SUBDIR_before/_add helper variables are only required so that we can recover
# the previous content of SUBDIR before exiting the makefile.inc

SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
SUBDIR_before:=$(SUBDIR)
SUBDIR:=$(strip $(SUBDIR_add))
SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)

# ------------------------
# - include subdirectories
#
# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
# the dependencies automatically. if you handle dependencies on your own, you
# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
# makefile.inc of the subdirectories on your own...

#SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
#include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)

# ----------------------------
# - include local dependencies
#
# include the libdepend.inc file, which gives additional dependencies for the
# libraries and binaries. additionally, an automatic dependency from the library
# of the parent directory is added (commented out in the code below).

-include $(SUBDIR)libdepend.inc

PARENTDIR:=$(patsubst %/,%,$(dir $(patsubst %/,%,$(SUBDIR))))
$(eval $(call PKG_DEPEND_INT,$(PARENTDIR)))

# ---------------------------
# - objects in this directory
#
# the use of the variable $(OBJS) is not mandatory. it is mandatory however
# to update $(ALL_OBJS) in a way that it contains the path and name of
# all objects. otherwise we can not include the appropriate .d files.

OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
      $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
ALL_OBJS += $(OBJS)

# ----------------------------
# - binaries in this directory
#
# output of binaries in this directory. none of the variables has to be used.
# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
# compiled with `make all`. be sure again to add the files with full path.

BINARIES:=$(patsubst %.o,$(BINDIR)%,$(filter-out moc_%,$(notdir $(OBJS))))
ALL_BINARIES+=$(BINARIES)

# ---------------------
# - binary dependencies
#
# there is no way of determining the binary dependencies automatically, so we
# follow conventions. each binary depends on the corresponding .o file and
# on the libraries specified by the INTLIBS/EXTLIBS. these dependencies can be
# specified manually or they are automatically stored in a .bd file.

$(foreach head,$(wildcard $(SUBDIR)*.h),$(eval $(shell grep -q Q_OBJECT $(head) && echo $(head) | sed -e's@^@/@;s@.*/\(.*\)\.h$$@$(BINDIR)\1:$(OBJDIR)moc_\1.o@')))
-include $(OBJS:%.o=%.bd)

# -------------------
# - subdir management
#
# as the last step, always add this line to correctly recover the subdirectory
# of the makefile including this one!

SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 921 - 0
progs/activeLearningCheckerBoard.cpp

@@ -0,0 +1,921 @@
+/**
+* @file ActiveLearningCheckerBoard.cpp
+* @brief Incrementally train the GP HIK classifier using the predictive variance and its approximations to select new samples, perform binary tests. We do not use the fast-hik implementations but perform the computations manually
+* @author Alexander Freytag
+* @date 11-06-2012
+*/
+#include <vector>
+#include <iostream>
+#include <stdlib.h>
+#include <time.h>
+#include <set>
+
+
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+#include <core/basics/Timer.h>
+
+#include <core/image/ImageT.h>
+#include <core/image/ColorImageT.h>
+#include <core/image/CircleT.h>
+#include <core/image/LineT.h>
+// QT Interface for image display
+// We only use the simple function showImage in this example, but there is far more
+// to explore in this area.
+#include <core/imagedisplay/ImageDisplay.h>
+
+#include "core/algebra/CholeskyRobust.h"
+
+#include "core/vector/Algorithms.h"
+#include <core/vector/SparseVectorT.h>
+#include <core/vector/VectorT.h>
+
+//----------
+
+#include "vislearning/baselib/ProgressBar.h"
+#include <vislearning/baselib/Globals.h>
+
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+#include "vislearning/cbaselib/MultiDataset.h"
+#include <vislearning/cbaselib/LabeledSet.h>
+#include "vislearning/cbaselib/ClassificationResults.h"
+
+
+#include <vislearning/math/kernels/KernelData.h>
+
+//----------
+
+#include "gp-hik-exp/progs/datatools.h"
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+enum QueryStrategy{
+      RANDOM = 0,
+      GPMEAN,
+      GPPREDVAR,
+      GPHEURISTIC,
+      GPHEURISTICPLUS,
+      GPBALANCE
+    }; 
+    
/**
* @brief Convert an integer into its decimal string representation.
* @param number the value to convert
* @return string containing the decimal digits (and sign) of number
*/
std::string convertInt(int number)
{
  std::stringstream converter;
  converter << number;
  return converter.str();
}
+
+void sampleFromCheckerboard( const int & nrOfSectorsProDim, const int & sizeOfSector, const int & examplesPerSector, std::vector<NICE::Vector> & examples, NICE::Vector & labels)
+{
+  int nrOfSectorsTotal ( nrOfSectorsProDim * nrOfSectorsProDim );
+  //set the labels
+  labels.resize( nrOfSectorsTotal * examplesPerSector );
+  for ( int ex = 0; ex < examplesPerSector; ex++)
+  {
+    for ( int i = 0; i < nrOfSectorsProDim; i++)
+    {
+      for ( int j = 0; j < nrOfSectorsProDim; j++)
+      {      
+        labels[ (i*nrOfSectorsProDim+j)*examplesPerSector + ex ] = ( i + j ) % 2;
+      }
+    }
+  }
+
+  for ( int i = 0; i < nrOfSectorsProDim; i++)
+  {
+    for ( int j = 0; j < nrOfSectorsProDim; j++)
+    {  
+      for ( int ex = 0; ex < examplesPerSector; ex++)
+      {
+        NICE::Vector example( 3 );
+        double xi ( rand() % sizeOfSector  + i * sizeOfSector ) ;
+        double yi ( rand() % sizeOfSector  + j * sizeOfSector );
+        //compute normalized histograms
+        example[0] = xi / (nrOfSectorsTotal*sizeOfSector);
+        example[1] = yi / (nrOfSectorsTotal*sizeOfSector);
+        example[2] = 1.0 - example[0] - example[1];
+        examples.push_back( example );
+      }
+    }
+  }
+}
+
+void paintImageBorders( NICE::ColorImage & img, const int & nrOfSectorsProDim, const int & sizeOfSector )
+{
+  std::cerr << "img.width(): " << img.width() << " img.height(): " << img.height() << std::endl;
+  std::cerr << "nrOfSectorsProDim*sizeOfSector-1: " << nrOfSectorsProDim*sizeOfSector-1 << std::endl;
+  
+  NICE::Line l1 ( NICE::Coord( 0, 0 ) , NICE::Coord ( 0, nrOfSectorsProDim*sizeOfSector-1) );
+  NICE::Line l2 ( NICE::Coord( 0, nrOfSectorsProDim*sizeOfSector-1 ) , NICE::Coord ( nrOfSectorsProDim*sizeOfSector-1, nrOfSectorsProDim*sizeOfSector-1) );
+  NICE::Line l3 ( NICE::Coord( nrOfSectorsProDim*sizeOfSector-1, nrOfSectorsProDim*sizeOfSector-1 ) , NICE::Coord ( nrOfSectorsProDim*sizeOfSector-1, 0) );
+  NICE::Line l4 ( NICE::Coord( nrOfSectorsProDim*sizeOfSector-1, 0 ) , NICE::Coord ( 0, 0 ) );
+  
+  l1.draw( img, Color ( 0, 0, 0 ) );
+  l2.draw( img, Color ( 0, 0, 0 ) ); 
+  l3.draw( img, Color ( 0, 0, 0 ) );
+  l4.draw( img, Color ( 0, 0, 0 ) );   
+}
+
+void paintSectorsInImage( NICE::ColorImage & img, const int & nrOfSectorsProDim, const int & sizeOfSector )
+{
+  for ( int i = 1; i < nrOfSectorsProDim; i++ )
+  {
+    NICE::Line lHor ( NICE::Coord( 0, i*sizeOfSector ) , NICE::Coord ( nrOfSectorsProDim*sizeOfSector, i*sizeOfSector) );
+    NICE::Line lVer ( NICE::Coord( i*sizeOfSector, 0 ) , NICE::Coord ( i*sizeOfSector, nrOfSectorsProDim*sizeOfSector) );
+    lHor.draw( img, Color ( 0, 0, 0 ) );
+    lVer.draw( img, Color ( 0, 0, 0 ) );     
+  }
+}
+
+void paintLabeledExamples( NICE::ColorImage & img, const NICE::Vector & y, const Examples & examples, const int & nrOfSectorsProDim, const int & sizeOfSector, const int & diameter )
+{
+  int nrOfSectorsTotal ( nrOfSectorsProDim * nrOfSectorsProDim );
+  for ( uint lE = 0; lE < examples.size(); lE++)
+  {
+//     if ( y[lE] != 1)
+//     {
+//       NICE::Circle circ   (  NICE::Coord( (int) ( (* examples[lE].second.svec) [0] *nrOfSectorsTotal *sizeOfSector) ,
+//                                                 (int) (( (* examples[lE].second.svec) [1]) * nrOfSectorsTotal *sizeOfSector) ), diameter );
+//       circ.draw ( img, Color ( 255, 0, 0 ) );
+//     }
+//     else
+//     {      
+//       NICE::Circle circ   (  NICE::Coord( (int) ( (* examples[lE].second.svec) [0] * nrOfSectorsTotal *sizeOfSector) ,
+//                                                 (int) ( (* examples[lE].second.svec) [1] * nrOfSectorsTotal *sizeOfSector) ), diameter );
+//       circ.draw ( img, Color ( 0, 0, 255 ) );      
+//     }
+    int thickness (2);
+    for ( int i = 0; i < thickness; i++)
+    {
+      NICE::Circle circ   (  NICE::Coord( (int) ( (* examples[lE].second.svec) [0] * nrOfSectorsTotal *sizeOfSector) ,
+                                  (int) ( (* examples[lE].second.svec) [1] * nrOfSectorsTotal *sizeOfSector) ), diameter-i );
+      circ.draw ( img, Color ( 0, 0, 0 ) );   //old: ( 0, 255, 0 )
+    }
+  }  
+}
+
+void paintUnlabeledExamples( NICE::ColorImage & img, const vector< NICE::Vector > & trainDataOrig, const NICE::Vector & y, const std::vector<int> & unlabeledExamples, const int & nrOfSectorsProDim, const int & sizeOfSector, const int & diameter )
+{
+  int nrOfSectorsTotal ( nrOfSectorsProDim * nrOfSectorsProDim );
+  for ( uint uE = 0; uE < unlabeledExamples.size(); uE++)
+  {
+    if ( y[ unlabeledExamples[uE] ] == 0)
+    {    
+      NICE::Circle circ   (  NICE::Coord( (int) (trainDataOrig[ unlabeledExamples[uE] ] [0] * nrOfSectorsTotal *sizeOfSector),
+                                                (int) (trainDataOrig[ unlabeledExamples[uE] ] [1] * nrOfSectorsTotal *sizeOfSector) ) , diameter );
+      circ.draw ( img, Color ( 255, 0, 0 ) );
+    }
+    else
+    {
+      NICE::Circle circ   (  NICE::Coord( (int) (trainDataOrig[ unlabeledExamples[uE] ] [0] * nrOfSectorsTotal *sizeOfSector) , 
+                            (int) (trainDataOrig[ unlabeledExamples[uE] ] [1] * nrOfSectorsTotal *sizeOfSector) ) , diameter );
+      circ.draw ( img, Color ( 0, 0, 255 ) );      
+    }
+  }  
+}
+
+void paintClassificationResult( NICE::ColorImage & img, const NICE::Vector& xstar, const int & diameter, const ClassificationResult & result, const int & nrOfSectorsProDim, const int & sizeOfSector )
+{
+  int nrOfSectorsTotal ( nrOfSectorsProDim * nrOfSectorsProDim );
+  NICE::Circle circ   (  NICE::Coord( (int) ( xstar[0] * nrOfSectorsTotal *sizeOfSector) ,
+                                                    (int) ( xstar[1] * nrOfSectorsTotal *sizeOfSector) ), diameter ); 
+  if (result.classno == 1) // classified as negative
+  {
+    circ.draw ( img, Color ( 0, 0, 255 ) );
+  }
+  else // classified as positive
+  {
+    circ.draw ( img, Color ( 255, 0, 0 ) );
+  }
+}
+
+void paintQueriedExamples( NICE::ColorImage & img, const NICE::Vector& xstar, const int & diameter, const int & nrOfSectorsProDim, const int & sizeOfSector )
+{
+  int nrOfSectorsTotal ( nrOfSectorsProDim * nrOfSectorsProDim );
+  int thickness (2);
+  for ( int i = 0; i < thickness; i++)
+  {
+    NICE::Circle circ   (  NICE::Coord( (int) ( xstar[0] * nrOfSectorsTotal *sizeOfSector) ,
+                                            (int) ( xstar[1] * nrOfSectorsTotal *sizeOfSector) ), diameter-i );
+    circ.draw ( img, Color ( 0, 0, 0 ) );   //old: ( 0, 255, 0 )
+  }
+}
+
+/**
+    Active learning on a synthetic checkerboard: incrementally trains a binary GP-HIK
+    classifier, queries new examples according to the chosen strategy (random, GP mean,
+    predictive variance, heuristics, or balance), and evaluates recognition rate and AUC
+    after each increment.
+*/
+int main ( int argc, char **argv )
+{
+  std::cout.precision ( 10 );
+  std::cerr.precision ( 10 );
+
+  NICE::Config conf ( argc, argv );
+  int trainExPerClass = conf.gI ( "main", "trainExPerClass", 10 );
+  int incrementalAddSize = conf.gI("main", "incrementalAddSize", 1);
+  int nrOfIncrements = conf.gI("main", "nrOfIncrements", 9);
+  
+  int num_runs = conf.gI ( "main", "num_runs", 10 ); 
+  bool do_classification = conf.gB ( "main", "do_classification", true );
+  
+  double noise = conf.gD("GPHIKClassifier", "noise", 0.01);
+  double squaredNoise = pow( noise, 2);
+  
+  int sizeOfSector = conf.gI( "main", "sizeOfSector", 250 );
+  int nrOfSectorsProDim = conf.gI( "main", "nrOfSectorsProDim", 2 );
+  int examplesPerSector = conf.gI( "main", "examplesPerSector", 5 );
+  int examplesPerSectorTest = conf.gI( "main", "examplesPerSectorTest", 50 );
+  
+  bool visualizationOfResults = conf.gB( "main", "visualizationOfResults", true );
+  bool paintSectorBorders = conf.gB( "main", "paintSectorBorders" , true );
+  bool saveImages = conf.gB( "main", "saveImages", false );
+  std::string destinationForImages = conf.gS( "main", "destinationForImages", "" );
+  
+  string queryStrategyString = conf.gS( "main", "queryStrategy", "random");
+  QueryStrategy queryStrategy;
+  if (queryStrategyString.compare("gpMean") == 0)
+  {
+    queryStrategy = GPMEAN;
+  }
+  else if (queryStrategyString.compare("gpPredVar") == 0)
+  {
+    queryStrategy = GPPREDVAR;
+  }
+  else if (queryStrategyString.compare("gpHeuristic") == 0)
+  {
+    queryStrategy = GPHEURISTIC;
+  }
+  else if (queryStrategyString.compare("gpHeuristicPlus") == 0)
+  {
+    queryStrategy = GPHEURISTICPLUS;
+  }  
+  else if (queryStrategyString.compare("gpBalance") == 0)
+  {
+    queryStrategy = GPBALANCE;
+  }    
+  else
+  {
+    queryStrategy = RANDOM;
+  }
+ 
+  
+  bool verbose = conf.gB ( "main", "verbose", false );
+
+  /* initialize random seed: */
+//   srand ( time ( NULL ) ); //with 0 for reproductive results
+//    srand ( 0 ); //with 0 for reproductive results
+  
+  // ===========================  INIT =========================== 
+  
+  std::vector<std::vector<double> > recognitions_rates(nrOfIncrements+1);
+  std::vector<std::vector<double> > AUC_scores(nrOfIncrements+1);
+  std::vector<std::vector<float> > classification_times(nrOfIncrements+1);
+  std::vector<std::vector<float> > IL_training_times(nrOfIncrements);
+  
+  for ( int run = 0; run < num_runs; run++ )
+  {
+    std::cerr << "run: " << run << std::endl;    
+    srand ( run * 100000 ); //with 0 for reproductive results
+    
+    // read training set
+    vector< NICE::Vector > trainDataOrig;
+    Vector y;
+    sampleFromCheckerboard( nrOfSectorsProDim, sizeOfSector, examplesPerSector, trainDataOrig, y );  
+    
+    // ------------------ TESTING
+    std::vector<NICE::Vector> testData;
+    Vector yTest;
+    sampleFromCheckerboard( nrOfSectorsProDim, sizeOfSector, examplesPerSectorTest, testData, yTest );     
+
+    if ( verbose )
+    {
+      for (uint i = 0; i < trainDataOrig.size(); i++ )
+      {
+        std::cerr << i << " : " << trainDataOrig[i] << std::endl;
+      }    
+            
+      std::cerr << "resulting binary label vector:" << y << std::endl;
+    }
+    
+    std::set<int> classesAvailable;
+    classesAvailable.insert( 0 ); //we have a single negative class
+    classesAvailable.insert( 1 ); //and we have a single positive class
+    
+    std::map<int,int> nrExamplesPerClassInDataset; //simply count how many examples for every class are available
+    std::map<int,std::vector<int> > examplesPerClassInDataset;  //as well as their corresponding indices in the dataset
+    
+    //initialize this storage
+    for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
+    {
+      nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
+      examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
+    }
+
+    //store the indices of the examples
+    for ( uint i = 0; i < y.size(); i++ )
+    {
+      (examplesPerClassInDataset.find( y[i] )->second).push_back(i);
+    }
+    
+    //and count how many examples are in every class
+    for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
+    {
+      nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
+    }
+    
+    //simple output to tell how many examples we have for every class
+    for ( std::map<int,int>::const_iterator it =  nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
+    {
+      cerr << it->first << ": " << it->second << endl;
+    }    
+      
+    Examples examples;   
+    
+    //count how many examples of every class we have while actively selecting new examples
+    //NOTE works only if we have subsequent class numbers
+    NICE::Vector pickedExamplesPerClass( classesAvailable.size(), trainExPerClass);
+    
+    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
+    
+    //chose examples for every class used for training
+    //we will always use the first examples from each class, since the dataset comes already randomly ordered
+    for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++)
+    {
+      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+      if ( verbose )
+        std::cerr << "pick training examples for class " << *clIt << std::endl;
+      
+      for (int i = 0; i < trainExPerClass; i++)
+      {
+        if ( verbose )        
+          std::cerr << "i: " << i << std::endl;
+        int exampleIndex ( rand() % ( exIt->second.size() ) );
+        if ( verbose )        
+          std::cerr << "pick example " << exIt->second[exampleIndex] << " - " << y[exIt->second[exampleIndex] ]  << std::endl;
+        
+        Example example;
+        NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
+        example.svec = new SparseVector(xTrain);
+        //let's take this example and its corresponding label (which should be *clIt)
+        examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) ); 
+        //
+        exIt->second.erase(exIt->second.begin()+exampleIndex);
+      }
+    }    
+    
+    for (uint i = 0; i < examples.size(); i++ )
+    {
+      std::cerr << i << " : ";
+      examples[i].second.svec->store(std::cerr);
+    }
+    
+    
+    //which examples are left to be actively chosen lateron?
+    std::vector<int> unlabeledExamples( y.size() - trainExPerClass*classesAvailable.size() );
+    int exCnt( 0 );
+    for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
+    {
+      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
+      //list all examples of this specific class
+      for (std::vector<int>::const_iterator it = exIt->second.begin(); it != exIt->second.end(); it++)
+      {
+        unlabeledExamples[exCnt] = *it;
+        exCnt++;     
+      }
+    }
+    
+    //Fast-HIK
+    GPHIKClassifierNICE * classifier  = new GPHIKClassifierNICE( &conf );
+      
+    time_t  prep_start_time = clock();
+    FeaturePool fp; // will be ignored
+    classifier->train ( fp, examples );
+    float time_preparation = ( float ) ( clock() - prep_start_time ) ;
+    std::cerr << "Time for training: " << time_preparation / CLOCKS_PER_SEC << std::endl;      
+    
+    //this is only needed for the visualization
+    NICE::Vector yBinGP ( examples.size(), -1 );   
+
+    for ( uint i = 0; i < examples.size(); i++ )
+    {
+      if ( examples[i].first == 1)
+        yBinGP[i] = 1;
+    }  
+    std::cerr << "yBinGP: " << yBinGP << std::endl;
+    
+    int nrOfClassesUsed = classesAvailable.size();
+
+    if ( visualizationOfResults )
+    {
+      NICE::ColorImage img ( nrOfSectorsProDim*sizeOfSector, nrOfSectorsProDim*sizeOfSector );
+      img.set( 255, 255, 255 );
+      
+      if ( paintSectorBorders )
+        paintSectorsInImage( img, nrOfSectorsProDim, sizeOfSector );
+      paintImageBorders( img, nrOfSectorsProDim, sizeOfSector );
+      //paint the example that we can query
+      paintLabeledExamples( img, yBinGP, examples, nrOfSectorsProDim, sizeOfSector, 10 );     
+      //and those that we already know
+      paintUnlabeledExamples( img, trainDataOrig, y,  unlabeledExamples, nrOfSectorsProDim, sizeOfSector, 2 );
+      
+      if ( saveImages )
+      {
+        img.writePPM ( destinationForImages + "imgAL_run"+convertInt(run)+"_000_initialBoard.ppm" );
+      }
+      else
+        showImage(img, "Initial Checkerboard");
+    }
+    
+/*    // ------------------ TESTING
+    std::vector<NICE::Vector> testData;
+    Vector yTest;
+    sampleFromCheckerboard( nrOfSectorsProDim, sizeOfSector, examplesPerSectorTest, testData, yTest ); */   
+        
+    NICE::Matrix confusionMatrix ( 2, 2 );
+    confusionMatrix.set ( 0.0 );      
+    
+    time_t  start_time = clock();
+
+    std::vector<int> chosen_examples_per_class ( nrOfClassesUsed );
+    
+    std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+
+    if ( do_classification  )
+    {
+      NICE::ColorImage imgTest;
+      if ( visualizationOfResults )
+      {
+        imgTest.resize ( nrOfSectorsProDim*sizeOfSector, nrOfSectorsProDim*sizeOfSector );
+        imgTest.set( 255, 255, 255 );
+        
+        if ( paintSectorBorders )
+          paintSectorsInImage( imgTest, nrOfSectorsProDim, sizeOfSector );  
+        paintImageBorders( imgTest, nrOfSectorsProDim, sizeOfSector );  
+        //again paint our labeled training images used so far
+        paintLabeledExamples( imgTest, yBinGP, examples, nrOfSectorsProDim, sizeOfSector, 10 ); 
+      }
+      
+      ClassificationResults results;
+      ClassificationResult result;
+      for ( uint i = 0 ; i < testData.size(); i++ )
+      {
+        const Vector & xstar = testData[i];
+        SparseVector xstar_sparse ( xstar );
+        Example example;
+        example.svec = &xstar_sparse;        
+        result = classifier->classify( example );
+        
+        if ( visualizationOfResults )
+          paintClassificationResult( imgTest, xstar, 2, result, nrOfSectorsProDim, sizeOfSector );
+        
+        result.classno_groundtruth = ( yTest[i] == 1 ) ? 1 : 0;
+        confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
+        results.push_back( result );        
+      }
+      
+      if ( visualizationOfResults )
+      {
+        if ( saveImages )
+        {
+          imgTest.writePPM ( destinationForImages + "imgAL_run"+convertInt(run)+"_incStep_"+convertInt(0)+"ClassifResult.ppm" );
+        }       
+        else
+          showImage(imgTest, "Classification Result");
+      }
+
+      float time_classification = ( float ) ( clock() - start_time ) ;
+      if ( verbose )
+        cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
+      ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );
+      
+      confusionMatrix.normalizeRowsL1();
+      std::cerr << confusionMatrix;
+      double avg_recognition_rate = 0.0;
+      for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+      {
+        avg_recognition_rate += confusionMatrix ( i, i );
+      }        
+      avg_recognition_rate /= confusionMatrix.rows();
+      std::cerr << " run: " << run << " avg recognition rate: " <<  avg_recognition_rate*100 << " % -- " << examples.size() << " training examples used" << std::endl;
+
+      recognitions_rates[0].push_back ( avg_recognition_rate*100 );        
+      std::cerr << "number of classified examples: " << results.size() << std::endl;
+
+      std::cerr << "perform auc evaluation "<< std::endl;
+      double aucScore = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+      
+      std::cerr << " run: " << run << " AUC-score: " <<  aucScore << " % -- " << examples.size() << " training examples used" << std::endl << std::endl;
+
+      AUC_scores[0].push_back ( aucScore*100 );
+    }
+
+    //Now start the Incremental-Learning-Part
+    
+    for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
+    {
+      //simply count how many possible example we have 
+      int nrOfPossibleExamples(  unlabeledExamples.size() );
+      
+      //chose examples for every class used for training
+      Examples newExamples;      
+      
+      NICE::ColorImage imgAL;
+      
+      if ( visualizationOfResults )
+      {     
+        imgAL.resize ( nrOfSectorsProDim*sizeOfSector, nrOfSectorsProDim*sizeOfSector );
+        imgAL.set( 255, 255, 255 );
+  
+        if ( paintSectorBorders )
+          paintSectorsInImage( imgAL, nrOfSectorsProDim, sizeOfSector ); 
+        paintImageBorders( imgAL, nrOfSectorsProDim, sizeOfSector );
+        //again paint our labeled training images used so far
+        paintLabeledExamples( imgAL, yBinGP, examples, nrOfSectorsProDim, sizeOfSector, 10 );
+        //and paint the unlabeled examples that are available to query
+        paintUnlabeledExamples( imgAL, trainDataOrig, y,  unlabeledExamples, nrOfSectorsProDim, sizeOfSector, 2 );
+      }
+            
+      if (queryStrategy == RANDOM)
+      {
+        if ( verbose ) 
+          std::cerr << "print chosen examples: " << std::endl;           
+        for (int i = 0; i < incrementalAddSize; i++)
+        {        
+          int exampleIndex ( rand() % ( unlabeledExamples.size() ) );
+          
+          Example newExample;
+          NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exampleIndex] ];
+          newExample.svec = new SparseVector( xTrain ); 
+          int label( y[ unlabeledExamples[exampleIndex] ] );
+          //store this example for the visualization
+          examples.push_back ( pair<int, Example> ( label, newExample ) );
+          //and store it to add it to the classifier
+          newExamples.push_back ( pair<int, Example> ( label, newExample ) );
+          unlabeledExamples.erase( unlabeledExamples.begin()+exampleIndex );
+          if ( verbose ) 
+            std::cerr << exampleIndex+1 << " / " << incrementalAddSize << std::endl;          
+           
+          pickedExamplesPerClass[label]++;
+          yBinGP.append(label);
+          
+          if ( visualizationOfResults )
+            paintQueriedExamples( imgAL, xTrain, 10, nrOfSectorsProDim, sizeOfSector );
+        }
+      }// end computation for RANDOM
+      else if ( (queryStrategy == GPMEAN) || (queryStrategy == GPPREDVAR) || (queryStrategy == GPHEURISTIC) || (queryStrategy == GPHEURISTICPLUS) || GPBALANCE)
+      {
+        //compute uncertainty values for all examples according to the query strategy
+        std::vector<std::pair<int,double> > scores;
+        scores.clear();
+        time_t  unc_pred_start_time = clock();
+        for (uint exIndex = 0; exIndex < unlabeledExamples.size(); exIndex++)
+        {
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exIndex] ];
+            SparseVector xTrainSparse ( xTrain );
+            Example example;
+            example.svec = &xTrainSparse;        
+
+            if (queryStrategy == GPMEAN)
+            {              
+              //compute the resulting score
+              ClassificationResult r = classifier->classify( example );
+              //we only have two classes with "inverse" outputs
+              scores.push_back( std::pair<int,double> ( exIndex, fabs(r.scores[0]) ) );
+            }
+            else if (queryStrategy == GPPREDVAR)
+            {
+              NICE::Vector singleUncertainties;
+              //use the pred variance computation specified in the config file
+              classifier->predictUncertainty( example, singleUncertainties );
+              //take the maximum of the scores for the predictive variance
+              scores.push_back( std::pair<int,double> ( exIndex, singleUncertainties[0]) );
+            }
+            else if (queryStrategy == GPHEURISTIC)
+            {
+              NICE::Vector singleUncertainties;
+              //use the pred variance computation specified in the config file
+              classifier->predictUncertainty( example, singleUncertainties );
+              //compute the mean values for every class
+              ClassificationResult r = classifier->classify( example );
+              //take the minimum of the scores for the heuristic measure
+              scores.push_back( std::pair<int,double> ( exIndex, fabs(r.scores[0]) / sqrt( squaredNoise + singleUncertainties[0] )) );             
+            }
+            else if (queryStrategy == GPHEURISTICPLUS)
+            {
+              NICE::Vector singleUncertainties;
+              //use the pred variance computation specified in the config file
+              classifier->predictUncertainty( example, singleUncertainties );
+              //compute the mean values for every class
+              ClassificationResult r = classifier->classify( example );
+              //take the minimum of the scores for the heuristic measure
+              scores.push_back( std::pair<int,double> ( exIndex, fabs(r.scores[0]) + sqrt( squaredNoise + singleUncertainties[0] )) );             
+            }
+            else if (queryStrategy == GPBALANCE)
+            {
+              NICE::Vector singleUncertainties;
+              //use the pred variance computation specified in the config file
+              classifier->predictUncertainty( example, singleUncertainties );
+              //compute the mean values for every class
+              ClassificationResult r = classifier->classify( example );
+              double scorePositive (fabs (r.scores[0] - 1.0 ));
+              double scoreNegative (fabs (r.scores[0] + 1.0 ));
+              double score = scorePositive < scoreNegative ? scorePositive : scoreNegative;
+              //take the minimum of the scores for the heuristic measure
+              scores.push_back( std::pair<int,double> ( exIndex, score / ( squaredNoise + singleUncertainties[0] )) );             
+            }            
+        }
+        float time_score_computation = ( float ) ( clock() - unc_pred_start_time ) ;
+          
+        //pick the ones with best score
+        //we could speed this up using a more sophisticated search method
+        
+        if ( (queryStrategy == GPPREDVAR) || (queryStrategy == GPHEURISTICPLUS) )//take the maximum of the scores for the predictive variance or the new weight
+        {
+          std::set<int> chosenExamplesForThisRun;
+          chosenExamplesForThisRun.clear();          
+          for (int i = 0; i < incrementalAddSize; i++)
+          {
+            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+            std::vector<std::pair<int,double> >::iterator worstExample = scores.begin();
+            
+            for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+            {
+              if (jIt->second > bestExample->second)
+                bestExample = jIt;
+              if (jIt->second < worstExample->second)
+                worstExample = jIt;                
+            }
+            if ( verbose ) 
+              std::cerr << "i: " << i << " bestExample: " << bestExample->second << " worstExample: " << worstExample->second << std::endl;
+            
+            Example newExample;    
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ]; 
+            newExample.svec = new SparseVector( xTrain ); 
+            //actually this is the ACTIVE LEARNING step (query a label)
+            int label( y[ unlabeledExamples[bestExample->first] ] );
+            //store this example for the visualization
+            examples.push_back ( pair<int, Example> ( label, newExample ) );    
+            //and store it to add it to the classifier
+            newExamples.push_back ( pair<int, Example> ( label, newExample ) );    
+            //remember the index, to safely remove this example afterwards from unlabeledExamples
+            chosenExamplesForThisRun.insert(bestExample->first);
+            scores.erase(bestExample);
+            pickedExamplesPerClass[label]++;
+            yBinGP.append(label);
+            
+            if ( visualizationOfResults )
+              paintQueriedExamples( imgAL, xTrain, 10, nrOfSectorsProDim, sizeOfSector );
+          }
+          
+          //delete the queried examples from the set of unlabeled ones
+          //do this in an decreasing order in terms of indices to ensure valid access
+          for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+          {
+            unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+          }          
+        }
+        else //take the minimum of the scores for the heuristic, heuristicPlus and the gp mean (minimum margin)
+        {
+          std::set<int> chosenExamplesForThisRun;
+          chosenExamplesForThisRun.clear();
+          for (int i = 0; i < incrementalAddSize; i++)
+          {
+            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
+            std::vector<std::pair<int,double> >::iterator worstExample = scores.begin();
+            
+            for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
+            {
+              if (jIt->second < bestExample->second)
+                bestExample = jIt;
+              if (jIt->second > worstExample->second)
+                worstExample = jIt;               
+            }
+            if ( verbose )
+              std::cerr << "i: " << i << " bestExample: " << bestExample->second << " worstExample: " << worstExample->second << std::endl;
+            Example newExample;    
+            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ];
+            newExample.svec = new SparseVector( xTrain ); 
+            //actually this is the ACTIVE LEARNING step (query a label)
+            int label( y[ unlabeledExamples[bestExample->first] ] );
+            //store this example for the visualization
+            examples.push_back ( pair<int, Example> ( label, newExample ) );
+            //and store it to add it to the classifier
+            newExamples.push_back ( pair<int, Example> ( label, newExample ) );
+            //remember the index, to safely remove this example afterwards from unlabeledExamples
+            chosenExamplesForThisRun.insert(bestExample->first);
+            scores.erase(bestExample);
+            pickedExamplesPerClass[label]++;
+            yBinGP.append(label);
+            
+            if ( visualizationOfResults )
+              paintQueriedExamples( imgAL, xTrain, 10, nrOfSectorsProDim, sizeOfSector );
+          }  
+                    
+          //delete the queried example from the set of unlabeled ones
+          //do this in an decreasing order in terms of indices to ensure valid access
+          for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
+          {
+            unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );             
+          }
+
+        }
+      
+        std::cerr << "Time used to compute query-scores for " <<  nrOfPossibleExamples << " examples: " << time_score_computation / CLOCKS_PER_SEC << " [s]" << std::endl;
+      } // end computation for GPMEAN, GPPREDVAR, GPHEURISTIC, GPHEURISTICPLUS
+      
+      if ( visualizationOfResults )
+      {      
+        if ( saveImages )
+        {
+          imgAL.writePPM ( destinationForImages + "imgAL_run"+convertInt(run)+"_incStep_"+convertInt(incrementationStep+1)+"_queries.ppm" );
+        }
+        else
+          showImage(imgAL, "Old and new queried example");
+      }
+      
+      std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+
+      //incremental learning
+      classifier->addMultipleExamples( newExamples );      
+      
+      //do the classification for evaluating the benefit of new examples
+      if ( do_classification )
+      {
+        
+        NICE::ColorImage imgTest;
+        if ( visualizationOfResults )
+        {
+          imgTest.resize( nrOfSectorsProDim*sizeOfSector, nrOfSectorsProDim*sizeOfSector );
+          imgTest.set( 255, 255, 255 );
+          
+          if ( paintSectorBorders )
+            paintSectorsInImage( imgTest, nrOfSectorsProDim, sizeOfSector ); 
+          paintImageBorders( imgTest, nrOfSectorsProDim, sizeOfSector ); 
+          //again paint our labeled training images used so far
+          paintLabeledExamples( imgTest, yBinGP, examples, nrOfSectorsProDim, sizeOfSector, 10 ); 
+        }
+        
+        time_t  start_time = clock();
+        ClassificationResults results;
+        confusionMatrix.set( 0.0 );
+        ClassificationResult result;
+        for ( uint i = 0 ; i < testData.size(); i++ )
+        {
+          const Vector & xstar = testData[i];
+          SparseVector xstar_sparse ( xstar );
+          
+          Example example;
+          example.svec = &xstar_sparse;        
+          result = classifier->classify( example );          
+          
+          if ( visualizationOfResults )
+            paintClassificationResult( imgTest, xstar, 2, result, nrOfSectorsProDim, sizeOfSector );
+                   
+          result.classno_groundtruth = ( yTest[i] == 1 ) ? 1 : 0; 
+          results.push_back( result );      
+          confusionMatrix ( result.classno_groundtruth , result.classno ) ++;            
+        }     
+
+        float time_classification = ( float ) ( clock() - start_time ) ;
+        if ( verbose )
+          std::cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
+        ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );
+        
+        confusionMatrix.normalizeRowsL1();
+        std::cerr << confusionMatrix;
+        double avg_recognition_rate ( 0.0 );
+        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
+        {
+          avg_recognition_rate += confusionMatrix ( i, i );
+        }
+        avg_recognition_rate /= confusionMatrix.rows();          
+        
+        std::cerr << " run: " << run << " avg recognition rate: " <<  avg_recognition_rate*100 << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl;
+
+        recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );           
+
+        
+        double score = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+        std::cerr << " run: " << run << " AUC-score: " <<  score << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl << std::endl;          
+
+        AUC_scores[incrementationStep+1].push_back ( score*100 );
+        
+        if ( visualizationOfResults )
+        {        
+          if ( saveImages )
+          {
+            imgTest.writePPM ( destinationForImages + "imgAL_run"+convertInt(run)+"_incStep_"+convertInt(incrementationStep+1)+"ClassifResult.ppm" );
+          } 
+          else
+            showImage(imgTest, "Classification result after inc step " + convertInt(incrementationStep+1));  
+        }
+      } //classification after IL adding */
+    } //IL adding of different classes
+    std::cerr << "Final statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
+    
+    //don't waste memory!
+    for ( uint tmp = 0; tmp < examples.size(); tmp++ )
+    {
+      delete examples[tmp].second.svec;
+      examples[tmp].second.svec = NULL;
+    }
+  }//runs 
+      
+  // ================= EVALUATION =========================
+  
+  int nrOfClassesUsed ( 2 ); //binary setting
+
+  if ( do_classification )
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "recognition_rates" << std::endl;
+    for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+    {
+      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::vector<double> mean_recs;
+    std::vector<double> std_dev_recs;
+    for (std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
+    {
+      double mean_rec ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_rec += *itRun;
+      }
+      mean_rec /= it->size();
+      mean_recs.push_back ( mean_rec );
+
+      double std_dev_rec ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_rec += pow ( *itRun - mean_rec, 2 );
+      }
+      std_dev_rec /= it->size();
+      std_dev_rec = sqrt ( std_dev_rec );
+      std_dev_recs.push_back ( std_dev_rec );
+    }
+
+    int datasize ( nrOfClassesUsed*trainExPerClass );
+    for ( uint i = 0; i < recognitions_rates.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " meanRR: " << mean_recs[i] << " stdDevRR: " << std_dev_recs[i] << std::endl;
+      datasize += incrementalAddSize ;
+    }
+    
+    std::cerr << "========================" << std::endl;
+    std::cerr << "AUC_scores" << std::endl;
+    for ( std::vector<std::vector<double> >::const_iterator it = AUC_scores.begin(); it != AUC_scores.end(); it++ )
+    {
+      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
+      {
+        std::cerr << *jt << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    std::vector<double> mean_aucs;
+    std::vector<double> std_dev_aucs;
+    for (std::vector<std::vector<double> >::const_iterator it = AUC_scores.begin(); it != AUC_scores.end(); it++ )
+    {
+      double mean_auc ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        mean_auc += *itRun;
+      }
+      mean_auc /= it->size();
+      mean_aucs.push_back ( mean_auc );
+
+      double std_dev_auc ( 0.0 );
+      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
+      {
+        std_dev_auc += pow ( *itRun - mean_auc, 2 );
+      }
+      std_dev_auc /= it->size();
+      std_dev_auc = sqrt ( std_dev_auc );
+      std_dev_aucs.push_back ( std_dev_auc );
+    }
+
+    datasize  = nrOfClassesUsed*trainExPerClass;
+    for ( uint i = 0; i < recognitions_rates.size(); i++)
+    {
+      std::cerr << "size: " << datasize << " meanAUC: " << mean_aucs[i] << " stdDevAUC: " << std_dev_aucs[i] << std::endl;
+      datasize += incrementalAddSize ;
+    }      
+  }
+  else
+  {
+    std::cerr << "========================" << std::endl;
+    std::cerr << "No classification done therefor no classification times available." << std::endl;
+  } 
+
+  return 0;
+}

+ 99 - 0
progs/bovizeObjectBankFeatures.cpp

@@ -0,0 +1,99 @@
+/** 
+* @file bovizeObjectBankFeatures.cpp
+* @brief convert ObjectBank features to a sparse BoV histogram representation
+* @author Erik Rodner
+* @date 01/23/2012
+
+*/
+#include <algorithm>
+#include "core/basics/Config.h"
+#include "vislearning/cbaselib/MultiDataset.h"
+#include "vislearning/cbaselib/MutualInformation.h"
+#include "vislearning/baselib/Globals.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
/**
 * @brief Binarize the ObjectBank features of every image in a labeled set
 *        against per-detector thresholds and write the result back to the
 *        feature cache as an L1-normalized histogram (one file per image).
 *
 * @param conf          config; only "cache:root" is read here
 * @param thresholds    one threshold per ObjectBank detector (177 expected)
 * @param ls            labeled image set to process
 * @param srcExtension  extension of the raw feature files to read
 * @param dstExtension  extension of the binarized feature files to write
 */
void boviceFeatures ( const Config & conf, const Vector & thresholds, 
                    const LabeledSet & ls, const string & srcExtension, const string & dstExtension )
{
  string cacheroot = conf.gS("cache", "root");

  // iterate over all (class, image) pairs of the labeled set
  LOOP_ALL_S ( ls ) 
  {
    EACH_S(classno, imgfn);
    Globals::setCurrentImgFN ( imgfn ); 
    string cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + srcExtension;
    cerr << "processing " << cachefn << endl;

    // slurp all whitespace-separated doubles of the cached raw feature file
    vector<double> x;
    ifstream ifs ( cachefn.c_str(), ios::in );
    if ( ! ifs.good() )
      fthrow(Exception, "File not found: " << cachefn );
    while ( !ifs.eof() ) {
      double val = 0.0;
      if ( ifs >> val )
        x.push_back(val);
    }
    ifs.close();
   

    // 177 detectors x 252 responses each: count, per detector, how many of
    // its responses exceed that detector's threshold
    // NOTE(review): assumes x holds at least 177*252 values -- not checked here
    Vector xnew ( 177, 0.0 );
    for ( uint i = 0 ; i < 177; i++ )
      for ( uint j = 0 ; j < 252 ; j++ )
        xnew[i] += ( x[j + i*252] > thresholds[i] ) ? 1.0 : 0.0;

    // normalize so the histogram sums to one
    xnew.normalizeL1();

    // write the compact histogram next to the original feature file
    string dst_cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + dstExtension;
    ofstream ofs ( dst_cachefn.c_str(), ios::out );
    if ( ! ofs.good() )
      fthrow(Exception, "Unable to write to " << dst_cachefn );
    ofs << xnew << endl;
    ofs.close ();
  }
    
}
+
+/** 
+    
+    convert ObjectBank features to a sparse histogram representation 
+    
+*/
/**
 * @brief Program entry: read per-detector thresholds from a file, then
 *        binarize + normalize the ObjectBank features of the train and test
 *        splits via boviceFeatures().
 *
 * Config options: "main:dstext" (output extension), "main:threshfile"
 * (threshold file, whitespace-separated doubles).
 */
int main (int argc, char **argv)
{   
  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);

  Config conf ( argc, argv );
    
  MultiDataset md ( &conf );
  const LabeledSet *train = md["train"];
  const LabeledSet *test = md["test"];
  string dstExtention = conf.gS("main", "dstext", ".txt");
  string threshFile = conf.gS("main", "threshfile");

  // one threshold per ObjectBank detector
  Vector thresholds ( 177 );
  ifstream ifs ( threshFile.c_str(), ios::in );
  if ( !ifs.good() )
    fthrow(Exception, "Unable to open threshold file!");

  // NOTE(review): index is incremented even when extraction fails, and the
  // file is not checked for containing exactly 177 values (fewer values
  // leave trailing thresholds uninitialized) -- confirm the file format
  int index = 0;
  while ( !ifs.eof() )
  {
    double val;
    if ( ifs >> val ) {
      if ( index >= thresholds.size() )
        fthrow(Exception, "Error parsing threshold file!");
      thresholds[index] = val;
    }
    index++;
  }

  ifs.close();

  // binarize + normalize features for both dataset splits
  boviceFeatures ( conf, thresholds, *train, ".jpg.feat", dstExtention );
  boviceFeatures ( conf, thresholds, *test, ".jpg.feat", dstExtention );
    
  return 0;
}

+ 172 - 0
progs/compressObjectBankFeatures.cpp

@@ -0,0 +1,172 @@
+/** 
+* @file compressObjectBankFeatures.cpp
+* @brief convert ObjectBank features to a sparse histogram representation
+* @author Erik Rodner
+* @date 01/23/2012
+
+*/
+#include <algorithm>
+#include "core/basics/Config.h"
+#include "vislearning/cbaselib/MultiDataset.h"
+#include "vislearning/cbaselib/MutualInformation.h"
+#include "vislearning/baselib/Globals.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
const bool use_standard = false; // experimental setting: pass the feature through unchanged

/**
 * @brief Map a raw ObjectBank feature vector to the representation used by
 *        this program.
 *
 * With use_standard == true the input is copied verbatim. Otherwise the
 * 44604-dim input (interpreted as 177 detectors x 252 responses each) is
 * reduced to a 177-dim vector.
 *
 * NOTE(review): in the use_standard == false branch every aggregation
 * strategy (max, sum) is commented out, so the returned 177-dim vector is
 * all zeros. Downstream thresholding then operates on zeros -- confirm this
 * is the intended behavior.
 *
 * @param src raw feature values as read from a cache file
 * @return transformed feature vector
 * @throws Exception if src is not a 44604-dim ObjectBank feature
 */
Vector transformFeature ( const vector<double> & src )
{
  Vector dst;

  if ( use_standard ) {
    dst = Vector(src);
  } else {
    if ( src.size() != 44604 )
      fthrow(Exception, "This is not a ObjectBank feature! The size is: " << src.size());

    dst.resize ( 177 );
    dst.set(0.0);
    
    // This was a bad idea: taking the maximum
    /*
    for ( uint i = 0 ; i < 177; i++ )
      dst[i] = *max_element( src.begin() + i*252, src.begin() + (i+1)*252 );
    // even a worse idea: summation
    for ( uint i = 0 ; i < 177; i++ )
      for ( uint j = 0 ; j < 252 ; j++ )
        dst[i] += src[j + i*252];
    */
  }

  return dst;
}
+
+void readPlainData ( const Config & conf, const LabeledSet & ls, LabeledSetVector & X, string extension = ".txt" )
+{
+  string cacheroot = conf.gS("cache", "root");
+
+  X.clear();
+
+  LOOP_ALL_S ( ls ) 
+  {
+    EACH_S(classno, imgfn);
+    Globals::setCurrentImgFN ( imgfn ); 
+    string cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + extension;
+    
+    cerr << "fn: " << imgfn << " cachefn: " << cachefn << endl;
+
+    vector<double> x;
+    ifstream ifs ( cachefn.c_str(), ios::in );
+    if ( ! ifs.good() )
+      fthrow(Exception, "File not found: " << cachefn );
+    while ( !ifs.eof() ) 
+    {
+      double val = 0.0;
+      if ( ifs >> val )
+        x.push_back(val);
+    }
+    ifs.close();
+
+    X.add ( classno, transformFeature( x ) );
+  }
+}
+
/**
 * @brief Write, for every image of a labeled set, the selected + binarized
 *        feature vector to the cache.
 *
 * For each retained feature (map values are source-feature indices) the
 * transformed value is compared against its threshold and stored as 0/1;
 * the resulting vector is L1-normalized before writing.
 *
 * @param conf         config; only "cache:root" is read
 * @param features     retained features, mapping -mutualInformation -> index
 * @param thresholds   per-feature binarization thresholds
 * @param ls           labeled set to process
 * @param srcExtension extension of the raw feature files to read
 * @param dstExtension extension of the output files to write
 */
void saveFeatures ( const Config & conf, const map<double, int> & features, const Vector & thresholds, 
                    const LabeledSet & ls, const string & srcExtension, const string & dstExtension )
{
  string cacheroot = conf.gS("cache", "root");

  LOOP_ALL_S ( ls ) 
  {
    EACH_S(classno, imgfn);
    Globals::setCurrentImgFN ( imgfn ); 
    string cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + srcExtension;
    cerr << "processing " << cachefn << endl;

    // read the raw feature values of this image
    vector<double> x;
    ifstream ifs ( cachefn.c_str(), ios::in );
    if ( ! ifs.good() )
      fthrow(Exception, "File not found: " << cachefn );
    while ( !ifs.eof() ) {
      double val = 0.0;
      if ( ifs >> val )
        x.push_back(val);
    }
    ifs.close();
   
    Vector xt = transformFeature(x);

    // binarize the selected dimensions against their thresholds
    Vector xnew ( features.size() );
    int index = 0;
    for ( map<double, int>::const_iterator j = features.begin(); j != features.end(); j++, index++ )
    {
      int srcIndex = j->second;
      if ( srcIndex >= xt.size() )
        fthrow(Exception, "Bad bug in saveFeatures(...)" );
      xnew[index] = (xt[srcIndex] > thresholds[srcIndex]) ? 1.0 : 0.0;
    }

    // If we do not normalize our features, we pretty much get into 
    // trouble with the minimum kernel...because the vector with only values of "1" is very
    // much similar to every vector
    xnew.normalizeL1();

    string dst_cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + dstExtension;
    ofstream ofs ( dst_cachefn.c_str(), ios::out );
    if ( ! ofs.good() )
      fthrow(Exception, "Unable to write to " << dst_cachefn );
    ofs << xnew << endl;
    ofs.close ();
  }
    
}
+
+/** 
+    
+    convert ObjectBank features to a sparse histogram representation 
+    
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+    
+  MultiDataset md ( &conf );
+  Vector y;
+  const LabeledSet *train = md["train"];
+  LabeledSetVector trainData;
+
+  readPlainData ( conf, *train, trainData, ".jpg.feat" );
+  
+  // compute optimal thresholds for thresholding
+  MutualInformation mi ( true /*verbose*/ );
+  Vector thresholds;
+  Vector mis;
+  mi.computeThresholdsOverall ( trainData, thresholds, mis );
+
+  cerr << mis << endl;
+    
+  int numFeatures = conf.gI("main", "d", mis.size() );
+  cerr << "Retaining " << numFeatures << " features ..." << endl;
+   
+  map<double, int> features;
+  for ( uint i = 0 ; i < mis.size(); i++ )
+    features.insert ( pair<double, int> ( - mis[i], i ) );
+  // all features should be now sorted with features[0] being the most informative one
+
+  // remove boring features
+  map<double, int>::iterator j = features.begin();
+  advance( j, numFeatures );
+  features.erase ( j, features.end() );
+   
+  const LabeledSet *test = md["test"];
+  string dstExtention = conf.gS("main", "dstext", ".txt");
+  saveFeatures ( conf, features, thresholds, *train, ".jpg.feat", dstExtention );
+  saveFeatures ( conf, features, thresholds, *test, ".jpg.feat", dstExtention );
+    
+  return 0;
+}

+ 51 - 0
progs/computeLocalFeatures.cpp

@@ -0,0 +1,51 @@
+#include <iostream>
+#include <fstream>
+
+#include <core/image/ImageT.h>
+#include <core/basics/Config.h>
+
+#include <vislearning/baselib/Globals.h>
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/features/localfeatures/LFonHSG.h>
+#include <vislearning/features/localfeatures/LFWriteCache.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+int main(int argc, char **argv)
+{
+  Config conf ( argc, argv );
+
+  string destForFeat = conf.gS("main", "destForFeat", "/tmp/");
+  bool verbose = conf.gB("main", "verbose", false);
+
+  MultiDataset md ( &conf );
+
+  const LabeledSet & all = *(md["all"]);
+
+  const ClassNames & classNames = md.getClassNames("all");
+
+  LocalFeatureRepresentation *lfrep = NULL;
+  lfrep = new LFonHSG(&conf);
+  
+  LocalFeatureRepresentation *lfrep_save = new LFWriteCache ( &conf, lfrep );
+  lfrep = lfrep_save;
+  
+  LOOP_ALL_S ( all ) 
+  {
+    EACH_S(classno, imgfilename);
+    Globals::setCurrentImgFN ( imgfilename ); 
+    if (verbose)
+      std::cerr << "imgfilename: " << imgfilename << std::endl;
+    Image img (imgfilename);
+    NICE::VVector positions;
+    NICE::VVector features;
+    // Extract the descriptor-Values from a given grayscale-Image.
+    lfrep->extractFeatures ( img, features, positions );
+    if (verbose)
+      std::cerr << "features.size: " << features.size() << std::endl;
+  }
+
+  return 0;
+}

+ 117 - 0
progs/computeNormalizedHistogramFeatures.cpp

@@ -0,0 +1,117 @@
+/** 
+* @file computeNormalizedHistogramFeatures.cpp
+* @brief simply compute randomly generated, normalized histogram features
+* @author Alexander Freytag
+* @date 07-05-2012 (dd-mm-yyyy)
+*/
+
+#include <vector>
+#include <iostream>
+#include <cstdlib>
+#include <ctime>
+
+#include <core/basics/Config.h>
+#include <core/vector/MatrixT.h>
+#include <core/vector/VectorT.h>
+#include <core/vector/Algorithms.h>
+#include <core/vector/MatrixT.h>
+
+using namespace std;
+using namespace NICE;
+
+
+int main (int argc, char* argv[])
+{
+
+  Config conf ( argc, argv );
+
+  int n = conf.gI("main", "nrOfExamplesPerClass", 10);
+  int d = conf.gI("main", "nrOfDimensions", 20);
+  int m = conf.gI("main", "nrOfClasses", 2);
+  string destination = conf.gS( "main", "destination", "/tmp/features.data");
+  bool saveLastDimension = conf.gB( "main", "saveLastDimension", false);
+  
+  if (d < m)
+  {
+    std::cerr << "Sry, you specified less dimensions than number of classes" << std::endl;
+    return -1;
+  }
+  if (d <= 1 )
+  {
+    std::cerr << "Sry, you have to specify at least two dimensions" << std::endl;
+    return -1;
+  }
+  
+  NICE::Vector y (n*m);
+  NICE::Vector yBin (n*m);
+  NICE::Matrix data;
+  
+  if (saveLastDimension)
+  {
+    data.resize( n*m, d-1 );
+  }
+  else
+  {
+    data.resize( n*m, d-1 );
+  }
+  data.set( 0.0 );
+
+  int classCount(0);
+  for (int i = 0; i < n*m; i++)
+  {
+    double meanPrimary ( std::min(0.25, 1.0/(d-1) ) );
+    double meanSecondary ( 0.75 / (2.0*d) );
+    double stdDev( 0.75 / (3.0*d) );
+       
+    double sum(0.0);
+    double sampleValue(0.0);
+    
+    for (int dim = 0; dim < d-1; dim++)
+    {
+
+      do 
+      { 
+        sampleValue = fabs( randGaussDouble ( stdDev ) );
+        
+        if (dim == classCount)
+          sampleValue += meanPrimary;
+        else
+          sampleValue += meanSecondary;
+      } while ( ( sum + sampleValue ) >= 1.0) ; //pay attention that the normalization works properly
+      sum += sampleValue;      
+      data(i, dim) = sampleValue;
+    }
+    
+    //normalization
+    if ( saveLastDimension )
+    {
+      data( i, d-1 ) = 1.0 - sum;
+      std::cerr << "i: " << i << " d-1: " << d-1 << " sum: " << sum << " 1.0 - sum: " << 1.0-sum << std::endl;
+    }
+    
+    //save the corresponding label
+    y[i] = classCount;
+    if (classCount < m/2.0)
+      yBin[i] = 0;
+    else
+      yBin[i] = 1;
+
+    if ( (i % n ) == (n-1))
+    {
+      classCount++;
+    }
+  }
+  
+  
+  std::filebuf fb;
+  fb.open (destination.c_str(),ios::out);
+  std::ostream os(&fb);
+//
+  os << data << std::endl;  
+  os << yBin << std::endl;
+  os << y;
+// 
+  fb.close();  
+  
+  return 0;
+}

+ 184 - 0
progs/computeSIFTFeatures.cpp

@@ -0,0 +1,184 @@
+#include <iostream>
+#include <fstream>
+#include <vector>
+
+#include <core/vector/VectorT.h>
+#include <core/vector/MatrixT.h>
+#include <core/image/ImageT.h>
+#include <core/basics/Config.h>
+#include <core/vector/VVector.h>
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/features/localfeatures/LFonHSG.h>
+#include <vislearning/features/localfeatures/GenericLocalFeatureSelection.h>
+#include <vislearning/features/simplefeatures/FCCodebookHistBin.h>
+#include <vislearning/features/simplefeatures/CodebookPrototypes.h>
+#include <vislearning/math/cluster/KMeans.h>
+#include <set>
+
+#include <vislearning/baselib/Globals.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+int main(int argc, char **argv)
+{
+  Config conf ( argc, argv );
+
+  string destForFeat = conf.gS("main", "destForFeat", "/tmp/");
+  double percentageOfPatchesForKMeans = conf.gD("main", "percentageOfPatchesForKMeans", 0.3);
+  int nrOfClusters = conf.gI("main", "nrOfClusters", 200);
+  bool verbose = conf.gB("main", "verbose", false);
+
+  MultiDataset md ( &conf );
+
+  const LabeledSet & train = *(md["train"]);
+  const LabeledSet & test = *(md["test"]);
+
+  const ClassNames & classNamesTrain = md.getClassNames("train");
+  const ClassNames & classNamesTest = md.getClassNames("test");
+
+  LFonHSG GridForSIFT(&conf);
+
+  std::vector<VVector> featuresTrainset;
+  
+  LOOP_ALL_S ( train ) 
+  {
+    EACH_S(classno, imgfilename);
+    if (verbose)
+      std::cerr << "imgfilename: " << imgfilename << std::endl;
+    Image img (imgfilename);
+    NICE::VVector positions;
+    NICE::VVector features;
+    // Extract the descriptor-Values from a given grayscale-Image.
+    GridForSIFT.extractFeatures ( img, features, positions );
+    if (verbose)
+      std::cerr << "features.size: " << features.size() << std::endl;
+    featuresTrainset.push_back(features);    
+  }
+  int trainsize(featuresTrainset.size());
+  int nrOfFivePercent(round(0.05*trainsize));
+
+  std::cerr << "Now sample randomly some patches (SIFT-features)" << std::endl;
+
+  VVector x;
+  x.clear();
+
+  int count(0);
+  //randomly sampling of patches -- take percentageOfPatchesForKMeans % of the patches from each image
+  for (int i = 0; i < trainsize; i++, count++)
+  {
+    //small stat bar
+    if ( (count % nrOfFivePercent) == 0)
+      std::cerr << "sampling for image nr: " << count << " / " << trainsize << std::endl;
+    
+    std::set<int> indices;
+    int currentSize(featuresTrainset[i].size());
+    for (int k = 0; k < (int) round(percentageOfPatchesForKMeans*currentSize); k++)
+    {
+      int idx( round((float)rand() * (currentSize-1)/ (float)(RAND_MAX)) );
+      while ( (indices.find(idx) != indices.end()) && (i > 0))
+      {
+        idx = ( round((float)rand() * (currentSize-1)/ (float)(RAND_MAX)) );
+      }
+      x.push_back(featuresTrainset[i][idx]);
+      indices.insert(idx);
+    }
+  }
+
+  std::cerr << "sampling done - final number of patches: " << x.size() << std::endl;
+  std::cerr << "Apply k-Means on randomly sampled patches" << std::endl;
+
+  VVector prototypes;
+  std::vector<double> weights;
+  std::vector<int> assignments;
+  std::cerr << "nrOfClusters: " << nrOfClusters << std::endl;
+
+  ClusterAlgorithm *clusteralg = new KMeans (nrOfClusters);
+  clusteralg->cluster(x, prototypes, weights, assignments);
+  delete clusteralg;
+
+  std::cerr << "prototypes.size(): " << prototypes.size() << std::endl;
+
+  CodebookPrototypes codebook ( prototypes );
+
+  std::cerr << "Now apply our k-means codebook to every image for computing the resulting features" << std::endl;
+  FCCodebookHistBin featureGenerator(&conf, &GridForSIFT, "sum", &codebook);
+  std::cerr << "codebook.size(): " << codebook.size() << std::endl;
+
+  int cntTrain(0);
+  LOOP_ALL_S ( train ) 
+  {
+    //small stat bar
+    if ( (cntTrain % nrOfFivePercent) == 0)
+      std::cerr << "image nr: " << cntTrain << " / " << trainsize << std::endl;
+    
+    EACH_S(classno, imgfilename);
+    
+    std::string finalDest(classNamesTrain.text(classno) +imgfilename);
+    size_t posOfEnding = finalDest.find_last_of(".");
+    size_t lengthOfEnding = finalDest.size() - posOfEnding;
+    finalDest =  finalDest.erase(posOfEnding, lengthOfEnding)+".txt";
+    size_t posOfclassSlash = finalDest.find_last_of("/");
+    size_t posOfFolderSlash = finalDest.find_last_of("/", posOfclassSlash-1);
+    finalDest =  finalDest.erase(0, posOfFolderSlash+1);
+    finalDest = destForFeat + finalDest;
+//    std::cerr << "finalDest: " << finalDest<< std::endl;
+    
+    Image img (imgfilename);
+    NICE::Vector vec;
+    //compute the histogram
+    featureGenerator.convert (img, vec );
+    
+    std::filebuf fb;
+    fb.open (finalDest.c_str(),ios::out);
+    ostream outstream(&fb);
+    outstream << vec;
+    fb.close();
+    
+    cntTrain++;
+  }
+
+  int testsize(0);
+  LOOP_ALL_S ( test ) 
+  {
+    testsize++;
+  }
+  int nrOfFivePercentTest(round(0.05*testsize));
+
+  int cntTest(0);
+  LOOP_ALL_S ( test ) 
+  {
+    //small stat bar
+    if ( (cntTest % nrOfFivePercentTest) == 0)
+      std::cerr << "image nr: " << cntTest << " / " << testsize << std::endl;
+    
+    EACH_S(classno, imgfilename);
+    
+    std::string finalDest(classNamesTest.text(classno) +imgfilename);
+    size_t posOfEnding = finalDest.find_last_of(".");
+    size_t lengthOfEnding = finalDest.size() - posOfEnding;
+    finalDest =  finalDest.erase(posOfEnding, lengthOfEnding)+".txt";
+    size_t posOfclassSlash = finalDest.find_last_of("/");
+    size_t posOfFolderSlash = finalDest.find_last_of("/", posOfclassSlash-1);
+    finalDest =  finalDest.erase(0, posOfFolderSlash+1);
+    finalDest = destForFeat + finalDest;
+//    std::cerr << "finalDest: " << finalDest<< std::endl;
+    
+    Image img (imgfilename);
+    NICE::Vector vec;
+    //compute the histogram
+    featureGenerator.convert (img, vec );
+    
+    std::filebuf fb;
+    fb.open (finalDest.c_str(),ios::out);
+    ostream outstream(&fb);
+    outstream << vec;
+    fb.close();
+    
+    cntTest++;
+  }
+
+  return 0;
+}

+ 102 - 0
progs/datatools.h

@@ -0,0 +1,102 @@
+#ifndef DATATOOLSINCLUDE
+#define DATATOOLSINCLUDE
+
+#include <iostream>
+
+#include <core/basics/Config.h>
+
+#include <vislearning/baselib/Globals.h>
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/cbaselib/LabeledSet.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
/**
 * @brief Read one cached feature vector per image of a labeled set.
 *
 * For every image the corresponding cache file (cacheroot + category path +
 * extension) is parsed via the stream operator of VectorType; the vector is
 * appended to X and the class label is stored at the same position in y.
 * (Template, so the header-only definition is ODR-safe.)
 *
 * @param conf      config; only "cache:root" is read
 * @param ls        labeled set to process
 * @param X         output container of feature vectors (needs size()/push_back)
 * @param y         output: class label per image (parallel to X)
 * @param extension extension of the cached feature files
 */
template <typename MatrixType, typename VectorType>
void readData ( const NICE::Config & conf, const OBJREC::LabeledSet & ls, MatrixType & X, Vector & y, std::string extension = ".txt" )
{
  std::string cacheroot = conf.gS("cache", "root");

  y.resize ( ls.count() );
  X.clear();

  LOOP_ALL_S ( ls ) 
  {
    EACH_S(classno, imgfn);
    Globals::setCurrentImgFN ( imgfn ); 
    std::string cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + extension;
    
    std::cerr << "fn: " << imgfn << " cachefn: " << cachefn << std::endl;

    // label goes to the slot the next feature will occupy
    y[ X.size() ] = classno;

    VectorType x;
    std::ifstream ifs ( cachefn.c_str(), std::ios::in );
    if ( ! ifs.good() )
      fthrow(Exception, "File not found: " << cachefn );
    ifs >> x;
    X.push_back ( x );
    ifs.close();
  }
}
+
/**
 * @brief Overload of readData that additionally records the image filename
 *        of every example (parallel to X and y).
 *
 * @param conf      config; only "cache:root" is read
 * @param ls        labeled set to process
 * @param X         output container of feature vectors
 * @param y         output: class label per image (parallel to X)
 * @param filenames output: original image filename per example
 * @param extension extension of the cached feature files
 */
template <typename MatrixType, typename VectorType>
void readData ( const NICE::Config & conf, const OBJREC::LabeledSet & ls, MatrixType & X, Vector & y, std::vector<std::string> & filenames, std::string extension = ".txt" )
{
  std::string cacheroot = conf.gS("cache", "root");

  y.resize ( ls.count() );
  X.clear();
  filenames.clear();

  LOOP_ALL_S ( ls ) 
  {
    EACH_S(classno, imgfn);
    Globals::setCurrentImgFN ( imgfn ); 
    std::string cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + extension;
    
    std::cerr << "fn: " << imgfn << " cachefn: " << cachefn << std::endl;
    
    filenames.push_back( imgfn );

    // label goes to the slot the next feature will occupy
    y[ X.size() ] = classno;

    VectorType x;
    std::ifstream ifs ( cachefn.c_str(), std::ios::in );
    if ( ! ifs.good() )
      fthrow(Exception, "File not found: " << cachefn );
    ifs >> x;
    X.push_back ( x );
    ifs.close();
  }
}
+
+void readDataAwA ( const NICE::Config & conf, const OBJREC::LabeledSet & ls, std::vector<NICE::Vector> & X, NICE::Vector & y, std::string extension = ".txt" )
+{
+  std::string cacheroot = conf.gS("cache", "root");
+
+  y.resize ( ls.count() );
+  X.clear();
+
+  LOOP_ALL_S ( ls ) 
+  {
+    EACH_S(classno, imgfn);
+    Globals::setCurrentImgFN ( imgfn ); 
+    std::string cachefn = Globals::getCacheFilename ( cacheroot, Globals::SORT_CATEGORIES ) + extension;
+    
+    std::cerr << "fn: " << imgfn << " cachefn: " << cachefn << std::endl;
+
+    y[ X.size() ] = classno;
+
+    std::ifstream ifs ( cachefn.c_str(), std::ios::in );
+    if ( ! ifs.good() )
+      fthrow(Exception, "File not found: " << cachefn );
+    NICE::Vector x (ifs,true);
+//     ifs >> x;
+    X.push_back ( x );
+    ifs.close();
+  }
+}
+
+#endif

+ 117 - 0
progs/eccv2012-15scenes-fasthik.cpp

@@ -0,0 +1,117 @@
+/** 
+* @file eccv2012-15scenes-fasthik.cpp
+* @brief ECCV 2012 Experiment with 15 Scenes
+* @author Erik Rodner
+* @date 01/17/2012
+
+*/
+
+#include <vector>
+
+#include <core/basics/vectorio.h>
+#include <core/basics/Timer.h>
+#include <core/basics/Config.h>
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/baselib/Globals.h>
+
+#include <gp-hik-core/FastMinKernel.h>
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFWeightedDim.h>
+#include <gp-hik-core/parameterizedFunctions/PFExp.h>
+#include <gp-hik-core/tools.h>
+
+#include <gp-hik-exp/GPHIKClassifierNICE.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+#include "datatools.h"
+
+
+// this function is very much adapted to Wus files
+void readSparseExamples ( const string & fn, Examples & examples )
+{
+  cerr << "Reading " << fn << endl;
+  ifstream ifs ( fn.c_str(), ios::in );
+  if ( ! ifs.good() ) {
+    fthrow(Exception, "Unable to read " << fn );
+  }
+  while ( !ifs.eof() )
+  {
+    int classno;
+    if ( !(ifs >> classno) ) break;
+    classno--;
+    SparseVector *v = new SparseVector; 
+    v->restore ( ifs, SparseVector::FORMAT_INDEX_LINE );
+    v->setDim( 10000 );
+    // evil HACK FIXME
+    v->multiply(1.0 / (128.0*1000.0));
+
+
+    Example example;
+    example.svec = v;
+
+    cerr << "read example of size " << v->size() << " belonging to class " << classno << endl;
+    cerr << "min: " << v->min() << " max:" << v->max() << endl;
+    
+    examples.push_back ( pair<int, Example> ( classno, example ) );
+  }
+  ifs.close();
+}
+
+/** 
+    
+    ECCV 2012 Experiment with 15 Scenes 
+    
+*/
/**
 * @brief ECCV'12 15-Scenes experiment: train a GP-HIK classifier on sparse
 *        features read from file, classify the test set, and print the
 *        column-normalized confusion matrix plus average recognition rate.
 *
 * Config options: "main:train" and "main:test" (feature files in Wu's
 * format, see readSparseExamples).
 */
int main (int argc, char **argv)
{   
  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);

  Config conf ( argc, argv );
 
  GPHIKClassifierNICE classifier ( &conf, "GPHIKClassifier" );

  string trainFilename = conf.gS("main", "train"); 
  
  Examples trainExamples;
  readSparseExamples ( trainFilename, trainExamples );

  FeaturePool fp; // ignored anyway
  classifier.train ( fp, trainExamples );

  cerr << "Reading test data file" << endl;
  string testFilename = conf.gS("main", "test"); 
  Examples testExamples;
  readSparseExamples ( testFilename, testExamples );

 
  Timer t;
  // 15 scene categories; rows are indexed by the ESTIMATED class (see below)
  Matrix confusion ( 15, 15, 0.0 );
  
  int index = 1;
  for ( Examples::iterator example_i = testExamples.begin(); example_i != testExamples.end(); example_i++,index++ )
  {
    uint classno_groundtruth = example_i->first;

    // time only the classification call itself
    t.start();
    ClassificationResult r = classifier.classify ( example_i->second );
    uint classno_estimated = r.classno;
    t.stop();
   
    r.scores.store(cerr);
    cerr << "[" << index << " / " << testExamples.size() << "] " << classno_estimated << " " << classno_groundtruth << " time: " << t.getLast() << endl;

    //confusion( classno_groundtruth, classno_estimated ) += 1;
    // rows = estimated class, columns = ground truth; this matches the
    // per-ground-truth-class (column) normalization below
    confusion( classno_estimated, classno_groundtruth ) += 1;
  }

  // each column (ground-truth class) sums to one
  confusion.normalizeColumnsL1();
  cerr << confusion << endl;

  cerr << "average recognition rate: " << confusion.trace()/confusion.rows() << endl;
  return 0;
}

+ 147 - 0
progs/eccv2012-15scenes.cpp

@@ -0,0 +1,147 @@
+/** 
+* @file eccv2012-15scenes.cpp
+* @brief ECCV 2012 Experiment with 15 Scenes
+* @author Erik Rodner
+* @date 01/17/2012
+
+*/
+
+#include <vector>
+
+#include <core/basics/vectorio.h>
+#include <core/basics/Timer.h>
+#include <core/basics/Config.h>
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/baselib/Globals.h>
+
+#include <gp-hik-core/FastMinKernel.h>
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFWeightedDim.h>
+#include <gp-hik-core/parameterizedFunctions/PFExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFMKL.h>
+
+#include <gp-hik-core/tools.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+#include "datatools.h"
+
+/** 
+    
+    ECCV 2012 Experiment with 15 Scenes 
+    
+    NOTE: usage of this test-function is not recommended. Use eccv2012-15scenes-fasthik instead, which provides a proper interface
+    
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+ 
+  // select the parameterized hyperparameter transformation (gp-hik-core)
+  string pf_s = conf.gS("main", "transform", "absexp");
+  ParameterizedFunction *pf;
+
+  double parameterLowerBound = conf.gD("GPHIKClassifier", "parameter_lower_bound", 1.0 );
+  double parameterUpperBound = conf.gD("GPHIKClassifier", "parameter_upper_bound", 5.0 );
+  if ( pf_s == "absexp" )
+    pf = new PFAbsExp( 1.0, parameterLowerBound, parameterUpperBound );
+  else if ( pf_s == "exp" )
+    pf = new PFExp ( 1.0, parameterLowerBound, parameterUpperBound );
+  else if ( pf_s == "weighted" )
+    pf = new PFWeightedDim ( conf.gI("main", "dimension"), 0.0, 5.0 );
+  else if ( pf_s == "MKL" ) {
+      std::cerr << "use MKL feature transformation specific for VISAPP experiments" << std::endl;
+      std::set<int> steps; steps.insert(4000); steps.insert(6000); //specific for VISAPP
+      pf = new PFMKL( steps, parameterLowerBound, parameterUpperBound );
+    }
+  else
+    fthrow(Exception, "Parameterized function type " << pf_s << " not yet implemented");
+
+  cerr << "Transformation type: " << pf_s << endl;
+
+  string ext = conf.gS("main", "ext", ".txt"); 
+  cerr << "Using cache extension: " << ext << endl;
+
+  MultiDataset md ( &conf );
+	const ClassNames & classNamesTrain = md.getClassNames("train");
+  
+  // read training set
+  vector< vector<double> > trainData;
+  Vector y;
+  const LabeledSet *train = md["train"];
+
+  readData< vector< vector<double> >, vector<double> >  ( conf, *train, trainData, y, ext );
+  // switch to dimension-major layout, as expected by FastMinKernel
+  transposeVectorOfVectors ( trainData );
+
+  // DEBUG
+#if 0
+  Quantization q ( conf.gI("GPHIKClassifier", "num_bins") );
+  for ( uint i = 0 ; i < trainData.size() ; i++ )
+    for ( uint j = 0 ; j < trainData[i].size(); j++ )
+      trainData[i][j] = q.getPrototype ( q.quantize( trainData[i][j] ) );
+#endif
+  // END DEBUG
+
+  // after the transpose, trainData.size() is the number of DIMENSIONS, not examples
+  cerr << "Size of the training set: " << trainData.size() << endl;
+
+  // fixed GP label-noise level
+  double noise = 0.1;
+
+  // NOTE(review): pf and fmk are handed over as raw pointers and never deleted
+  // in this program -- verify whether FMKGPHyperparameterOptimization takes
+  // ownership (potential leak otherwise).
+  FastMinKernel *fmk = new FastMinKernel ( trainData, noise );
+
+
+  FMKGPHyperparameterOptimization hyper ( &conf, pf, fmk );
+
+  hyper.optimize ( y );
+ 
+ 
+  // ------------------ TESTING
+  // q'n'd memory extensive solution
+  const LabeledSet *test = md["test"];
+  VVector testData;
+  Vector yTest;
+  readData< VVector, Vector > ( conf, *test, testData, yTest, ext );
+
+  // DEBUG
+#if 0
+  for ( uint i = 0 ; i < testData.size() ; i++ )
+    for ( uint j = 0 ; j < testData[i].size(); j++ )
+      testData[i][j] = q.getPrototype ( q.quantize( testData[i][j] ) );
+#endif
+  //DEBUG END
+
+  Timer t;
+  // rows: estimated class, columns: ground-truth class (see increment below)
+  Matrix confusion ( y.Max()+1, yTest.Max() + 1, 0.0 );
+  for ( uint i = 0 ; i < testData.size(); i++ )
+  {
+    const Vector & xstar = testData[i];
+    // the following is just to be sure that we 
+    // do not count the time necessary for conversion
+    SparseVector xstar_sparse ( xstar );
+
+
+    uint classno_groundtruth = yTest[i];
+    SparseVector scores;
+
+    t.start();
+    uint classno_estimated = hyper.classify ( xstar_sparse, scores );
+    t.stop();
+    
+    scores.store(cerr);
+    cerr << "[" << i << " / " << testData.size() << "] " << classno_estimated << " " << classno_groundtruth << " time: " << t.getLast() << endl;
+
+    //confusion( classno_groundtruth, classno_estimated ) += 1;
+    confusion( classno_estimated, classno_groundtruth ) += 1;
+  }
+
+  // column-stochastic -> diagonal entries become per-class recall rates
+  confusion.normalizeColumnsL1();
+  cerr << confusion << endl;
+
+  cerr << "average recognition rate: " << confusion.trace()/confusion.rows() << endl;
+
+  return 0;
+}

+ 162 - 0
progs/eccv2012-AwA.cpp

@@ -0,0 +1,162 @@
+/** 
+* @file eccv2012-AwA.cpp
+* @brief ECCV 2012 Experiment with Animals with Attributes
+* @author Alexander Freytag
+* @date 06-02-2012 (dd-mm-yyyy)
+
+*/
+
+#include <vector>
+
+//----------
+
+#include <core/basics/vectorio.h>
+#include <core/basics/Timer.h>
+#include <core/basics/Config.h>
+
+//----------
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/baselib/Globals.h>
+
+//----------
+
+#include <gp-hik-core/FastMinKernel.h>
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFExp.h>
+#include <gp-hik-core/tools.h>
+
+//----------
+
+#include "datatools.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+/** 
+    
+    ECCV 2012 Experiment with Animals with Attributes
+    
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+ 
+  string pf_s = conf.gS("main", "transform", "absexp");
+  int nrRuns = conf.gI("main", "nrRuns", 1);
+  int dim = conf.gI("main", "dim", 2000);
+  ParameterizedFunction *pf;
+
+  // FIX: the optimization bounds are doubles -- they were previously read with
+  // gI(), which truncates both configured values and the 1.0/5.0 defaults to
+  // int (cf. eccv2012-15scenes.cpp, which correctly uses gD()).
+  double parameterLowerBound = conf.gD("HIKGP", "parameter_lower_bound", 1.0 );
+  double parameterUpperBound = conf.gD("HIKGP", "parameter_upper_bound", 5.0 );
+  if ( pf_s == "absexp" )
+    pf = new PFAbsExp( 1.0, parameterLowerBound, parameterUpperBound );
+  else if ( pf_s == "exp" )
+    pf = new PFExp ( 1.0, parameterLowerBound, parameterUpperBound );
+  else
+    fthrow(Exception, "Parameterized function type " << pf_s << " not yet implemented");
+  
+  cerr << "Transformation type: " << pf_s << endl;
+
+  string ext = conf.gS("main", "ext", ".txt"); 
+  cerr << "Using cache extension: " << ext << endl;
+
+  double AARR(0.0); // averaged average recognition rate :)
+  
+  for (int run = 0; run < nrRuns; run++)
+  {  
+    MultiDataset md ( &conf );
+    const ClassNames & classNamesTrain = md.getClassNames("train");    
+
+    // read training set
+    // FIX: declared per run -- previously these lived outside the loop and were
+    // never cleared, so repeated runs could accumulate stale data depending on
+    // readData's behavior.
+    vector< vector<double> > trainData;
+    Vector y;
+
+    const LabeledSet *train = md["train"];
+
+    readData< vector< vector<double> >, vector<double> >  ( conf, *train, trainData, y, ext );  //works correctly with AwA
+
+    // switch to dimension-major layout, as expected by FastMinKernel
+    transposeVectorOfVectors ( trainData );
+
+    // DEBUG
+  #if 0
+    Quantization q ( conf.gI("HIKGP", "num_bins") );
+    for ( uint i = 0 ; i < trainData.size() ; i++ )
+      for ( uint j = 0 ; j < trainData[i].size(); j++ )
+        trainData[i][j] = q.getPrototype ( q.quantize( trainData[i][j] ) );
+  #endif
+    // END DEBUG
+
+    // fixed GP label-noise level
+    double noise = 0.1;
+
+    // NOTE(review): fmk is handed over as a raw pointer -- verify whether
+    // FMKGPHyperparameterOptimization takes ownership (potential leak otherwise).
+    FastMinKernel *fmk = new FastMinKernel ( trainData, noise, dim );
+
+
+    FMKGPHyperparameterOptimization hyper ( &conf, pf, fmk );
+
+    hyper.optimize ( y );
+    
+    // ------------------ TESTING
+    // q'n'd memory extensive solution
+    const LabeledSet *test = md["test"];  
+    VVector testData;
+    Vector yTest;
+    readDataAwA ( conf, *test, testData, yTest, ext ); //ok, reading the data works also correctly with the AwA-dataformat
+
+    // DEBUG
+  #if 0
+    for ( uint i = 0 ; i < testData.size() ; i++ )
+      for ( uint j = 0 ; j < testData[i].size(); j++ )
+        testData[i][j] = q.getPrototype ( q.quantize( testData[i][j] ) );
+  #endif
+    //DEBUG END
+
+    Timer t;
+    // rows: ground-truth class, columns: estimated class (see increment below)
+    Matrix confusion ( y.Max()+1, yTest.Max() + 1, 0.0 );
+    for ( uint i = 0 ; i < testData.size(); i++ )
+    {
+      const Vector & xstar = testData[i];
+      // the following is just to be sure that we 
+      // do not count the time necessary for conversion
+      SparseVector xstar_sparse ( xstar ); //default tolerance is 10e-10
+
+      uint classno_groundtruth = yTest[i];
+      SparseVector scores;
+
+      t.start();
+      uint classno_estimated = hyper.classify ( xstar_sparse, scores );
+      t.stop();
+      
+      scores.store(cerr);
+      cerr << "[" << i << " / " << testData.size() << "] " << classno_estimated << " " << classno_groundtruth << " time: " << t.getLast() << endl;
+
+      confusion( classno_groundtruth, classno_estimated ) += 1;
+      
+    }
+
+    // row-stochastic -> diagonal entries become per-class recall rates
+    confusion.normalizeRowsL1();
+    cerr << confusion << endl;
+
+    cerr << "average recognition rate: " << confusion.trace()/confusion.rows() << endl;
+    
+    AARR += confusion.trace()/confusion.rows();
+  }
+  
+  // mean of the per-run average recognition rates
+  AARR /= (nrRuns);
+  
+  std::cerr << "final averaged recognition rate: " << AARR << std::endl;
+
+  delete pf;
+  return 0;
+}

+ 444 - 0
progs/eccv2012-synthetic.cpp

@@ -0,0 +1,444 @@
+/** 
+* @file eccv2012-synthetic.cpp
+* @brief ECCV 2012 Experiment with synthetic histograms to show the possibility of feature relevance selection
+* @author Alexander Freytag
+* @date 17-02-2012 (dd-mm-yyyy)
+*/
+
+#include <vector>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <limits>
+
+#include <core/basics/vectorio.h>
+#include <core/basics/Config.h>
+#include <core/basics/numerictools.h>
+#include <core/basics/Timer.h>
+#include <core/image/Histogram.h>
+#include <core/vector/VectorT.h>
+
+#include <vislearning/cbaselib/ClassificationResults.h>
+
+#include <gp-hik-core/FastMinKernel.h>
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFWeightedDim.h>
+
+#include <gp-hik-core/tools.h>
+
+
+
+
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+void sampleDataOneExample(std::vector<double> & trainData, const int & classNr)
+{
+  // Draw one synthetic 8-bin histogram for the given class and append its
+  // entries to trainData. The first seven bins are sampled independently
+  // (and re-drawn as a whole until their sum does not exceed 1.0); the
+  // eighth bin receives the remaining probability mass.
+  //
+  // Per-dimension distribution: a positive sigma selects fabs of a zero-mean
+  // Gaussian with that standard deviation, a non-positive entry selects a
+  // uniform draw from [0, 0.25]. The two classes differ in dims 0..3 only.
+  const double sigmaClass1[7] = { 0.03, -1.0, 0.07, 0.05, -1.0, -1.0, -1.0 };
+  const double sigmaClass2[7] = { -1.0, 0.07, 0.12, 0.05, -1.0, -1.0, -1.0 };
+  const double *sigma = (classNr == 1) ? sigmaClass1 : sigmaClass2;
+
+  double h[8];
+  double total;
+  do
+  {
+    total = 0.0;
+    for (int d = 0; d < 7; d++)
+    {
+      h[d] = (sigma[d] > 0.0) ? fabs(randGaussDouble(sigma[d])) : randDouble(0.25);
+      total += h[d];
+    }
+  } while (total > 1.0); // re-draw until some mass is left for the last bin
+
+  h[7] = 1.0 - total;
+
+  for (int d = 0; d < 8; d++)
+    trainData.push_back(h[d]);
+}
+
+void sampleData(std::vector< std::vector<double> > & trainData, NICE::Vector & y, const int & nrExamplesPerClass)
+{
+  // Sample nrExamplesPerClass synthetic histograms per class, interleaving
+  // one positive (label +1) and one negative (label -1) example per iteration.
+  trainData.clear();
+  y.resize(2*nrExamplesPerClass);
+
+  for (int i = 0; i < nrExamplesPerClass; i++)
+  {
+    std::vector<double> sample;
+
+    // positive example at even positions
+    sampleDataOneExample(sample, 1);
+    trainData.push_back(sample);
+    y[2*i] = 1;
+
+    // negative example at odd positions
+    sample.clear();
+    sampleDataOneExample(sample, -1);
+    trainData.push_back(sample);
+    y[2*i+1] = -1;
+  }
+}
+
+void evaluateRandomDistribution(const std::vector< std::vector<double> > & trainData, const NICE::Vector & y, std::vector<NICE::Histogram> & histograms)
+{
+  // Build per-class, per-dimension histograms of the synthetic features:
+  // entries 0..7 collect class +1, entries 8..15 the other class.
+  // Dimensions 0..6 live on [0, 0.25]; the last dimension (remaining
+  // probability mass, see sampleDataOneExample) lives on [0, 1.0].
+  histograms.resize(16);
+
+  for (int i = 0; i < 16; i++)
+  {
+    // one histogram construction per slot (previously slots 0,3,9,11 were
+    // redundantly re-initialized with identical parameters)
+    const double maxVal = ( (i % 8) == 7 ) ? 1.0 : 0.25;
+    histograms[i] = NICE::Histogram ( 0.0, maxVal, 10 ); // min, max, numberBins
+    histograms[i].set(0);
+  }
+
+  for (uint i = 0; i < trainData.size(); i++)
+  {
+    // first 8 histograms belong to class +1, the remaining 8 to the other class
+    const int offset = ( y[i] == 1 ) ? 0 : 8;
+
+    for (int d = 0; d < 8; d++)
+    {
+      // map the value to a bin: dims 0..6 span [0,0.25] with 10 bins of
+      // width 0.025 (factor 40), dim 7 spans [0,1.0] (factor 10)
+      const double scale = ( d == 7 ) ? 10.0 : 40.0;
+      int bin = (int)floor( trainData[i][d] * scale );
+
+      // FIX: clamp the bin index -- the half-Gaussian dimensions are unbounded
+      // above and a value exactly at the histogram maximum maps to bin 10,
+      // which previously caused an out-of-range increaseBin() call
+      if ( bin < 0 ) bin = 0;
+      if ( bin > 9 ) bin = 9;
+
+      histograms[offset + d].increaseBin ( bin );
+    }
+  }
+}
+
+/** 
+    
+    ECCV 2012 Experiment with synthetic data
+    
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  initRand();
+  Config conf ( argc, argv );
+  // the baseline uses the same settings, but with hyperparameter optimization disabled
+  Config confBaseline ( conf );
+  confBaseline.sS("HIKGP", "optimization_method", "none");
+ 
+  string pf_baseline_s = conf.gS("main", "transformBaseline", "absexp");
+  string pf_featRel_s = conf.gS("main", "transformFeatRel", "weightedDim");
+  int nrRuns = conf.gI("main", "nrRuns", 1);
+  int testSize = conf.gI("main", "testSize", 150); // per category
+  
+  bool printRandomDistribution = conf.gB("main", "printRandomDistribution", false);
+  
+  // the synthetic histograms have exactly 8 dimensions (see sampleDataOneExample)
+  int dim(8);
+  // NOTE(review): numeric_limits<double>::min() is the smallest POSITIVE double,
+  // not the most negative value -- presumably intended to keep the weights
+  // strictly positive; compare the commented-out alternative below and verify.
+  double weightsLowerBound(numeric_limits<double>::min( ));
+  double weightsUpperBound(numeric_limits<double>::max( ));
+//   double weightsLowerBound(-1.0 * weightsUpperBound);
+   
+  ParameterizedFunction *pfBaseline;
+  ParameterizedFunction *pfFeatRel;
+
+  if ( pf_baseline_s == "absexp" )
+    pfBaseline = new PFAbsExp();
+  else if ( pf_baseline_s == "exp" )
+    pfBaseline = new PFExp();
+  else
+    fthrow(Exception, "Parameterized function type " << pf_baseline_s << " not yet implemented");
+  
+  if ( pf_featRel_s == "weightedDim" )
+    pfFeatRel = new PFWeightedDim(dim,weightsLowerBound,weightsUpperBound);//(pfBaseline);
+  else 
+    fthrow(Exception, "Parameterized function type " << pf_featRel_s << " not yet implemented");
+  
+  std::cerr << "Transformation type baseline: " << pf_baseline_s << std::endl;
+  std::cerr << "Transformation type FeatRel: " << pf_featRel_s << std::endl;
+  
+  std::vector<int> trainSizes; // per category
+  
+//   trainSizes.push_back(5);
+//   trainSizes.push_back(10);
+//   trainSizes.push_back(15);
+//   trainSizes.push_back(20);
+//   trainSizes.push_back(50);
+//   trainSizes.push_back(75);
+//   trainSizes.push_back(100);
+  trainSizes.push_back(500);
+  
+  // outer index: training-set size, inner vector: one value per run
+  // (used for the mean / std-dev statistics printed at the end)
+  std::vector<std::vector<double> > ARRs_baseline;
+  std::vector<std::vector<double> > ARRs_featRel;
+  
+  std::vector<std::vector<double> > AUCs_baseline;
+  std::vector<std::vector<double> > AUCs_featRel;
+  
+  for (std::vector<int>::const_iterator trainSize = trainSizes.begin(); trainSize != trainSizes.end(); trainSize++)
+  {
+    std::cerr << "trainSize: " << *trainSize << std::endl;
+    double AARRBaseline(0.0); // averaged average recognition rate :)
+    double AARRFeatRel(0.0); // averaged average recognition rate :)
+    
+    double AAUCBaseline(0.0); // averaged area under curve :)
+    double AAUCFeatRel(0.0); // averaged area under curve :)
+    
+    std::vector<double> ARRs_baseline_SingleSize;
+    std::vector<double> ARRs_featRel_SingleSize;
+    
+    std::vector<double> AUCs_baseline_SingleSize;
+    std::vector<double> AUCs_featRel_SingleSize;
+    
+    for (int run = 0; run < nrRuns; run++)
+    {
+      std::cerr << "run: " << run << std::endl;
+      //----------------- TRAINING -------------------------
+      //sample the training data
+      std::vector< std::vector<double> > trainData;
+      NICE::Vector yTrain;
+      sampleData(trainData,yTrain, *trainSize);
+      
+      
+      
+      // diagnostic mode: dump the per-class feature histograms to disk and exit
+      if (printRandomDistribution)
+      {
+        // per-histogram upper bound of the value range (dims 7 and 15 span [0,1])
+        std::vector<double> borders;
+        borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(1.0);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(0.25);borders.push_back(1.0);
+
+        std::cerr << "print distribution of features " << std::endl;
+        std::vector<NICE::Histogram>  histograms;
+        
+        evaluateRandomDistribution(trainData, yTrain, histograms);
+        for (int i = 0; i < histograms.size(); i++)
+        {
+          int sum (0);
+          // NOTE(review): hard-coded, user-specific output path
+          std::string fn = "/home/luetz/code/fast-hik/nice/fast-hik/hist";
+          std::stringstream s1;
+          s1 << i/8;
+          fn += s1.str();
+          fn += "-";
+          std::stringstream s2;
+          s2 << i%8;
+          fn += s2.str();
+          std::cerr << "filename: "<< fn.c_str() << std::endl;
+          
+          std::fstream  outfile;
+          outfile.open( fn.c_str(), ios::out );
+          if (outfile.is_open())
+          {
+            for (int k = 0; k < histograms[i].bins(); k++)
+            {
+              outfile << borders[i]*k/ (double)histograms[i].bins() << " " << histograms[i][k] << std::endl;
+              sum += histograms[i][k];
+            }
+            outfile.close();
+          }
+          else{
+           std::cerr << "error while  opening file " << fn << std::endl; 
+          }
+        }
+        std::cerr << "ending the function, we only printed the distributions" << std::endl;
+        return 0;
+      }
+      
+      std::vector<double> meanValues;
+      calculateMeanPerDimension(trainData, meanValues);
+      
+      // switch to dimension-major layout, as expected by FastMinKernel
+      transposeVectorOfVectors ( trainData );    
+     
+      //baseline without feature relevance
+      double noise = 0.1;
+      // NOTE(review): pfBaseline/pfFeatRel are reused across runs and handed to a
+      // fresh FMKGPHyperparameterOptimization each time; fmkBaseline/fmkFeatRel
+      // are never deleted here -- verify ownership semantics.
+      FastMinKernel *fmkBaseline = new FastMinKernel ( trainData, noise, dim );
+      FMKGPHyperparameterOptimization hyperBaseline ( &confBaseline, pfBaseline, fmkBaseline );
+      hyperBaseline.optimize ( yTrain );
+      
+      //with optimization of feature relevance (= optimization of weights for each dimension)
+      FastMinKernel *fmkFeatRel = new FastMinKernel ( trainData, noise, dim );
+//       std::cerr << "print Parameter of pfWeightedDim" << std::endl;
+//       std::cerr << pfFeatRel->parameters() << std::endl;
+//       std::cerr << "print Matrix after transformation" << std::endl;
+//       pfFeatRel->applyFunctionToFeatureMatrix(fmkFeatRel->featureMatrix());
+//       fmkFeatRel->featureMatrix().print();
+      
+      FMKGPHyperparameterOptimization hyperFeatRel ( &conf, pfFeatRel, fmkFeatRel );
+      hyperFeatRel.optimize ( yTrain );
+      std::cerr << "meanValues: ";
+      for (std::vector<double>::const_iterator meanIt = meanValues.begin(); meanIt != meanValues.end(); meanIt++)
+      {
+        std::cerr << *meanIt << " ";
+      }
+      std::cerr << std::endl << std::endl;
+      
+      //----------------- TESTING -------------------------
+      //sample the training data
+      std::vector< std::vector<double> > testData;
+      NICE::Vector yTest;
+      sampleData(testData,yTest, testSize);
+      
+//       std::cerr << "Printing testData: " << std::endl;
+//       printMatrix<double>(testData);
+//       std::cerr << yTest << std::endl;
+      
+      Timer t;
+      
+      // rows: ground-truth class, columns: estimated class
+      Matrix confusionBaseline ( 2, 2, 0.0 );
+      Matrix confusionFeatRel ( 2, 2, 0.0 );
+      
+      ClassificationResults resultsBaseline;
+      ClassificationResults resultsFeatRel;
+      
+      for ( uint i = 0 ; i < testData.size(); i++ )
+      {
+        const Vector xstar(testData[i]);
+        // the following is just to be sure that we 
+        // do not count the time necessary for conversion
+        SparseVector xstar_sparse ( xstar ); //default tolerance is 10e-10
+
+        int classno_groundtruth = yTest[i];
+        //dirty :( map the -1 label to class index 0
+        if ((classno_groundtruth) < 0)
+          classno_groundtruth = 0;
+        
+        SparseVector scoresBaseline;
+        t.start();
+        uint classno_estimated_baseline = hyperBaseline.classify ( xstar_sparse, scoresBaseline );
+        t.stop();        
+        scoresBaseline.store(cerr);
+        cerr << "baseline [" << i << " / " << testData.size() << "] " << classno_estimated_baseline << " " << classno_groundtruth << " time: " << t.getLast() << endl;
+        confusionBaseline( classno_groundtruth, classno_estimated_baseline ) += 1;
+        
+        // building the result
+        ClassificationResult rBaseline ( classno_estimated_baseline, scoresBaseline );      
+        // set ground truth label
+        rBaseline.classno_groundtruth = classno_groundtruth;
+        resultsBaseline.push_back ( rBaseline );
+        
+        SparseVector scoresFeatRel;
+        t.start();
+        uint classno_estimated_featRel = hyperFeatRel.classify ( xstar_sparse, scoresFeatRel );
+        t.stop();        
+        scoresFeatRel.store(cerr);
+        cerr << "FeatRel [" << i << " / " << testData.size() << "] " << classno_estimated_featRel << " " << classno_groundtruth << " time: " << t.getLast() << endl;
+        confusionFeatRel( classno_groundtruth, classno_estimated_featRel ) += 1;        
+        
+        // building the result
+        ClassificationResult rFeatRel ( classno_estimated_featRel, scoresFeatRel );      
+        // set ground truth label
+        rFeatRel.classno_groundtruth = classno_groundtruth;
+        resultsFeatRel.push_back ( rFeatRel );
+      }
+
+      // row-stochastic -> diagonal entries become per-class recall rates
+      confusionBaseline.normalizeRowsL1();
+      confusionFeatRel.normalizeRowsL1();
+
+      // --------------- ARR evaluation --------------------
+      cerr << confusionBaseline << endl;
+      cerr << "average recognition rate baseline: " << confusionBaseline.trace()/confusionBaseline.rows() << endl;
+      
+      cerr << confusionFeatRel << endl;      
+      cerr << "average recognition rate featRel: " << confusionFeatRel.trace()/confusionFeatRel.rows() << endl;
+          
+      AARRBaseline += (confusionBaseline.trace()/confusionBaseline.rows()) / nrRuns;
+      ARRs_baseline_SingleSize.push_back(confusionBaseline.trace()/confusionBaseline.rows());
+      
+      AARRFeatRel += (confusionFeatRel.trace()/confusionFeatRel.rows()) / nrRuns;      
+      ARRs_featRel_SingleSize.push_back(confusionFeatRel.trace()/confusionFeatRel.rows());
+
+      // --------------- AUC evaluation --------------------
+      double perfvalueBaseline = resultsBaseline.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+      cerr << "AUC Baseline: " << perfvalueBaseline << endl;
+
+      double perfvalueFeatRel = resultsFeatRel.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+      cerr << "AUC FeatRel: " << perfvalueFeatRel << endl;
+      
+      AAUCBaseline += perfvalueBaseline / nrRuns;
+      AUCs_baseline_SingleSize.push_back(perfvalueBaseline);
+      
+      AAUCFeatRel += perfvalueFeatRel / nrRuns;      
+      AUCs_featRel_SingleSize.push_back(perfvalueFeatRel);
+    }
+    
+    ARRs_baseline.push_back(ARRs_baseline_SingleSize);
+    ARRs_featRel.push_back(ARRs_featRel_SingleSize);
+    AUCs_baseline.push_back(AUCs_baseline_SingleSize);
+    AUCs_featRel.push_back(AUCs_featRel_SingleSize);
+  }
+
+
+  std::cerr << "================ EVALUATION ARR======================== " << std::endl;
+  std::cerr << "trainsize << meanBaseline << stdDevBaseline << meanFeatRel << stdDevFeatRel " << std::endl;
+  for (uint trainSizeIdx = 0; trainSizeIdx < trainSizes.size(); trainSizeIdx++)
+  {
+    double meanBaseline( calculating_mean(ARRs_baseline[trainSizeIdx]) );
+    double meanFeatRel( calculating_mean(ARRs_featRel[trainSizeIdx]) );
+    
+    double stdDevBaseline(calculating_std_dev(ARRs_baseline[trainSizeIdx], meanBaseline));
+    double stdDevFeatRel(calculating_std_dev(ARRs_featRel[trainSizeIdx], meanFeatRel));
+    
+    std::cerr << trainSizes[trainSizeIdx] << " " << meanBaseline << " " << stdDevBaseline << " " << meanFeatRel << " " << stdDevFeatRel << std::endl;
+  }
+  
+  std::cerr << std::endl << std::endl << "================ EVALUATION AUC======================== " << std::endl;
+  std::cerr << "trainsize << meanBaseline << stdDevBaseline << meanFeatRel << stdDevFeatRel " << std::endl;
+  for (uint trainSizeIdx = 0; trainSizeIdx < trainSizes.size(); trainSizeIdx++)
+  {
+    double meanBaseline( calculating_mean(AUCs_baseline[trainSizeIdx]) );
+    double meanFeatRel( calculating_mean(AUCs_featRel[trainSizeIdx]) );
+    
+    double stdDevBaseline(calculating_std_dev(AUCs_baseline[trainSizeIdx], meanBaseline));
+    double stdDevFeatRel(calculating_std_dev(AUCs_featRel[trainSizeIdx], meanFeatRel));
+    
+    std::cerr << trainSizes[trainSizeIdx] << " " << meanBaseline << " " << stdDevBaseline << " " << meanFeatRel << " " << stdDevFeatRel << std::endl;
+  }
+  return 0;
+}

+ 149 - 0
progs/saveImageNetBinary.cpp

@@ -0,0 +1,149 @@
+/** 
+* @file saveImageNetBinary.cpp
+* @brief export a binary ImageNet classification task (one positive class vs. sampled negatives) to sparse text files (train.txt / test.txt)
+* @author Erik Rodner
+* @date 01/04/2012
+
+*/
+#include <core/basics/Config.h>
+#include <core/matlabAccess/MatFileIO.h>
+
+//----------
+
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/baselib/ProgressBar.h>
+#include <vislearning/matlabAccessHighLevel/ImageNetData.h>
+
+//----------
+
+#include <gp-hik-core/FeatureMatrixT.h>
+#include <gp-hik-core/tools.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  int positiveClass = conf.gI("main", "positive_class");
+
+  cerr << "Positive class is " << positiveClass << endl;
+  
+  // ---------------- load the raw ImageNet training batch ----------------
+  sparse_t data;
+  NICE::Vector yl;
+  cerr << "Reading ImageNet data ..." << endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNet ( imageNetPath + "demo/" );
+
+  imageNet.getBatchData ( data, yl, "train", "training" );
+
+  uint n = yl.size();
+  
+  // ---- select positives (all of positiveClass) and sampled negatives ----
+  // FIX: the old status message ("Performing hyperparameter optimization")
+  // was a copy-paste leftover -- this program only exports data.
+  cerr << "Selecting positive and negative examples ... " << endl;
+  set<int> positives;
+  set<int> negatives;
+
+  // group example indices by class label
+  map< int, set<int> > mysets;
+  for ( uint i = 0 ; i < n; i++ )
+    mysets[ yl[i] ].insert ( i );
+
+  if ( mysets[ positiveClass ].size() == 0 ) 
+    fthrow(Exception, "Class " << positiveClass << " is not available.");
+
+  // add our positive examples
+  for ( set<int>::const_iterator i = mysets[positiveClass].begin(); i != mysets[positiveClass].end(); i++ )
+    positives.insert ( *i );
+
+  // take at most nneg examples from each remaining class as negatives
+  int Nneg = conf.gI("main", "nneg", 1 );
+  for ( map<int, set<int> >::const_iterator k = mysets.begin(); k != mysets.end(); k++ )
+  {
+    int classno = k->first;
+    if ( classno == positiveClass )
+      continue;
+    const set<int> & s = k->second;
+    uint ind = 0;
+    // FIX: compare as int -- the previous (uint < int) comparison promoted
+    // Nneg to unsigned, so a (misconfigured) negative Nneg selected ALL examples
+    for ( set<int>::const_iterator i = s.begin(); (i != s.end() && (int)ind < Nneg); i++,ind++  )
+      negatives.insert ( *i );
+  }
+  cerr << "Number of positive examples: " << positives.size() << endl;
+  cerr << "Number of negative examples: " << negatives.size() << endl;
+  
+  // map original example index -> position in the reduced set, and build
+  // the corresponding +1 / -1 label vector
+  map<int, int> examples;
+  Vector y ( yl.size() );
+  int ind = 0;
+  for ( uint i = 0 ; i < yl.size(); i++ )
+  {
+    if (positives.find(i) != positives.end()) {
+      y[ ind ] = 1.0;
+      examples.insert( pair<int, int> ( i, ind ) );
+      ind++;
+    } else if ( negatives.find(i) != negatives.end() ) {
+      y[ ind ] = -1.0;
+      examples.insert( pair<int, int> ( i, ind ) );
+      ind++;
+    }
+  }
+  y.resize( examples.size() );
+  cerr << "Examples: " << examples.size() << endl; 
+
+  cerr << "Putting everything in a feature matrix structure ..." << endl;
+  FeatureMatrixT<double> fm ( data, examples, 1000 );
+  
+  // ---- write the training set: "<label> <dim+1>:<value> ..." (non-zeros only)
+  cerr << "Writing file ..." << endl;
+  ofstream ofs ( "train.txt", ios::out );
+  if ( !ofs.good() )
+    fthrow(Exception, "Unable to write to train.txt" );
+  // writing features
+  for ( uint i = 0 ; i < fm.get_n(); i++ )
+  {
+    // FIX: parenthesize the conditional -- operator<< binds tighter than ?:,
+    // so the old code streamed the bool (y[i] == 1.0) and silently discarded
+    // the ternary result (output was only correct because bool prints as 1/0)
+    ofs << ( (y[i] == 1.0) ? 1 : 0 );
+    for ( uint k = 0 ; k < fm.get_d(); k++ )
+    {
+      double val = fm(k,i);
+      if ( val != 0 ) 
+      {
+        ofs << " " << k+1 << ":" << val;
+      }
+    }
+    ofs << endl;
+  }
+  ofs.close();
+
+  // ------------------------------ TESTING ------------------------------
+  cerr << "Reading ImageNet test data files (takes some seconds)..." << endl;
+  imageNet.preloadData ( "val", "testing" );
+  imageNet.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );
+
+  ofstream ofs_test ( "test.txt", ios::out );
+  if ( !ofs_test.good() )
+    fthrow(Exception, "Unable to write to test.txt" );
+
+  // test labels: 1 for the positive class, 0 for everything else
+  for ( uint i = 0 ; i < (uint)imageNet.getNumPreloadedExamples(); i++ )
+  {
+    const SparseVector & svec = imageNet.getPreloadedExample ( i );
+    int classno_groundtruth = (((int)imageNet.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+
+    ofs_test << ( classno_groundtruth );
+    for ( SparseVector::const_iterator k = svec.begin(); k != svec.end(); k++ )
+      ofs_test << " " << k->first+1 << ":" << k->second;
+    ofs_test << endl;
+  }
+
+  ofs_test.close();
+
+  return 0;
+}

+ 165 - 0
progs/testFPClassifier.cpp

@@ -0,0 +1,165 @@
+/**
+* @file testFPClassifier.cpp
+* @brief main program for classifier evaluation
+* @author Erik Rodner
+* @date 2007-10-12
+*/
+
+#include <fstream>
+#include <iostream>
+
+#include <core/basics/numerictools.h>
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+
+//----------
+
+#include <vislearning/baselib/Preprocess.h>
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/cbaselib/MutualInformation.h>
+
+#include <vislearning/classifier/classifierbase/FeaturePoolClassifier.h>
+#include <vislearning/classifier/fpclassifier/randomforest/FPCRandomForestTransfer.h>
+#include <vislearning/classifier/classifierinterfaces/VCFeaturePool.h>
+
+#include <vislearning/math/cluster/GMM.h>
+
+//----------
+
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+#undef DEBUG
+
+using namespace OBJREC;
+
+using namespace NICE;
+using namespace std;
+
+int main ( int argc, char **argv )
+{
+  fprintf ( stderr, "testClassifier: init\n" );
+
+  std::set_terminate ( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf ( argc, argv );
+
+  GPHIKClassifierNICE *classifier = new GPHIKClassifierNICE ( &conf, "ClassiferGPHIK" );
+
+  string trainfn = conf.gS ( "data", "trainfile" );
+  string testfn = conf.gS ( "data", "testfile" );
+
+  Examples trainex;
+  ifstream intrain ( trainfn.c_str() );
+
+  int parts = 0;
+
+  while ( intrain.good() )
+  {
+    string line;
+    getline ( intrain, line );
+    vector<string> split;
+    StringTools::split ( line, ' ', split );
+
+    if ( split.size() == 0 )
+      break;
+
+    if ( parts > 0 )
+      assert ( parts == ( int ) split.size() );
+    parts = split.size();
+
+    int classno =  atoi ( split[0].c_str() );
+    SparseVector *sv = new SparseVector();
+
+    for ( uint i = 1; i < split.size();i++ )
+    {
+      vector<string> split2;
+      StringTools::split ( split[i], ':', split2 );
+      assert ( split2.size() == 2 );
+      ( *sv ) [atoi ( split2[0].c_str() ) ] = atof ( split2[1].c_str() );
+    }
+
+    Example example;
+    example.vec = NULL;
+    example.svec = sv;
+
+    trainex.push_back ( pair<int, Example> ( classno, example ) );
+
+  }
+
+  cout << "trainex.size(): " << trainex.size() << endl;
+
+  Examples testex;
+  ifstream intest ( testfn.c_str() );
+
+  parts = 0;
+
+  while ( intest.good() )
+  {
+    string line;
+    getline ( intest, line );
+    vector<string> split;
+    StringTools::split ( line, ' ', split );
+
+    if ( split.size() == 0 )
+      break;
+
+    if ( parts > 0 )
+      assert ( parts == ( int ) split.size() );
+    parts = split.size();
+
+    int classno =  atoi ( split[0].c_str() );
+    SparseVector *sv = new SparseVector();
+
+    for ( uint i = 1; i < split.size();i++ )
+    {
+      vector<string> split2;
+      StringTools::split ( split[i], ':', split2 );
+      assert ( split2.size() == 2 );
+      
+      double val = atof (split2[1].c_str());
+      
+      if(val != 0.0)
+        ( *sv ) [atoi ( split2[0].c_str() ) ] =  val;
+    }
+
+    Example example;
+    example.vec = NULL;
+    example.svec = sv;
+
+    testex.push_back ( pair<int, Example> ( classno, example ) );
+
+  }
+
+  cout << "testex.size(): " << testex.size() << endl;
+
+  FeaturePool fp;
+  classifier->train ( fp, trainex );
+
+  int counter = 0;
+  
+  for ( uint e = 0; e < testex.size(); e++ )
+  {
+    ClassificationResult r = classifier->classify ( testex[e].second );
+    
+    int bestclass = 0;
+    double bestval = r.scores[0];
+    
+    for ( int j = 1 ; j < r.scores.size(); j++ )
+    {
+      if(r.scores[j] > bestval)
+      {
+        bestclass = j;
+        bestval = r.scores[j];
+      }
+    }
+    
+    if(bestclass == testex[e].first)
+      counter++;
+  }
+
+  cout << "avg: " << (double)counter/(double)testex.size() << endl;
+
+  return 0;
+}

+ 141 - 0
progs/testImageNetBinary.cpp

@@ -0,0 +1,141 @@
+/** 
+* @file testImageNetBinary.cpp
+* @brief perform ImageNet tests with binary classification
+* @author Erik Rodner
+* @date 01/04/2012
+*/
+
+#include <core/basics/Config.h>
+#include <core/matlabAccess/MatFileIO.h>
+
+//----------
+
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/baselib/ProgressBar.h>
+#include <vislearning/matlabAccessHighLevel/ImageNetData.h>
+
+//----------
+
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFExp.h>
+#include <gp-hik-core/parameterizedFunctions/PFWeightedDim.h>
+#include <gp-hik-core/tools.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  // file the WEKA-style classification results are written to
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  // ImageNet class serving as the positive class; all other classes provide negatives
+  int positiveClass = conf.gI("main", "positive_class");
+
+  cerr << "Positive class is " << positiveClass << endl;
+  
+  // parameterized transformation applied to the feature values; its parameters
+  // are subject to the hyperparameter optimization below
+  ParameterizedFunction *pf;
+  
+  string pf_s = conf.gS("main", "transform", "absexp");
+  if ( pf_s == "absexp" )
+    pf = new PFAbsExp( 1.0 );
+  else if ( pf_s == "exp" )
+    pf = new PFExp ( 1.0 );
+  else if ( pf_s == "weighted" )
+    pf = new PFWeightedDim ( conf.gI("main", "dimension"), 0.0, 5.0 );
+  else
+    fthrow(Exception, "Parameterized function type " << pf_s << " not yet implemented");
+
+  // GP model noise; read from the GPHIKClassifier section so the same config
+  // file can be shared with the classifier wrapper programs
+  double noise = conf.gD("GPHIKClassifier", "noise", 0.1);
+  FMKGPHyperparameterOptimization hypopt ( &conf, pf ); 
+
+  sparse_t data;
+  NICE::Vector y;
+  cerr << "Reading ImageNet data ..." << endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNet ( imageNetPath + "demo/" );
+
+  imageNet.getBatchData ( data, y, "train", "training" );
+
+  uint n = y.size();
+  
+  cerr << "Performing hyperparameter optimization ... " << endl;
+  set<int> positives;
+  set<int> negatives;
+
+  // group the training example indices by their class label
+  map< int, set<int> > mysets;
+  for ( uint i = 0 ; i < n; i++ )
+    mysets[ y[i] ].insert ( i );
+
+  if ( mysets[ positiveClass ].size() == 0 ) 
+    fthrow(Exception, "Class " << positiveClass << " is not available.");
+
+  // add our positive examples
+  for ( set<int>::const_iterator i = mysets[positiveClass].begin(); i != mysets[positiveClass].end(); i++ )
+    positives.insert ( *i );
+
+  // draw at most nneg negative examples from every other class
+  int Nneg = conf.gI("main", "nneg", 1 );
+  for ( map<int, set<int> >::const_iterator k = mysets.begin(); k != mysets.end(); k++ )
+  {
+    int classno = k->first;
+    if ( classno == positiveClass )
+      continue;
+    const set<int> & s = k->second;
+    uint ind = 0;
+    for ( set<int>::const_iterator i = s.begin(); (i != s.end() && ind < Nneg); i++,ind++  )
+      negatives.insert ( *i );
+  }
+  cerr << "Number of positive examples: " << positives.size() << endl;
+  cerr << "Number of negative examples: " << negatives.size() << endl;
+  std::cerr << "hypopt.optimize( data, y, positives, negatives ) " << std::endl;
+  // optimize the GP hyperparameters on the binary task (positives vs. negatives)
+  hypopt.optimizeBinary ( data, y, positives, negatives, noise );
+
+  // ------------------------------ TESTING ------------------------------
+ 
+  cerr << "Reading ImageNet test data files (takes some seconds)..." << endl;
+  imageNet.preloadData ( "val", "testing" );
+  imageNet.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );
+ 
+  ClassificationResults results;
+  cerr << "Classification step ... with " << imageNet.getNumPreloadedExamples() << " examples" << endl;
+  ProgressBar pb;
+  for ( uint i = 0 ; i < (uint)imageNet.getNumPreloadedExamples(); i++ )
+  {
+    // NOTE(review): update() is called with the total example count every
+    // iteration — assumed to be the intended ProgressBar API usage, confirm
+    pb.update ( imageNet.getNumPreloadedExamples() );
+
+    const SparseVector & svec = imageNet.getPreloadedExample ( i );
+    SparseVector scores;
+
+    // classification step
+    int classno = hypopt.classify ( svec, scores );
+
+    // building the result
+    ClassificationResult r ( classno, scores );
+  
+    // set ground truth label (1 = positive class, 0 = everything else)
+    r.classno_groundtruth = (((int)imageNet.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+    results.push_back ( r );
+  }
+
+  cerr << "Writing results to " << resultsfile << endl;
+  results.writeWEKA ( resultsfile, 0 );
+  // area under the ROC curve of the binary problem
+  double perfvalue = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+
+  cerr << "Performance: " << perfvalue << endl;
+
+  // pf is owned here and freed after the optimization
+  // NOTE(review): assumes hypopt does not take ownership — confirm
+  delete pf;
+  
+  return 0;
+}

+ 229 - 0
progs/testImageNetBinaryGPBaseline.cpp

@@ -0,0 +1,229 @@
+/** 
+* @file testImageNetBinaryGPBaseline.cpp
+* @brief perform ImageNet tests with binary classification
+* @author Erik Rodner
+* @date 01/04/2012
+
+*/
+#include <core/basics/Config.h>
+#include <core/basics/Timer.h>
+#include <core/matlabAccess/MatFileIO.h>
+
+//----------
+
+#include <vislearning/baselib/ProgressBar.h>
+
+#include <vislearning/cbaselib/ClassificationResults.h>
+
+#include "vislearning/classifier/classifierbase/KernelClassifier.h"
+#include "vislearning/classifier/kernelclassifier/KCGPRegression.h"
+
+#include <vislearning/matlabAccessHighLevel/ImageNetData.h>
+
+//----------
+
+#include <gp-hik-core/tools.h>
+#include <gp-hik-core/kernels/IntersectionKernelFunction.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  int positiveClass = conf.gI("main", "positive_class");
+
+  cerr << "Positive class is " << positiveClass << endl;
+
+  sparse_t data;
+  NICE::Vector y;
+  cerr << "Reading ImageNet data ..." << endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNet ( imageNetPath + "demo/" );
+
+  imageNet.getBatchData ( data, y, "train", "training" );
+
+  uint n = y.size();
+
+  //noise will be 
+  double noise(0.0);
+  
+  set<int> positives;
+  set<int> negatives;
+
+  map< int, set<int> > mysets;
+  for ( uint i = 0 ; i < n; i++ )
+    mysets[ y[i] ].insert ( i );
+
+  if ( mysets[ positiveClass ].size() == 0 ) 
+    fthrow(Exception, "Class " << positiveClass << " is not available.");
+
+  // add our positive examples
+  for ( set<int>::const_iterator i = mysets[positiveClass].begin(); i != mysets[positiveClass].end(); i++ )
+    positives.insert ( *i );
+
+  int Nneg = conf.gI("main", "nneg", 1 );
+  for ( map<int, set<int> >::const_iterator k = mysets.begin(); k != mysets.end(); k++ )
+  {
+    int classno = k->first;
+    if ( classno == positiveClass )
+      continue;
+    const set<int> & s = k->second;
+    uint ind = 0;
+    for ( set<int>::const_iterator i = s.begin(); (i != s.end() && ind < Nneg); i++,ind++  )
+      negatives.insert ( *i );
+  }
+  cerr << "Number of positive examples: " << positives.size() << endl;
+  cerr << "Number of negative examples: " << negatives.size() << endl;
+  
+  int nrExamplesForTraining(positives.size()+negatives.size());
+  
+  std::vector<NICE::SparseVector> dataMatrixSparse;
+  dataMatrixSparse.resize(nrExamplesForTraining);
+  
+  std::cerr << "data matrix prepared" << std::endl;
+  
+  int dim(data.njc-1);
+  
+  NICE::Vector labelsTrain(nrExamplesForTraining,0);
+  
+  std::map<int,int> indices;  // orig index, new index
+  
+  int counter(0);
+  for ( int i = 0; i < dim; i++ ) //walk over dimensions
+  {
+    for ( int j = data.jc[i]; j < data.jc[i+1] && j < data.ndata; j++ ) //walk over single features, which are sparsely represented
+    {
+      int example_index = data.ir[ j];
+      std::set<int>::const_iterator itPos = positives.find(example_index);
+      std::set<int>::const_iterator itNeg = negatives.find(example_index);
+      if ( itPos != positives.end() )
+      {
+        std::map<int,int>::const_iterator newPosition = indices.find(example_index);
+        
+        //feature already known from a different dimension
+        if (newPosition != indices.end())       
+          dataMatrixSparse[newPosition->second].insert(pair<short,double>((short)i , ((double*)data.data)[j]));
+        //new feature, previous dimension where sparse for it
+        else
+        {
+          indices.insert(pair<int,int>(example_index,counter));
+          dataMatrixSparse[counter].insert(pair<short,double>((short)i , ((double*)data.data)[j]));
+          
+          //set the label-vector to +1 for this feature
+          labelsTrain[counter] = 1;
+
+          counter++;
+        }
+        
+
+      }
+      else if ( itNeg != negatives.end())
+      {
+        std::map<int,int>::const_iterator newPosition = indices.find(example_index);
+        
+        //feature already known from a different dimension
+        if (newPosition != indices.end())       
+          dataMatrixSparse[newPosition->second].insert(pair<short,double>((short)i , ((double*)data.data)[j]));
+        //new feature, previous dimension where sparse for it
+        else
+        {
+          indices.insert(pair<int,int>(example_index,counter));
+          dataMatrixSparse[counter].insert(pair<short,double>((short)i , ((double*)data.data)[j])); 
+          //label vector already contains -1
+          counter++;
+        }
+      }
+    }
+  }
+  
+  std::cerr << "data read completely" << std::endl;
+  
+  for (int i = 0; i < dataMatrixSparse.size(); i++)
+  {
+    dataMatrixSparse[i].setDim(dim);
+  }
+ 
+  std::cerr << "preparations done, start timing experiments" << std::endl;
+ 
+  Timer t;
+  t.start();
+  //standard training comes here
+  NICE::IntersectionKernelFunction<double> hik;
+  
+  std::cerr << "compute kernel matrix will be called" << std::endl;
+  NICE::Matrix K (hik.computeKernelMatrix(dataMatrixSparse, noise));
+  std::cerr << "kernel matrix succesfully computed" << std::endl;
+  
+  OBJREC::KCGPRegression classifier ( &conf);
+
+  std::cerr << "start teaching" << std::endl;
+  
+  classifier.teach ( new KernelData ( &conf, K ), labelsTrain );
+  
+  t.stop();
+  cerr << "Time used for training: " << t.getLast() << endl;
+
+  
+  //end of standard training
+  
+  // ------------------------------ TESTING ------------------------------
+ 
+  cerr << "Reading ImageNet test data files (takes some seconds)..." << endl;
+  imageNet.preloadData ( "val", "testing" );
+  imageNet.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );
+ 
+  ClassificationResults results;
+  cerr << "Classification step ... with " << imageNet.getNumPreloadedExamples() << " examples" << endl;
+  ProgressBar pb;
+  
+  NICE::Matrix confMat(2,2,0.0);
+  
+  for ( uint i = 0 ; i < (uint)imageNet.getNumPreloadedExamples(); i++ )
+  {
+    pb.update ( imageNet.getNumPreloadedExamples() );
+
+    const SparseVector & svec = imageNet.getPreloadedExample ( i );
+
+    t.start();
+    // classification step    
+    Vector kernelVector = hik.computeKernelVector(dataMatrixSparse,svec);
+    double kernelSelf = hik.measureDistance(svec, svec);
+    ClassificationResult r = classifier.classifyKernel ( kernelVector, kernelSelf );
+    
+    t.stop();
+//     cerr << i << " / " << (uint)imageNet.getNumPreloadedExamples() << " Time used for classifying a single example: " << t.getLast() << endl;
+    
+    // set ground truth label
+    r.classno_groundtruth = (((int)imageNet.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+    results.push_back ( r );
+    
+    confMat( r.classno_groundtruth, r.classno ) += 1;
+  }
+  
+  confMat.normalizeRowsL1();
+  std::cerr << "confMat: " << confMat << std::endl;
+  cerr << "average recognition rate: " << confMat.trace()/confMat.rows() << endl;
+
+  cerr << "Writing results to " << resultsfile << endl;
+  results.writeWEKA ( resultsfile, 0 );
+  double perfvalue = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+
+  cerr << "Performance: " << perfvalue << endl;
+
+  return 0;
+}

+ 103 - 0
progs/testImageNetMedian.cpp

@@ -0,0 +1,103 @@
+/** 
+* @file testImageNetMedian.cpp
+* @brief test our median prototype idea
+* @author Erik Rodner
+* @date 01/04/2012
+
+*/
+#include <core/basics/Config.h>
+#include <core/matlabAccess/MatFileIO.h>
+
+//----------
+
+#include <vislearning/matlabAccessHighLevel/ImageNetData.h>
+
+//----------
+
+#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
+#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
+#include <gp-hik-core/tools.h>
+
+using namespace std;
+using namespace NICE;
+
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  
+  
+//  std::string root = "/home/dbv/bilder/imagenet/devkit-1.0/demo/";
+  std::string root = "/users2/rodner/data/imagenet/devkit-1.0/demo/";
+  ImageNetData imageNet ( root );
+
+  cerr << "Reading ImageNet data files (takes some seconds)..." << endl;
+  NICE::Vector y;
+  sparse_t data;
+  imageNet.getBatchData ( data, y, "train", "training" );
+
+  uint n = y.size();
+  // identity mapping: use every training example under its original index
+  map<int, int> examples;
+  for ( uint i = 0 ; i < n; i++ )
+    examples.insert( pair<int, int> (i, i) );
+
+  // no-op, y already has n entries; kept for symmetry with the other programs
+  y.resize(n);
+
+  cerr << "Sorting features ..." << endl;
+  FeatureMatrix fm ( data, examples );
+
+  // count the number of examples of each class
+  // (assumes labels are integers in [0, y.Max()])
+  Vector elementCounts (y.Max()+1, 0.0);
+  for ( uint i = 0 ; i < n; i++ )
+    elementCounts[ y[i] ]++;
+
+  // calculate the median for each class: medians[c] is a sparse prototype
+  // holding the per-dimension median over all examples of class c
+  map<int, SparseVector> medians;
+  for ( int dim = 0 ; dim < fm.get_d(); dim++ )
+  {
+    cerr << "Calculating the median values for dimension " << dim << endl;
+    SparseVector classMedians;
+    fm.getFeatureValues(dim).getClassMedians ( classMedians, y, elementCounts );
+    for ( SparseVector::const_iterator i = classMedians.begin(); i != classMedians.end(); i++ )
+    {
+      int classno = i->first;
+      double medianValue = i->second;
+      medians[classno].insert ( pair<int, double> ( dim, medianValue ) );
+    }
+  }
+
+  // ------------------------------ TESTING ------------------------------
+ 
+  cerr << "Reading ImageNet test data files (takes some seconds)..." << endl;
+  imageNet.preloadData ( "val", "testing" );
+ 
+  for ( uint i = 0 ; i < imageNet.getNumPreloadedExamples(); i++ )
+  {
+    const SparseVector & svec = imageNet.getPreloadedExample ( i );
+    // pairs of (negated similarity, class), so the set iterates best-first
+    set< pair<double, int> > scores;
+    
+    // calculate the distance to each of the classes !
+    for ( map<int, SparseVector>::const_iterator k = medians.begin(); k != medians.end(); k++ )
+    {
+      const SparseVector & prototype = k->second;
+      int classno = k->first;
+      //double kval = prototype.minimumKernel ( svec );
+      double kval = prototype.innerProduct ( svec );
+      scores.insert ( pair<double, int> ( -1.0 * kval, classno ) );
+    }
+
+    // print the ground-truth label followed by the five best-scoring classes
+    cerr << "# groundtruth example: " << imageNet.getPreloadedLabel(i) << " " << i << " / " << imageNet.getNumPreloadedExamples() << endl;
+    uint nBestClasses = std::min( 5, (int)scores.size() );
+    uint kk = 0;
+    for ( set< pair<double, int> >::const_iterator k = scores.begin(); k != scores.end() && kk < nBestClasses; k++, kk++ )
+      cerr << k->second << " ";
+    cerr << endl;
+  }
+  
+  return 0;
+}

+ 266 - 0
progs/testLinsolvers.cpp

@@ -0,0 +1,266 @@
+/** 
+* @file testImageNetBinary.cpp
+* @brief perform ImageNet tests with binary classification
+* @author Erik Rodner
+* @date 01/04/2012
+
+*/
+#include "core/basics/Config.h"
+#include "core/algebra/IterativeLinearSolver.h"
+#include "core/algebra/PartialGenericMatrix.h"
+#include "core/algebra/GBCDSolver.h"
+#include "core/algebra/ILSConjugateGradients.h"
+#include <core/matlabAccess/MatFileIO.h>
+
+
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include "vislearning/baselib/ProgressBar.h"
+
+
+#include <vislearning/matlabAccessHighLevel/ImageNetData.h>
+
+#include <gp-hik-core/kernels/IntersectionKernelFunction.h>
+#include <gp-hik-core/tools.h>
+#include <gp-hik-core/GMHIKernel.h>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+void selectExamples ( const Config *conf, const Vector & y, map<int, int> & examples, Vector & yb )
+{
+  int positiveClass = conf->gI("main", "positive_class");
+
+  map< int, set<int> > mysets;
+  int n = y.size();
+  set<int> positives;
+  set<int> negatives;
+  for ( uint i = 0 ; i < n; i++ )
+    mysets[ y[i] ].insert ( i );
+
+  if ( mysets[ positiveClass ].size() == 0 ) 
+    fthrow(Exception, "Class " << positiveClass << " is not available.");
+
+  // add our positive examples
+  for ( set<int>::const_iterator i = mysets[positiveClass].begin(); i != mysets[positiveClass].end(); i++ )
+    positives.insert ( *i );
+
+  int Nneg = conf->gI("main", "nneg", 1 );
+  for ( map<int, set<int> >::const_iterator k = mysets.begin(); k != mysets.end(); k++ )
+  {
+    int classno = k->first;
+    if ( classno == positiveClass )
+      continue;
+    const set<int> & s = k->second;
+    uint ind = 0;
+    for ( set<int>::const_iterator i = s.begin(); (i != s.end() && ind < Nneg); i++,ind++  )
+      negatives.insert ( *i );
+  }
+  cerr << "Number of positive examples: " << positives.size() << endl;
+  cerr << "Number of negative examples: " << negatives.size() << endl;
+
+  yb.resize(y.size());
+  int ind = 0;
+  for ( uint i = 0 ; i < y.size(); i++ )
+  {
+    if (positives.find(i) != positives.end()) {
+      yb[ examples.size() ] = 1.0;
+      examples.insert( pair<int, int> ( i, ind ) );
+      ind++;
+    } else if ( negatives.find(i) != negatives.end() ) {
+      yb[ examples.size() ] = -1.0;
+      examples.insert( pair<int, int> ( i, ind ) );
+      ind++;
+    }
+  }
+  yb.resize( examples.size() );
+  cerr << "Examples: " << examples.size() << endl; 
+
+}
+
+// Intersection-kernel (HIK) matrix over a dense feature matrix, exposing only
+// sub-matrix multiplications as required by the greedy block coordinate
+// descent solver; the full n x n kernel matrix is never materialized.
+class BlockHIK : public PartialGenericMatrix 
+{
+  protected:
+    const double *data;  // dense feature matrix, row-major: data[example * d + dim]; not owned
+    int n;               // number of examples (the kernel matrix is n x n)
+    int d;               // number of feature dimensions
+    double noise;        // regularization added on the kernel diagonal
+    Vector diag;         // precomputed noise-free diagonal, diag[i] = k(x_i, x_i)
+
+  public:
+  
+ 
+    BlockHIK ( const double *data, int n, int d, double noise ) { 
+      this->data = data; 
+      this->n = n; 
+      this->d = d; 
+      this->noise = noise; 
+
+      // for HIK, k(x,x) = sum_dim min(x_dim, x_dim) = sum_dim x_dim
+      diag.resize(n);
+      for ( uint i = 0 ; i < n ; i++ ) 
+      {
+        double sum = 0.0;
+        for ( uint dim = 0 ; dim < d ; dim++ )
+          sum += data[i * d + dim];
+        diag[i] = sum;
+      }
+    }
+
+    /** multiply a sub-matrix with a given vector: Asub * xsub = ysub */
+    virtual void multiply ( const SetType & rowSet, const SetType & columnSet, NICE::Vector & y, const NICE::Vector & x) const
+    {
+      // the sub-kernel matrix is built explicitly; it is only of size
+      // |rowSet| x |columnSet|, which is small for the GBCD solver
+      Matrix K;
+      
+      if ( rowSet.size() == 0 || columnSet.size() == 0 )
+        fthrow(Exception, "Sets are zero ...weird" );
+      K.resize(rowSet.size(), columnSet.size());
+      K.set(0.0);
+  
+      //run over every dimension and add the corresponding min-values to the entries in the kernel matrix
+      int dimension = d;
+      for (int dim = 0; dim < dimension; dim++)
+      {
+        int indi = 0;
+        for ( SetType::const_iterator i = rowSet.begin(); i != rowSet.end(); i++, indi++ )
+        {
+          int indj = 0;
+          int myi = *i;
+          double vali = data[ myi * d + dim ];
+          for ( SetType::const_iterator j = columnSet.begin(); j != columnSet.end(); j++, indj++ )
+          {  
+            int myj = *j;
+            double valj = data[ myj * d + dim ];
+            double val = std::min ( valj, vali );
+
+            if ( indi >= K.rows() || indj >= K.cols() )
+              fthrow(Exception, "... weird indices!!" );
+            K(indi,indj) += val;
+            // spread the diagonal noise over all dimensions, so a diagonal
+            // entry accumulates exactly "noise" in total after the dim-loop
+            if ( myi == myj )
+              K(indi, indj) += noise / dimension;
+          } 
+        } 
+      }//dim-loop  
+
+      y.resize( rowSet.size() );
+      y = K*x;
+    }
+
+    /** multiply with a vector: A*x = y */
+    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const
+    {
+      // intentionally unsupported: the full matrix would not fit into memory
+      fthrow(Exception, "You do not really want to compute kernel matrices as big as this one!");
+    }
+
+    /** diagonal entry of the regularized kernel matrix (self-similarity + noise) */
+    virtual double getDiagonalElement ( uint i ) const
+    {
+      return diag[i] + noise;
+    }
+
+    virtual uint rows() const
+    {
+      return n;
+    }
+
+    virtual uint cols() const
+    {
+      return n;
+    }
+
+};
+
+/**
+ * Convert the sparse, per-dimension sorted FeatureMatrix into a dense
+ * row-major array of size n*d with data[example * d + dim] = value;
+ * entries missing from the sparse structure stay zero.
+ * The caller takes ownership of the returned array (release with delete []).
+ */
+double *createFlatData ( const FeatureMatrix & f )
+{
+  int n = f.get_n();
+  int d = f.get_d();
+  double *data = new double [ n * d ]; 
+  memset ( data, 0, n*d*sizeof(double) );
+
+  for (int dim = 0; dim < d; dim++)
+  {
+    // non-zero features of this dimension, sorted by feature value
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = f.getFeatureValues(dim).nonzeroElements();
+    int nrZeroIndices = f.getNumberOfZeroElementsPerDimension(dim);
+    // dimension contains no non-zero entries at all
+    if ( nrZeroIndices == n ) continue;
+      
+    for ( multimap< double, SortedVectorSparse<double>::dataelement>::const_iterator i = nonzeroElements.begin(); i != nonzeroElements.end(); i++)
+    {
+      // dataelement = (example index, feature value)
+      const SortedVectorSparse<double>::dataelement & de = i->second;
+      uint feat = de.first;
+      double fval = de.second;
+      data[ feat*d + dim ] = fval;
+    }
+  }
+  return data;
+}
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  int positiveClass = conf.gI("main", "positive_class");
+
+  cerr << "Positive class is " << positiveClass << endl;
+  
+  sparse_t data;
+  NICE::Vector y;
+  cerr << "Reading ImageNet data ..." << endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNet ( imageNetPath + "demo/" );
+
+  imageNet.getBatchData ( data, y, "train", "training" );
+
+  map<int, int> examples;
+  Vector yb;
+  selectExamples ( &conf, y, examples, yb );
+  
+  double noise = conf.gD("main", "noise", 10);
+  int dimension = conf.gI("main", "dimension", 1000);
+  int numBins = conf.gI("main", "num_bins", 100);
+  Quantization q ( numBins );
+  FastMinKernel fmk ( data, noise, examples, dimension );
+   
+  GMHIKernel gmk ( &fmk );
+
+  bool verbose = true;
+  int max_iterations = 500;
+  vector< IterativeLinearSolver * > methods;
+
+  ILSConjugateGradients *m = new ILSConjugateGradients(verbose, max_iterations);
+  m->setTimeAnalysis ( true );
+  methods.push_back ( m );
+
+  for ( vector< IterativeLinearSolver * >::const_iterator i = methods.begin();
+        i != methods.end(); i++ ) 
+  {
+    IterativeLinearSolver *method = *i;
+    Vector sol (gmk.cols(), 0.0);
+    method->solveLin ( gmk, yb, sol );
+  }
+
+  Vector sol ( gmk.cols(), 0.0 );
+  double *Tlookup = fmk.solveLin( yb, sol, q, NULL, true /* useRandomSubsets */, 100 /* max iterations */, -1, 0.0, true);
+
+  
+  int randomSetSize = conf.gI("main", "random_set_size", 60);
+  int stepComponents = conf.gI("main", "step_components", 50);
+  GBCDSolver gbcd ( randomSetSize, stepComponents, true );
+  gbcd.setTimeAnalysis(true);
+  Vector sol_gbcd;
+  double *cdata = createFlatData ( fmk.featureMatrix() );
+  BlockHIK bhik ( cdata, fmk.get_n(), fmk.get_d(), noise );
+  gbcd.solveLin ( bhik, yb, sol_gbcd );
+
+  delete [] cdata;
+
+  return 0;
+}

+ 131 - 0
progs/testLogDetApproximation.cpp

@@ -0,0 +1,131 @@
+/** 
+* @file testLogDetApproximation.cpp
+* @brief 
+* @author Alexander Freytag
+* @date 05-01-2012 (dd-mm-yyyy)
+*/
+
+#include <iostream>
+#include <cstdlib>
+#include <ctime>
+
+#include "core/vector/MatrixT.h"
+#include "core/vector/VectorT.h"
+
+#include "core/algebra/GenericMatrix.h"
+#include "core/algebra/EigValuesTRLAN.h"
+#include "core/algebra/GMStandard.h"
+
+#include "gp-hik-core/tools.h"
+
+#include "gp-hik-core/algebra/LogDetApproxBaiAndGolub.h"
+
+
+using namespace std;
+using namespace NICE;
+
+/**
+ * @brief Printing main menu.
+ * @author Alexander Freytag
+ * @date 12/06/2011
+ * 
+ * @return void
+ **/
/**
 * @brief Print the usage information of this test program to stderr.
 * @author Alexander Freytag
 * @date 12/06/2011
 *
 * @return void
 **/
void print_main_menu()
{
  // one chained statement; the emitted byte stream is identical to before
  std::cerr << std::endl << " - test program for logDet Approximation" << std::endl
            << "Input options:" << std::endl
            << "   -n <number>  dimension of K" << std::endl
            << "   -v 1/0  verbose mode" << std::endl;
}
+int main (int argc, char* argv[])
+{
+  // dimension of the test matrix K; overridable with -n
+  int n (5);
+  bool verbose(false);
+  
+  int rc;
+  if (argc<2)
+  {
+    print_main_menu();
+    return -1;
+  }
+  
+  // NOTE(review): getopt is used without an explicit #include <unistd.h>;
+  // presumably pulled in transitively — confirm
+  while ((rc=getopt(argc,argv,"n:v:h"))>=0)
+  {
+    switch(rc)
+    {
+      case 'n': n = atoi(optarg); break;
+      case 'v': verbose = atoi(optarg); break;
+      default: print_main_menu();
+    }
+  }
+  
+  if (verbose)
+  {
+    std::cerr << "Testing logDet Approximation for n = " << n << std::endl;
+  }
+  
+  srand ( time(NULL) );
+  
+  NICE::Matrix ARand(generateRandomMatrix(n,n));
+  NICE::Matrix A;
+  // A shall be positive definite
+  // NOTE(review): assumes multiply(ARand, ARand, true) computes ARand^T * ARand,
+  // which is symmetric positive (semi-)definite — confirm the flag semantics
+  A.multiply(ARand, ARand, true);
+  
+  NICE::GMStandard genericA(A);
+  
+  //compute GT LogDet based on eigenvalues of A
+  // the full spectrum (all n eigenvalues) is requested, so the sum of the
+  // log eigenvalues below is the exact log determinant
+  NICE::Vector eigenvalues;
+  NICE::Matrix eigenvectors;
+  try
+  {
+    NICE::EigValuesTRLAN eigValuesComputation;
+    eigValuesComputation.getEigenvalues(genericA, eigenvalues,eigenvectors, n );
+  }
+  catch (...)
+  {
+    // fall back to the Arnoldi iteration if TRLAN is not usable
+    // NOTE(review): EVArnoldi is not included directly in this file;
+    // presumably available via the included algebra headers — confirm
+    NICE::EVArnoldi eigValuesComputation;
+    eigValuesComputation.getEigenvalues(genericA, eigenvalues,eigenvectors, n );
+  }
+  
+  
+  double logDetGT(0.0);
+  for (int i = 0; i < n; i++)
+  {
+    logDetGT += log(eigenvalues[i]);
+  }
+  
+  if (verbose)
+  {
+    std::cerr << "GT logDet: " << logDetGT << std::endl;
+  }
+  
+  //replace this later on using only the k largest eigenvalues
+  double frobNorm(A.squaredFrobeniusNorm());
+  
+  NICE::LogDetApproxBaiAndGolub logDetApproximator;
+  double logDetApprox(logDetApproximator.getLogDetApproximation(A.trace(), frobNorm, eigenvalues.Max(), eigenvalues.Min(), n ) );
+  
+  if (verbose)
+  {
+    std::cerr << "logDetApprox: " << logDetApprox << std::endl;
+  }
+  
+  double logDetApproxUpperBound(logDetApproximator.getLogDetApproximationUpperBound(A.trace(), frobNorm, eigenvalues.Max(), n ) );
+  
+  if (verbose)
+  {
+    std::cerr << "logDetApproxUpperBound: " << logDetApproxUpperBound << std::endl;
+  }
+  
+  // NOTE(review): the lower bound is computed with the *upper*-bound routine
+  // evaluated at the smallest eigenvalue. In the Bai & Golub framework the
+  // same bound expression at the smallest eigenvalue yields the lower bound,
+  // so this may be intentional — confirm against LogDetApproxBaiAndGolub's
+  // interface (a dedicated lower-bound method may exist)
+  double logDetApproxLowerBound(logDetApproximator.getLogDetApproximationUpperBound(A.trace(), frobNorm, eigenvalues.Min(), n ) );
+  
+  if (verbose)
+  {
+    std::cerr << "logDetApproxLowerBound: " << logDetApproxLowerBound << std::endl;
+  }
+  
+  return 0;
+}

+ 95 - 0
progs/testWackerOptimization.cpp

@@ -0,0 +1,95 @@
+/** 
+* @file testWackerOptimization.cpp
+* @brief test the downhill simplex method on a toy example
+* @author Erik Rodner
+* @date 01/31/2012
+
+*/
+#include "core/basics/Config.h"
+
+#include "optimization/DownhillSimplexOptimizer.h"
+#include "optimization/GoldenCutLineSearcher.h"
+#include "optimization/FileLog.h"
+
+using namespace std;
+using namespace NICE;
+
+const int dim = 1;
+
+/**
+ * @brief Toy quadratic cost function with a known minimum, used to exercise
+ *        the optimizers: f(x) = (x0-0.3)^2 in the 1D case, or
+ *        f(x) = (x0-0.3)^2 + (x1-0.2)^2 in the 2D case.
+ */
+class MyCostFunction : public CostFunction
+{
+  public: 
+  
+   // initialize the CostFunction base with the global problem dimension
+   MyCostFunction() : CostFunction(dim)
+   {
+   }
+
+   // evaluate the cost at column vector x; prints trace output to cerr
+   virtual double evaluate(const optimization::matrix_type & x)
+   {
+     double f;
+     cerr << x.rows() << " x " << x.cols() << endl;
+     if ( x.rows() == 1 )
+     {
+       cerr << "current position: " << x[0][0] << endl;
+       f = pow(x[0][0] - 0.3, 2.0);
+       cerr << "function value: " << f << endl;
+
+     } else {
+       cerr << "current position: " << x[0][0] << " " << x[1][0] << endl;
+       f = pow(x[0][0] - 0.3, 2.0) + pow( x[1][0] - 0.2, 2.0 );
+       cerr << "function value: " << f << endl;
+     }
+     return f;
+   }
+
+
+};
+
+/**
+ * @brief Test the downhill simplex method (or, optionally, a golden-cut line
+ *        search) on the quadratic toy example defined above.
+ *
+ * Config options (section "main"):
+ *   use_downhill   (bool, default true) - downhill simplex vs. line search
+ *   max_iterations (int,  default 10)   - iteration cap for the simplex
+ */
+int main (int argc, char **argv)
+{   
+    std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+    Config conf ( argc, argv );
+    
+    
+    CostFunction *func = new MyCostFunction(); 
+
+    // start the search at 0.8 in every dimension
+    optimization::matrix_type initialParams (dim, 1);
+    initialParams.Set(0.8);
+    if ( dim == 2 ) 
+      cerr << initialParams[0][0] << " " << initialParams[1][0] << endl;
+
+    // unit scaling for all parameters
+    optimization::matrix_type scales (dim, 1);
+    scales.Set(1.0);
+
+    SimpleOptProblem optProblem ( func, initialParams, scales );
+ 
+    bool useDownhill = conf.gB("main", "use_downhill", true ); 
+    if ( useDownhill ) {
+      DownhillSimplexOptimizer optimizer;
+      optimizer.setMaxNumIter(true, conf.gI("main", "max_iterations", 10));
+      optimizer.optimizeProb ( optProblem );
+      
+      /* Constraints are not working for DownhillSimplexOptimizer 
+      optProblem.setUpperBound(0, 1.0);
+      optProblem.setLowerBound(0, 0.0);
+      optProblem.setUpperBound(1, 1.0);
+      optProblem.setLowerBound(1, 0.0);  
+      */
+
+    } else {
+      // 1D only: bounded golden-cut line search, progress logged to file
+      FileLog fl ("/tmp/optimizer.log");
+      GoldenCutLineSearcher optimizer ( func, &fl );
+      optimizer.setBounds(0.0, 1.0);
+      optimizer.optimize();
+    }
+    
+    
+    delete func;
+    return 0;
+}

+ 89 - 0
tests/Makefile.inc

@@ -0,0 +1,89 @@
+# BINARY-DIRECTORY-MAKEFILE
+# conventions:
+# - there are no subdirectories, they are ignored!
+# - all ".C", ".cpp" and ".c" files in the current directory are considered
+#   independent binaries, and linked as such.
+# - the binaries depend on the library of the parent directory
+# - the binary names are created with $(BINNAME), i.e. it will be more or less
+#   the name of the .o file
+# - all binaries will be added to the default build list ALL_BINARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exitting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+#SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+#include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# include the libdepend.inc file, which gives additional dependencies for the
+# libraries and binaries. additionally, an automatic dependency from the library
+# of the parent directory is added (commented out in the code below).
+
+-include $(SUBDIR)libdepend.inc
+
+PARENTDIR:=$(patsubst %/,%,$(dir $(patsubst %/,%,$(SUBDIR))))
+$(call PKG_DEPEND_INT,$(PARENTDIR))
+$(call PKG_DEPEND_EXT,CPPUNIT)
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+      $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+CHECKS:=$(BINDIR)$(call LIBNAME,$(SUBDIR))
+ALL_CHECKS+=$(CHECKS)
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. each binary depends on the corresponding .o file and
+# on the libraries specified by the INTLIBS/EXTLIBS. these dependencies can be
+# specified manually or they are automatically stored in a .bd file.
+
+$(foreach head,$(wildcard $(SUBDIR)*.h),$(eval $(shell grep -q Q_OBJECT $(head) && echo $(head) | sed -e's@^@/@;s@.*/\(.*\)\.h$$@$(BINDIR)\1:$(OBJDIR)moc_\1.o@')))
+$(eval $(foreach c,$(CHECKS),$(c):$(BUILDDIR)$(CPPUNIT_MAIN_OBJ) $(OBJS) $(call PRINT_INTLIB_DEPS,$(c),.a)))
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 536 - 0
tests/TestGPHIKClassifier.cpp

@@ -0,0 +1,536 @@
+#ifdef NICE_USELIB_CPPUNIT
+
+#include <string>
+#include <exception>
+#include <iostream>
+#include <fstream>
+
+//----------
+
+#include <core/basics/Timer.h>
+
+//----------
+
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+
+//----------
+
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+#include "TestGPHIKClassifier.h"
+
+
+const bool verbose = false;
+const bool verboseStartEnd = true;
+
+using namespace OBJREC;
+using namespace NICE;
+using namespace std;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKClassifier );
+
+// nothing to set up -- each test constructs its own classifiers and data
+void TestGPHIKClassifier::setUp() {
+}
+
+// nothing to tear down -- tests release their own resources
+void TestGPHIKClassifier::tearDown() {
+}
+
+/**
+ * @brief Train the given classifier on the odd-indexed rows of mX (labels vY)
+ *        and classify the even-indexed rows, optionally printing scores and
+ *        predicted uncertainty. Features are sparse 3-d vectors
+ *        (x, y, 1-x-y) built from the first two matrix columns.
+ */
+void myClassifierTest( GPHIKClassifierNICE & classifier, const Matrix & mX, const Vector & vY )
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierTest ===================== " << std::endl;
+  
+  Examples examples;
+
+  // odd-indexed rows -> training set
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation done." << std::endl;
+
+  if ( verbose ) 
+    std::cerr << "learning ..." << std::endl;
+  classifier.train ( fp, examples ); 
+
+  // even-indexed rows -> test set
+  // NOTE(review): the per-iteration `new SparseVector` below is never freed
+  // (examples.clean() only covers the training set) -- possible small leak
+  if ( verbose )
+    std::cerr << "testing ..." << std::endl;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult r = classifier.classify ( example );
+      if (verbose)
+      {
+        r.scores >> std::cerr;
+        std::cerr << "predicted uncertainty: " << r.uncertainty << std::endl;
+      }
+    } 
+
+  // release the training examples (deletes the owned sparse vectors)
+  examples.clean();
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierTest done ===================== " << std::endl;
+
+}
+
+/**
+ * @brief Train a classifier, serialize it to /tmp/GPHIK_store.txt, restore it
+ *        into a fresh object, and assert that original and restored
+ *        classifiers produce numerically identical scores (tolerance 1e-6)
+ *        on the held-out examples. Odd-indexed rows of mX train, even-indexed
+ *        rows test; features are sparse 3-d vectors (x, y, 1-x-y).
+ */
+void myClassifierStoreRestoreTest( GPHIKClassifierNICE & classifier, const Matrix & mX, const Vector & vY )
+{
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierStoreRestoreTest ===================== " << std::endl;
+  
+  Examples examples;
+
+  // odd-indexed rows -> training set
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation done." << std::endl;
+
+  if ( verbose ) 
+    std::cerr << "learning ..." << std::endl;
+  classifier.train ( fp, examples ); 
+  
+  if ( verbose ) 
+    std::cerr << "storing ..." << std::endl;  
+  //test the store-functionality  
+  string destination("/tmp/GPHIK_store.txt");
+  
+  std::filebuf fb;
+  fb.open (destination.c_str(),ios::out);
+  std::ostream os(&fb);
+//   
+  classifier.store(os);  
+//   
+  fb.close();
+  
+  if ( verbose ) 
+    std::cerr << "loading ..." << std::endl;  
+  
+  // restore into a classifier built from a default config -- all relevant
+  // state must come from the serialized stream, not from the config
+  Config confTmp;
+  GPHIKClassifierNICE classifierRestored(&confTmp);
+  
+  std::filebuf fbIn;
+  fbIn.open (destination.c_str(),ios::in);
+  std::istream is(&fbIn);
+//   
+  classifierRestored.restore(is);
+//   
+  fbIn.close();    
+
+  if ( verbose )
+    std::cerr << "testing ..." << std::endl;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult rOrig = classifier.classify ( example );
+      ClassificationResult rRestored = classifierRestored.classify ( example );
+      
+      //scores are of type FullVector
+      //we use the [] operator, since there are no iterators given in FullVector.h
+      bool equal(true);
+      for (int j = 0; j < rOrig.scores.size(); j++)
+      {
+        // bugfix: tolerance was written as "10-6", which evaluates to the
+        // integer 4 and silently accepted any score mismatch
+        if ( fabs(rOrig.scores[j] - rRestored.scores[j]) > 1e-6)
+        {
+          equal = false;
+          break;
+        }        
+      }
+      
+      CPPUNIT_ASSERT_EQUAL ( equal, true ); 
+    } 
+
+  examples.clean();
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierStoreRestoreTest done ===================== " << std::endl;
+
+}
+
+/**
+ * @brief Compare incremental learning against batch re-training:
+ *        classifierIL is trained on every 4th row (i % 4 == 1) and then
+ *        extended via addExample with one new example (taken from the
+ *        i % 4 == 3 rows), while classifierRetrain is trained from scratch on
+ *        the union of both sets. Both classifiers must yield nearly identical
+ *        scores (tolerance 10e-3) on the even-indexed test rows.
+ */
+void myClassifierILTest( GPHIKClassifierNICE & classifierRetrain, GPHIKClassifierNICE & classifierIL, const Matrix & mX, const Vector & vY )
+{
+ 
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierILTest ===================== " << std::endl;
+  
+  Examples examples;
+  
+  if (verbose)
+    std::cerr << "vY: " << vY << std::endl;
+
+  // initial training set: rows with i % 4 == 1
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+  {
+    if ( i % 4 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+  }
+
+  if (verbose)
+    std::cerr << "examples.size(): " << examples.size()  << std::endl;
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation done." << std::endl;
+
+  if ( verbose ) 
+    std::cerr << "learning ..." << std::endl;
+  classifierIL.train ( fp, examples ); 
+  
+  //choose next example(s): rows with i % 4 == 3
+  
+  Examples newExamples;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+  {
+    if ( i % 4 == 3 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      newExamples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }  
+  }
+
+//   if ( verbose ) 
+    std::cerr << std::endl << " =============== " << std::endl << "incremental learning ..." << std::endl;
+  
+  // add them to classifierIL
+//   std::cerr << "We add several new examples" << std::endl;
+  Timer t;
+  t.start();  
+//   for (uint i = 0; i < newExamples.size(); i++)
+  for (uint i = 0; i < 1; i++)
+  {
+    classifierIL.addExample( newExamples[i].second, newExamples[i].first);      
+  }  
+  
+  t.stop();  
+  std::cerr << "Time used for incremental training: " << t.getLast() << std::endl;
+
+  //add the new features to feature pool needed for batch training
+  // bugfix: this loop previously added 2 examples while the incremental
+  // classifier above received only 1, so the two classifiers were trained on
+  // different data and the score comparison below was meaningless -- the
+  // bound must match the incremental loop
+//   for (uint i = 0; i < newExamples.size(); i++)
+  for (uint i = 0; i < 1; i++)
+  {  
+    examples.push_back( newExamples[i] );
+  }
+  
+  std::cerr << std::endl << " =============== " << std::endl << "We train the second classifier from the scratch with the additional new example" << std::endl;
+  t.start(); 
+  
+  classifierRetrain.train ( fp, examples );  
+  
+  t.stop();  
+  std::cerr << "Time used for batch training: " << t.getLast() << std::endl;  
+  
+  //evaluate both and compare the resulting scores
+//  if ( verbose )
+    std::cerr << "testing ..." << std::endl;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult resultIL = classifierIL.classify ( example );
+      ClassificationResult resultBatch = classifierRetrain.classify ( example );
+      
+      if (verbose)
+      {
+        std::cerr << "result of IL classifier: " << std::endl;
+        resultIL.scores >> std::cerr;
+        
+        std::cerr << "result of batch classifier: " << std::endl;
+        resultBatch.scores >> std::cerr;
+      }
+      
+      //scores are of type FullVector
+      //we use the [] operator, since there are no iterators given in FullVector.h
+      bool equal(true);
+      for (int j = 0; j < resultIL.scores.size(); j++)
+      {
+        if ( fabs(resultIL.scores[j] - resultBatch.scores[j]) > 10e-3)
+        {
+          equal = false;
+          break;
+        }        
+      }
+      
+      CPPUNIT_ASSERT_EQUAL ( equal, true ); 
+    } 
+
+  examples.clean();
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierILTest done ===================== " << std::endl;
+}
+
+/**
+ * @brief End-to-end test: runs binary and multi-class classification,
+ *        store/restore, and incremental-learning comparisons on data read
+ *        from toyExampleLargeLargeScale.data (matrix mX, binary labels vY,
+ *        multi-class labels vY_multi).
+ */
+void TestGPHIKClassifier::testGPHIKClassifier() 
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKClassifier ===================== " << std::endl;
+
+  Config conf;
+  conf.sD( "GPHIKClassifier", "noise", 0.01 );
+  conf.sD( "GPHIKClassifier", "parameter_lower_bound", 0.5 );
+  conf.sD( "GPHIKClassifier", "parameter_upper_bound", 3.5 );
+  conf.sI( "GPHIKClassifier", "uncertaintyPrediction", 1);
+//   conf.sS( "GPHIKClassifier", "optimization_method", "none");
+  conf.sS( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  conf.sB( "GPHIKClassifier", "uncertaintyPredictionForClassification", true);
+
+  GPHIKClassifierNICE * classifier  = new GPHIKClassifierNICE ( &conf );
+  
+  Matrix mX;
+  Vector vY;
+  Vector vY_multi;
+
+  // alternative, smaller datasets kept for quick local runs:
+//   ifstream ifs ("toyExample1.data", ios::in);
+//   ifstream ifs ("toyExampleLargeScale.data", ios::in);
+  ifstream ifs ("toyExampleLargeLargeScale.data", ios::in);
+  CPPUNIT_ASSERT ( ifs.good() );
+  ifs >> mX;
+  ifs >> vY;
+  ifs >> vY_multi;
+  ifs.close();
+  
+  if (verbose)
+  {
+    std::cerr << "data loaded: mX" << std::endl;
+    std::cerr << mX << std::endl;
+    std::cerr << "vY: " << std::endl;
+    std::cerr << vY << std::endl;
+    std::cerr << "vY_multi: " << std::endl;
+    std::cerr << vY_multi << std::endl;
+  }
+
+  if ( verbose )
+    std::cerr << "Binary classification test " << std::endl; 
+
+  myClassifierTest ( *classifier, mX, vY );
+  
+  // ... we remove nothing here since we are only interested in store and restore :)
+  myClassifierStoreRestoreTest ( *classifier, mX, vY );
+  
+  // ... remove previously computed things and start again, this time with incremental settings
+  if (classifier != NULL)
+    delete classifier;
+  
+  classifier  = new GPHIKClassifierNICE ( &conf );
+    GPHIKClassifierNICE * classifierBatch = new GPHIKClassifierNICE ( &conf ); 
+  
+  // binary incremental-vs-batch comparison
+  myClassifierILTest( *classifierBatch, *classifier, mX, vY );
+  
+  if (classifier != NULL)
+    delete classifier;
+  if (classifierBatch != NULL)
+    delete classifierBatch;
+  
+  classifier  = new GPHIKClassifierNICE ( &conf );
+  classifierBatch = new GPHIKClassifierNICE ( &conf );  
+
+  if ( verbose )
+    std::cerr << "Multi-class classification test " << std::endl; 
+  myClassifierTest ( *classifier, mX, vY_multi );
+  
+  // ... we remove nothing here since we are only interested and store and restore :)
+//   
+//   myClassifierStoreRestoreTest ( classifier, mX, vY_multi );
+  
+  // ... remove previously computed things and start again, this time with incremental settings
+  if (classifier != NULL)
+    delete classifier;
+  if (classifierBatch != NULL)
+    delete classifierBatch;
+  
+  classifier  = new GPHIKClassifierNICE ( &conf );
+  classifierBatch = new GPHIKClassifierNICE ( &conf ); 
+  
+  // multi-class incremental-vs-batch comparison
+  myClassifierILTest( *classifierBatch, *classifier, mX, vY_multi );
+  
+  if (classifier != NULL)
+    delete classifier;
+  if (classifierBatch != NULL)
+    delete classifierBatch;  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKClassifier done ===================== " << std::endl;
+ 
+}
+
+/**
+ * @brief Tests the predictive-variance (uncertainty) computation: trains five
+ *        classifiers that differ only in the variance-approximation method
+ *        (rough, rough+quantization, fine with 1 and 2 eigenvalues, exact)
+ *        on toyExample2.data and asserts an ordering between the resulting
+ *        uncertainties on the held-out examples.
+ */
+void TestGPHIKClassifier::testGPHIKVariance()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKVariance ===================== " << std::endl;
+
+  double noise (0.01);
+  
+  // fixed kernel parameter (lower bound == upper bound) so that all five
+  // classifiers use the identical model and only the variance method differs
+  Config conf;
+  conf.sD( "GPHIKClassifier", "noise", noise );
+  conf.sD( "GPHIKClassifier", "parameter_lower_bound", 1.0 );
+  conf.sD( "GPHIKClassifier", "parameter_upper_bound", 1.0 );
+  conf.sS( "GPHIKClassifier", "varianceApproximation", "approximate_rough");
+  conf.sB( "GPHIKClassifier", "learn_balanced", true);
+  conf.sB( "GPHIKClassifier", "uncertaintyPredictionForClassification", true);
+  
+  GPHIKClassifierNICE classifier ( &conf );
+  
+  // rough approximation with quantization enabled
+  Config confVarApproxQuant(conf);
+  confVarApproxQuant.sB( "GPHIKClassifier", "use_quantization", true );
+  GPHIKClassifierNICE classifierQuant ( &confVarApproxQuant );  
+
+  // fine approximation using the single largest eigenvalue
+  Config confVarApproxFine1(conf);
+  confVarApproxFine1.sS( "GPHIKClassifier", "varianceApproximation", "approximate_fine");  
+  confVarApproxFine1.sI( "GPHIKClassifier", "nrOfEigenvaluesToConsiderForVarApprox", 1);
+  
+  GPHIKClassifierNICE classifierVarApproxFine1 ( &confVarApproxFine1 );  
+
+  // fine approximation using the two largest eigenvalues
+  Config confVarApproxFine2(conf);
+  confVarApproxFine2.sS( "GPHIKClassifier", "varianceApproximation", "approximate_fine");    
+  confVarApproxFine2.sI( "GPHIKClassifier", "nrOfEigenvaluesToConsiderForVarApprox", 2);
+  
+  GPHIKClassifierNICE classifierVarApproxFine2 ( &confVarApproxFine2 );    
+  
+  // exact variance computation (reference)
+  Config confExact(conf);
+  confExact.sS( "GPHIKClassifier", "varianceApproximation", "exact");   
+  
+  GPHIKClassifierNICE classifierVarExact ( &confExact );
+  
+  NICE::Matrix mX;
+  NICE::Vector vY;
+  NICE::Vector vY_multi;
+
+  ifstream ifs ("toyExample2.data", ios::in);
+  CPPUNIT_ASSERT ( ifs.good() );
+  ifs >> mX;
+  ifs >> vY;
+  ifs >> vY_multi;
+  ifs.close();
+
+  if (verbose)
+  {
+    std::cerr << "data loaded: mX" << std::endl;
+    std::cerr << mX << std::endl;
+    std::cerr << "vY: " << std::endl;
+    std::cerr << vY << std::endl;
+    std::cerr << "vY_multi: " << std::endl;
+    std::cerr << vY_multi << std::endl;
+  }
+  
+  Examples examples;
+
+  // even-indexed rows train (multi-class labels), odd-indexed rows test below
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY_multi[i], example ) );
+    }
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation for variance testing done." << std::endl;
+
+  if ( verbose ) 
+    std::cerr << "learning for variance testing ..." << std::endl;
+  classifier.train ( fp, examples ); 
+  classifierQuant.train ( fp, examples );
+  classifierVarApproxFine1.train ( fp, examples ); 
+  classifierVarApproxFine2.train ( fp, examples ); 
+  classifierVarExact.train ( fp, examples ); 
+
+  if ( verbose )
+    std::cerr << "testing for variance testing ..." << std::endl;
+  
+  for ( uint i = 0 ; i < vY_multi.size() ; i++ )
+    if ( i % 2 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult r = classifier.classify ( example );
+      ClassificationResult rQuant = classifierQuant.classify ( example );
+      ClassificationResult rVarApproxFine1 = classifierVarApproxFine1.classify ( example );
+      ClassificationResult rVarApproxFine2 = classifierVarApproxFine2.classify ( example );
+      ClassificationResult rExact = classifierVarExact.classify ( example );
+      
+      if (verbose)
+      {
+        std::cerr << "approxUnc: " << r.uncertainty << " approxUncQuant: " << rQuant.uncertainty<< " approxUncFine1: " << rVarApproxFine1.uncertainty << " approxUncFine2: " << rVarApproxFine2.uncertainty << " exactUnc: " << rExact.uncertainty << std::endl;
+      }
+
+      // the assertions pin a strictly decreasing uncertainty chain:
+      // rough (and quantized rough) > fine(1 EV) > fine(2 EVs) > exact,
+      // i.e. the coarser the approximation, the larger the uncertainty bound
+      CPPUNIT_ASSERT ( r.uncertainty <=  (1.0 + noise) ); //using the "standard" HIK, this is the upper bound
+      CPPUNIT_ASSERT ( r.uncertainty >  rVarApproxFine1.uncertainty);
+      CPPUNIT_ASSERT ( rQuant.uncertainty >  rVarApproxFine1.uncertainty);
+      CPPUNIT_ASSERT ( rVarApproxFine1.uncertainty >  rVarApproxFine2.uncertainty);
+      CPPUNIT_ASSERT ( rVarApproxFine2.uncertainty >  rExact.uncertainty);
+      
+    } 
+
+  examples.clean();  
+  
+  
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKVariance done ===================== " << std::endl;
+  
+}
+
+#endif

+ 31 - 0
tests/TestGPHIKClassifier.h

@@ -0,0 +1,31 @@
+#ifndef _TESTGPHIKCLASSIFIER_H
+#define _TESTGPHIKCLASSIFIER_H
+
+#include <cppunit/extensions/HelperMacros.h>
+
+/**
+ * CppUnit-Testcase. 
+ * Registers the GPHIK classification and variance-prediction tests.
+ */
+class TestGPHIKClassifier : public CppUnit::TestFixture {
+
+    CPPUNIT_TEST_SUITE( TestGPHIKClassifier );
+    
+    CPPUNIT_TEST(testGPHIKClassifier);
+    CPPUNIT_TEST(testGPHIKVariance);
+//     CPPUNIT_TEST(testGPHIKIncrementalLearning);
+    
+    CPPUNIT_TEST_SUITE_END();
+  
+ private:
+ 
+ public:
+    void setUp();
+    void tearDown();
+
+    // binary and multi-class classification incl. store/restore and IL checks
+    void testGPHIKClassifier();
+    // predictive-variance approximations (rough / quantized / fine / exact)
+    void testGPHIKVariance();
+//     void testGPHIKIncrementalLearning();
+
+};
+
+#endif // _TESTGPHIKCLASSIFIER_H

BIN
tests/sparse20x30matrixM.mat


BIN
tests/sparse3x3matrixA.mat


+ 42 - 0
tests/toyExample1.data

@@ -0,0 +1,42 @@
+39 x 2
+0.1394    0.3699
+0.1210    0.3260
+0.1164    0.2588
+0.1210    0.2032
+0.1417    0.1886
+0.1624    0.2325
+0.1624    0.3319
+0.1509    0.3114
+0.1417    0.2412
+0.1417    0.2763
+0.1279    0.3173
+0.3537    0.3582
+0.3306    0.3056
+0.3306    0.2471
+0.3376    0.2061
+0.3583    0.1740
+0.3698    0.1564
+0.3790    0.2558
+0.3744    0.3173
+0.3698    0.3406
+0.3583    0.2646
+0.3629    0.1944
+0.3468    0.3173
+0.3329    0.2588
+0.3514    0.1974
+0.2224    0.3436
+0.2270    0.3348
+0.2293    0.2675
+0.2339    0.2237
+0.2316    0.1623
+0.2408    0.1857
+0.2615    0.2763
+0.2638    0.3436
+0.2592    0.3904
+0.2477    0.4284
+0.2224    0.3582
+0.2177    0.2909
+0.2224    0.2178
+0.2500    0.1213
+39 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+39 < 0 0 0 0 0 0 0 0 0 0 0 3 3 3 3 3 3 3 3 3 3 3 3 3 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >

+ 9 - 0
tests/toyExample2.data

@@ -0,0 +1,9 @@
+6 x 2
+0.1    0.3
+0.1    0.2
+0.3    0.3
+0.2    0.2
+0.4    0.1
+0.1    0.5
+6 < 0 0 0 1 1 1 >
+6 < 0 0 3 3 1 1 >

Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 1502 - 0
tests/toyExampleLargeLargeScale.data


+ 604 - 0
tests/toyExampleLargeScale.data

@@ -0,0 +1,604 @@
+600 x 2
+0.342689 0.175671 
+0.30934 0.268245 
+0.283338 0.31431 
+0.322194 0.211048 
+0.27985 0.217818 
+0.253954 0.195404 
+0.435345 0.125535 
+0.312486 0.137852 
+0.268998 0.2357 
+0.254516 0.213503 
+0.315574 0.130796 
+0.39208 0.178418 
+0.262966 0.128216 
+0.258793 0.151603 
+0.32426 0.234761 
+0.292135 0.138279 
+0.331166 0.145197 
+0.3395 0.169078 
+0.283081 0.134257 
+0.2829 0.147042 
+0.257692 0.28673 
+0.31662 0.209292 
+0.274172 0.194507 
+0.368124 0.127353 
+0.378173 0.236568 
+0.313633 0.166162 
+0.308659 0.154215 
+0.307818 0.180172 
+0.306022 0.194493 
+0.256317 0.182305 
+0.253279 0.138247 
+0.286522 0.154934 
+0.343294 0.296202 
+0.435882 0.149799 
+0.266064 0.141986 
+0.362818 0.130809 
+0.424555 0.154075 
+0.312223 0.176338 
+0.346151 0.185167 
+0.303702 0.224405 
+0.250913 0.27087 
+0.278182 0.187886 
+0.305441 0.161418 
+0.390785 0.176948 
+0.419366 0.139879 
+0.298091 0.134676 
+0.311699 0.242829 
+0.293336 0.238481 
+0.349461 0.179128 
+0.294253 0.191256 
+0.255692 0.154904 
+0.273268 0.193811 
+0.376241 0.206254 
+0.329721 0.17215 
+0.331964 0.234789 
+0.335461 0.186645 
+0.296782 0.158248 
+0.368493 0.148484 
+0.255566 0.188169 
+0.343617 0.135276 
+0.252996 0.204645 
+0.285394 0.333677 
+0.313484 0.175742 
+0.250342 0.203408 
+0.266606 0.188748 
+0.283449 0.129172 
+0.340621 0.179734 
+0.315654 0.199744 
+0.27226 0.134784 
+0.296711 0.185527 
+0.253752 0.198492 
+0.257381 0.257684 
+0.346152 0.220506 
+0.36263 0.183317 
+0.278849 0.181596 
+0.301625 0.247397 
+0.318059 0.23282 
+0.271193 0.143659 
+0.40265 0.205326 
+0.457977 0.223787 
+0.277921 0.132572 
+0.44805 0.266026 
+0.292541 0.133553 
+0.320695 0.152126 
+0.293894 0.132603 
+0.310329 0.158675 
+0.308961 0.228526 
+0.310193 0.201196 
+0.357398 0.276934 
+0.362411 0.134546 
+0.252874 0.249074 
+0.323796 0.231816 
+0.258442 0.173894 
+0.343986 0.134667 
+0.356016 0.163639 
+0.322109 0.210639 
+0.28522 0.223836 
+0.396437 0.198424 
+0.283134 0.21192 
+0.279188 0.215173 
+0.260586 0.22736 
+0.329615 0.19164 
+0.339912 0.133774 
+0.257242 0.151432 
+0.353614 0.163562 
+0.332978 0.182046 
+0.302671 0.248665 
+0.259309 0.151224 
+0.318917 0.240108 
+0.344637 0.135684 
+0.256466 0.283143 
+0.356169 0.209122 
+0.251218 0.224075 
+0.424779 0.215246 
+0.372904 0.150395 
+0.428672 0.125709 
+0.391982 0.182144 
+0.26703 0.265749 
+0.266772 0.152864 
+0.418837 0.250821 
+0.303323 0.235758 
+0.311233 0.15944 
+0.390081 0.292144 
+0.289179 0.154131 
+0.269899 0.233753 
+0.292143 0.269953 
+0.389615 0.181187 
+0.281855 0.168289 
+0.355694 0.130023 
+0.258038 0.191685 
+0.322198 0.160255 
+0.265639 0.205397 
+0.266359 0.195618 
+0.291999 0.161498 
+0.287761 0.170072 
+0.264713 0.332262 
+0.294721 0.140154 
+0.273594 0.165844 
+0.310086 0.169887 
+0.341029 0.225881 
+0.316856 0.137035 
+0.300842 0.221668 
+0.301447 0.210899 
+0.292541 0.135141 
+0.282796 0.135598 
+0.267783 0.151061 
+0.461684 0.192769 
+0.311754 0.238481 
+0.252301 0.171746 
+0.370648 0.194599 
+0.363942 0.159229 
+0.353153 0.187895 
+0.343755 0.214295 
+0.35249 0.132681 
+0.321514 0.191171 
+0.32338 0.135597 
+0.365625 0.141555 
+0.33572 0.236221 
+0.255242 0.240287 
+0.272454 0.25177 
+0.260317 0.137604 
+0.293878 0.138076 
+0.262748 0.191504 
+0.329031 0.143135 
+0.338375 0.250212 
+0.345667 0.147506 
+0.309146 0.198383 
+0.282595 0.295251 
+0.262683 0.15159 
+0.296848 0.163558 
+0.264113 0.274616 
+0.338641 0.211817 
+0.259174 0.264645 
+0.330357 0.20687 
+0.353817 0.22874 
+0.269664 0.226656 
+0.252154 0.148463 
+0.366193 0.150144 
+0.256898 0.245194 
+0.304303 0.183618 
+0.335466 0.151312 
+0.262861 0.200441 
+0.262813 0.252586 
+0.313346 0.194787 
+0.289579 0.247262 
+0.286535 0.23699 
+0.310318 0.142124 
+0.341106 0.206294 
+0.273167 0.156972 
+0.269453 0.187743 
+0.355513 0.183233 
+0.263025 0.199449 
+0.313509 0.331514 
+0.311078 0.252023 
+0.281887 0.26323 
+0.255329 0.173521 
+0.300729 0.214255 
+0.286228 0.136099 
+0.299626 0.157784 
+0.271569 0.22316 
+0.300825 0.303776 
+0.27322 0.25126 
+0.176006 0.402724 
+0.226378 0.348555 
+0.168946 0.262155 
+0.139945 0.341302 
+0.141302 0.305834 
+0.15167 0.264065 
+0.13236 0.287971 
+0.259065 0.450122 
+0.167671 0.301213 
+0.232472 0.315405 
+0.318855 0.278831 
+0.149421 0.336895 
+0.167089 0.266261 
+0.125286 0.322987 
+0.186744 0.359308 
+0.181219 0.298146 
+0.162008 0.412922 
+0.142068 0.288868 
+0.20133 0.317385 
+0.152729 0.340693 
+0.156914 0.393993 
+0.151577 0.271511 
+0.137218 0.435257 
+0.135001 0.288495 
+0.233009 0.308706 
+0.253521 0.278079 
+0.126533 0.327627 
+0.129093 0.344601 
+0.271354 0.292011 
+0.228235 0.290139 
+0.213721 0.357127 
+0.152746 0.388868 
+0.137812 0.376055 
+0.247148 0.391889 
+0.199338 0.316814 
+0.19992 0.434137 
+0.265019 0.338816 
+0.138767 0.355017 
+0.139752 0.313471 
+0.217796 0.265376 
+0.152899 0.257636 
+0.248653 0.313653 
+0.154939 0.31371 
+0.235854 0.259526 
+0.165171 0.300912 
+0.246794 0.338431 
+0.203588 0.363351 
+0.155485 0.377965 
+0.211843 0.290398 
+0.306505 0.385808 
+0.261773 0.398547 
+0.194004 0.282203 
+0.176261 0.26052 
+0.188294 0.343489 
+0.234243 0.430868 
+0.181933 0.355282 
+0.170154 0.350051 
+0.161818 0.263494 
+0.302773 0.265246 
+0.168825 0.310823 
+0.164394 0.423268 
+0.29166 0.35488 
+0.271975 0.386961 
+0.296484 0.309649 
+0.196042 0.314222 
+0.145605 0.298324 
+0.255544 0.452838 
+0.189474 0.312347 
+0.176208 0.272894 
+0.16492 0.380216 
+0.187287 0.414524 
+0.178578 0.294622 
+0.278798 0.27663 
+0.132288 0.296908 
+0.254925 0.33015 
+0.350185 0.258513 
+0.16647 0.387784 
+0.155536 0.261762 
+0.31289 0.421124 
+0.1639 0.278125 
+0.299235 0.435447 
+0.126134 0.307695 
+0.163839 0.313053 
+0.143585 0.53421 
+0.162566 0.331135 
+0.220753 0.256421 
+0.219454 0.4336 
+0.19769 0.37137 
+0.131795 0.403685 
+0.180282 0.261803 
+0.196382 0.262449 
+0.20367 0.318381 
+0.130772 0.333474 
+0.180841 0.299823 
+0.214484 0.290828 
+0.138715 0.341963 
+0.251411 0.39227 
+0.125156 0.30578 
+0.266808 0.337032 
+0.240964 0.331971 
+0.175375 0.294612 
+0.179172 0.366302 
+0.147287 0.296443 
+0.164014 0.261311 
+0.273203 0.254742 
+0.136849 0.28521 
+0.213123 0.34695 
+0.173496 0.325799 
+0.292193 0.255454 
+0.138616 0.33484 
+0.25335 0.300546 
+0.158688 0.311034 
+0.145169 0.361547 
+0.128574 0.270011 
+0.15352 0.26367 
+0.159877 0.378762 
+0.140396 0.433171 
+0.133033 0.290889 
+0.163508 0.271152 
+0.210289 0.291615 
+0.14189 0.280736 
+0.149909 0.292447 
+0.180142 0.266672 
+0.144982 0.277738 
+0.159478 0.274755 
+0.164206 0.442762 
+0.178133 0.262889 
+0.166155 0.348706 
+0.290175 0.379262 
+0.154984 0.394628 
+0.250925 0.259417 
+0.141829 0.286385 
+0.173571 0.32318 
+0.155138 0.334199 
+0.19025 0.284642 
+0.132157 0.273714 
+0.169887 0.327512 
+0.231932 0.328859 
+0.163281 0.304052 
+0.145319 0.36004 
+0.144163 0.303037 
+0.158192 0.259722 
+0.198438 0.331068 
+0.127219 0.323939 
+0.155833 0.30954 
+0.190242 0.28389 
+0.199135 0.277733 
+0.321694 0.453193 
+0.141441 0.268926 
+0.281311 0.338708 
+0.189104 0.267739 
+0.133845 0.310823 
+0.209767 0.418156 
+0.297319 0.297564 
+0.161189 0.259427 
+0.213576 0.457596 
+0.270751 0.290435 
+0.201792 0.389826 
+0.135373 0.254834 
+0.133443 0.307913 
+0.146304 0.263914 
+0.254784 0.28866 
+0.205916 0.275338 
+0.196961 0.277155 
+0.239999 0.304274 
+0.172131 0.28929 
+0.145521 0.255641 
+0.25942 0.282277 
+0.167205 0.260999 
+0.169453 0.345352 
+0.255941 0.301047 
+0.264722 0.378455 
+0.133553 0.308037 
+0.137054 0.309238 
+0.20074 0.274192 
+0.250793 0.336116 
+0.162476 0.296901 
+0.137098 0.250421 
+0.193241 0.277141 
+0.185979 0.273677 
+0.17511 0.379876 
+0.149684 0.265748 
+0.225099 0.317336 
+0.132403 0.250674 
+0.13283 0.294247 
+0.158449 0.338396 
+0.252054 0.266546 
+0.154258 0.287316 
+0.223787 0.363484 
+0.160883 0.270353 
+0.152975 0.283687 
+0.237612 0.267854 
+0.18717 0.29144 
+0.174165 0.34938 
+0.165426 0.355092 
+0.287473 0.27884 
+0.128887 0.361068 
+0.179211 0.299544 
+0.215031 0.155091 
+0.142583 0.193322 
+0.276808 0.171428 
+0.1541 0.183927 
+0.194681 0.127557 
+0.128295 0.150629 
+0.235294 0.134568 
+0.201284 0.162832 
+0.314834 0.212242 
+0.142952 0.303737 
+0.195 0.152865 
+0.287761 0.163026 
+0.156109 0.155853 
+0.20319 0.275679 
+0.154476 0.216572 
+0.141193 0.151162 
+0.178573 0.150035 
+0.289051 0.328297 
+0.174799 0.175858 
+0.166596 0.15483 
+0.248603 0.15139 
+0.189713 0.18169 
+0.256645 0.128374 
+0.137268 0.213468 
+0.152469 0.125282 
+0.178565 0.209226 
+0.170197 0.194244 
+0.205242 0.14935 
+0.197247 0.173981 
+0.222782 0.185638 
+0.255122 0.138357 
+0.137221 0.181269 
+0.162759 0.136556 
+0.126264 0.173721 
+0.250943 0.187721 
+0.153073 0.14711 
+0.219836 0.248307 
+0.190877 0.288343 
+0.210659 0.223544 
+0.162835 0.133229 
+0.349274 0.263972 
+0.191313 0.167455 
+0.14183 0.183345 
+0.171238 0.243158 
+0.236826 0.155454 
+0.192282 0.141581 
+0.155562 0.137083 
+0.168371 0.216514 
+0.207958 0.286036 
+0.12849 0.227428 
+0.140926 0.162835 
+0.159604 0.134924 
+0.316663 0.133871 
+0.150814 0.125524 
+0.133106 0.196074 
+0.149622 0.144502 
+0.15218 0.196792 
+0.1625 0.220862 
+0.265263 0.279839 
+0.192861 0.147449 
+0.275367 0.136736 
+0.125005 0.217594 
+0.157708 0.130746 
+0.137828 0.2188 
+0.205963 0.284573 
+0.314706 0.265755 
+0.223344 0.184088 
+0.23382 0.250284 
+0.213944 0.131451 
+0.298037 0.21233 
+0.161291 0.127927 
+0.141618 0.222095 
+0.139736 0.286058 
+0.220316 0.175143 
+0.231751 0.134027 
+0.169706 0.258055 
+0.260142 0.143679 
+0.217887 0.240266 
+0.131439 0.174702 
+0.214523 0.15396 
+0.1768 0.227648 
+0.224522 0.230168 
+0.16444 0.13183 
+0.187441 0.266578 
+0.150256 0.169155 
+0.224854 0.163051 
+0.258784 0.161583 
+0.210855 0.186293 
+0.125887 0.30692 
+0.131581 0.155051 
+0.146777 0.145637 
+0.351453 0.17521 
+0.212764 0.187016 
+0.191882 0.18859 
+0.188817 0.224918 
+0.150217 0.141471 
+0.296471 0.317944 
+0.227911 0.172533 
+0.254149 0.182712 
+0.182149 0.212013 
+0.178375 0.191318 
+0.148463 0.174887 
+0.216658 0.261188 
+0.127068 0.215572 
+0.181236 0.211954 
+0.171546 0.137991 
+0.228025 0.188707 
+0.140536 0.254658 
+0.249636 0.130417 
+0.182512 0.229795 
+0.212421 0.228258 
+0.165297 0.137963 
+0.298921 0.147804 
+0.235353 0.280178 
+0.289942 0.15012 
+0.146599 0.23232 
+0.142329 0.161157 
+0.270889 0.212884 
+0.163608 0.134177 
+0.156306 0.230362 
+0.187909 0.144162 
+0.253675 0.15226 
+0.277975 0.241759 
+0.132958 0.140637 
+0.132167 0.281881 
+0.14486 0.179008 
+0.14129 0.173667 
+0.13792 0.196095 
+0.144878 0.181669 
+0.270531 0.275731 
+0.220914 0.170835 
+0.166041 0.191592 
+0.169549 0.195291 
+0.291418 0.174062 
+0.132584 0.211085 
+0.180231 0.238647 
+0.145224 0.208966 
+0.372008 0.140979 
+0.129183 0.246742 
+0.214086 0.129494 
+0.157116 0.270378 
+0.141035 0.149166 
+0.162474 0.246944 
+0.13349 0.206489 
+0.132278 0.182272 
+0.216111 0.187533 
+0.220175 0.300967 
+0.167512 0.145929 
+0.168184 0.321369 
+0.133267 0.234592 
+0.229341 0.131162 
+0.257111 0.218668 
+0.333441 0.333555 
+0.133784 0.186297 
+0.287145 0.18554 
+0.139222 0.142987 
+0.150074 0.223927 
+0.214144 0.1276 
+0.25632 0.226794 
+0.156132 0.185504 
+0.159565 0.135037 
+0.196861 0.22244 
+0.211956 0.176154 
+0.148823 0.188648 
+0.203664 0.150215 
+0.218453 0.216136 
+0.262688 0.1386 
+0.186142 0.217442 
+0.148249 0.21515 
+0.199327 0.125026 
+0.182995 0.187074 
+0.196654 0.246484 
+0.224754 0.176581 
+0.130524 0.173274 
+0.177737 0.137875 
+0.187153 0.126132 
+0.178623 0.232132 
+0.187313 0.153289 
+0.155405 0.143394 
+0.218375 0.326502 
+0.137907 0.187893 
+0.149386 0.260504 
+0.193591 0.134313 
+0.239484 0.221013 
+0.175538 0.146035 
+0.197115 0.160234 
+0.175092 0.211225 
+0.137077 0.129546 
+0.172193 0.304747 
+0.167678 0.208687 
+0.267804 0.163603 
+0.154224 0.12527 
+0.163461 0.150108 
+0.148395 0.22809 
+0.24221 0.142793 
+0.210785 0.228961 
+0.160274 0.131187 
+0.250532 0.191618 
+0.184075 0.192361 
+0.211521 0.193562 
+
+600 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 >
+600 < 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 >

Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác