
added ImageNet test with GP baseline

Alexander Luetz, 13 years ago (commit cef755e14b)

+ 2 - 0
progs/ImagenetBinary.conf

@@ -5,3 +5,5 @@ positive_class = 1
 imageNetLocal = false
 
 noise = 0.025
+sigmaFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
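Both parameter files are read by the readParameters helper added further down: it streams whitespace-separated values with operator>> and atof, one value per class in class order. A purely hypothetical approxVarSigma.txt for three classes would therefore just contain three numbers, e.g.

1.8
2.0
2.3

approxVarNoise.txt follows the same layout; the absolute paths above are specific to the author's machine.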

+ 2 - 1
progs/libdepend.inc

@@ -1,8 +1,9 @@
 $(call PKG_DEPEND_INT,core/basics)
-$(call PKG_DEPEND_INT,vislearning/classifier/kernelclassifier)
+$(call PKG_DEPEND_INT,core/algebra)
 $(call PKG_DEPEND_INT,vislearning/math)
 $(call PKG_DEPEND_INT,vislearning/baselib)
 $(call PKG_DEPEND_INT,vislearning/cbaselib)
+$(call PKG_DEPEND_INT,vislearning/classifier/kernelclassifier)
 $(call PKG_DEPEND_INT,fast-hik)
 
 

+ 104 - 103
progs/testImageNetBinaryBruteForce.cpp

@@ -1,12 +1,13 @@
 /** 
 * @file testImageNetBinaryBruteForce.cpp
-* @brief perform ImageNet tests with binary tasks for OCC
+* @brief perform ImageNet tests with binary tasks for OCC using the suggested approximation of the kernel matrix (diagonal matrix with row sums)
 * @author Alexander Lütz
 * @date 23-05-2012 (dd-mm-yyyy)
 
 */
 #include "core/basics/Config.h"
 #include "core/vector/SparseVectorT.h"
+#include "core/basics/Timer.h"
 
 
 #include "vislearning/cbaselib/ClassificationResults.h"
 #include "vislearning/baselib/ProgressBar.h"
@@ -19,38 +20,11 @@ using namespace std;
 using namespace NICE;
 using namespace OBJREC;
 
-double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0, const bool & verbose = false)
+double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)//, const bool & verbose = false)
 {
   double inner_sum(0.0);
 
-  double s;
-  double d;    
-  
-  //this is the first version, where we needed on average 0.017988 s for each test sample
-//   std::set<int> set_a;
-//   
-//   
-//   for ( NICE::SparseVector::const_iterator i = a.begin(); i != a.end(); i++ )
-//   {
-//     double u (i->second);
-//     double v (b.get(i->first));
-//     s = ( u + v );
-//     if ( fabs(s) < 10e-6 ) continue;
-//     d = u-v;
-//     inner_sum += d*d;
-//     set_a.insert(i->first);
-//   }
-//   
-//   for ( NICE::SparseVector::const_iterator i = b.begin(); i != b.end(); i++ )
-//   {
-//     if (set_a.find(i->first) != set_a.end()) //already worked on in first loop
-//       continue;
-//     
-//     double u (i->second);
-//     if ( fabs(u) < 10e-6 ) continue;
-//     inner_sum += u*u;
-//   }
-  
+  double d;      
   
   
   //new version, where we needed on average 0.001707 s for each test sample
   NICE::SparseVector::const_iterator aIt = a.begin();
@@ -58,32 +32,21 @@ double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector
    
    
   while ( (aIt != a.end()) && (bIt != b.end()) )
   {
-//     std::cerr << "a: " << aIt->first << " b: " << bIt->first << std::endl;
     if (aIt->first == bIt->first)
     {
-      s  = ( aIt->second + bIt->second );
-//       if (!  fabs(s) < 10e-6 ) //for numerical reasons
-//       {
-        d = ( aIt->second - bIt->second );      
-        inner_sum += d * d;
-//       }
+      d = ( aIt->second - bIt->second );      
+      inner_sum += d * d;
       aIt++;
       bIt++;
     }
     else if ( aIt->first < bIt->first)
     {
-//       if (! fabs(aIt->second) < 10e-6 )
-//       {
-        inner_sum += aIt->second * aIt->second;
-//       }
+      inner_sum += aIt->second * aIt->second;
       aIt++;      
     }
     else
     {
-//       if (! fabs(bIt->second) < 10e-6 )
-//       {
-        inner_sum += bIt->second * bIt->second;
-//       }
+      inner_sum += bIt->second * bIt->second;
       bIt++;       
     }
   }
@@ -94,26 +57,37 @@ double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector
     inner_sum += aIt->second * aIt->second;
     aIt++; 
   }
-  
   //compute remaining values, if a reached the end but not b
   while (bIt != b.end())
   {
     inner_sum += bIt->second * bIt->second;
     bIt++; 
   }  
-  
-  if (verbose)
-    std::cerr << "inner_sum before /= (2.0*sigma*sigma) " << inner_sum << std::endl;
 
 
   inner_sum /= (2.0*sigma*sigma);
   
-  if (verbose)
-    std::cerr << "inner_sum after /= (2.0*sigma*sigma) " << inner_sum << std::endl;
-  double expValue = exp(-inner_sum);
-  if (verbose)
-    std::cerr << "resulting expValue " << expValue << std::endl;
+  return exp(-inner_sum);
+}
 
 
-  return exp(-inner_sum); //expValue;
+void readParameters(const string & filename, const int & size, NICE::Vector & parameterVector)
+{
+  parameterVector.resize(size);
+  parameterVector.set(0.0);
+  
+  ifstream is(filename.c_str());
+  if ( !is.good() )
+    fthrow(IOException, "Unable to read parameters.");  
+//
+  string tmp;
+  int cnt(0);
+  while (! is.eof())
+  {
+    is >> tmp;
+    parameterVector[cnt] = atof(tmp.c_str());
+    cnt++;
+  }
+//   
+  is.close(); 
 }
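Despite its name, measureDistance returns a similarity rather than a distance: the trimmed-down loop accumulates the squared Euclidean distance of the two sparse vectors by merging their sorted index lists (never expanding them to dense form) and then maps it through the RBF kernel, i.e. in LaTeX notation, with sigma being the kernelSigma parameter,

k(a, b) = \exp\!\left( - \frac{\lVert a - b \rVert^{2}}{2 \sigma^{2}} \right)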
 
 
 
 
@@ -134,7 +108,20 @@ int main (int argc, char **argv)
   int nrOfClassesToConcidere = conf.gI("main", "nrOfClassesToConcidere", 1000);
   nrOfClassesToConcidere = std::min(nrOfClassesToConcidere, 1000); //we do not have more than 1000 classes
 
-  std::cerr << "Positive class is " << positiveClass << std::endl;
+  string sigmaFile = conf.gS("main", "sigmaFile", "approxVarSigma.txt");  
+  string noiseFile = conf.gS("main", "noiseFile", "approxVarNoise.txt");  
+  
+  
+  NICE::Vector sigmaParas(nrOfClassesToConcidere,kernelSigma);
+  NICE::Vector noiseParas(nrOfClassesToConcidere,0.0);
+  
+  std::cerr << "try to read optimal sigmas from " << sigmaFile << std::endl;
+  readParameters(sigmaFile,nrOfClassesToConcidere, sigmaParas);
+  //------------
+  std::cerr << "try to read optimal noises from " << noiseFile << std::endl;
+  readParameters(noiseFile,nrOfClassesToConcidere, noiseParas);
+  
+  
   
   
   std::vector<SparseVector> trainingData;
   NICE::Vector y;
@@ -162,52 +149,62 @@ int main (int argc, char **argv)
   imageNetTest.preloadData ( "val", "testing" );
   imageNetTest.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );  
   
-  double OverallPerformance(0.0);
+  double OverallPerformance(0.0);  
   
   
   for (int cl = 0; cl < nrOfClassesToConcidere; cl++)
   {
     std::cerr << "run for class " << cl << std::endl;
+    int positiveClass = cl+1;
     // ------------------------------ TRAINING ------------------------------
   
-    NICE::Vector matrixDInv (nrOfExamplesPerClass, 0.0);
-    //compute D 
-    //start with adding some noise, if necessary
-    if (noise != 0.0)
-      matrixDInv.set(noise);
-    else
-      matrixDInv.set(0.0);
+    kernelSigma = sigmaParas[cl];
     
     
-    std::cerr << "set matrixDInv to noise - now compute the scores for this special type of matrix" << std::endl;
+    std::cerr << "using sigma: " << kernelSigma << " and noise " << noiseParas[cl] << std::endl;
+    Timer tTrain;
+    tTrain.start();
     
     
-    if ( cl == 0)
-    {
-      std::cerr << "print first training example of class zero: " << std::endl;
-      trainingData[0] >> std::cerr;
-    }
+//     std::cerr << "set matrixDInv to noise - now compute the scores for this special type of matrix" << std::endl;
     
     
+    NICE::Matrix kernelMatrix(nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+       
     //now sum up all entries of each row in the original kernel matrix
     double kernelScore(0.0);
     for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++)
     {
-//       if ( (i % 50) == 0)
-        std::cerr << i << " / " << nrOfExamplesPerClass << std::endl;
       for (int j = i; j < cl*100+nrOfExamplesPerClass; j++)
       {
-//         std::cerr <<  j << " / " << nrOfExamplesPerClass << std::endl;
-        if ( (cl == 0) && (i == 0))
-        {
-          kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigma, true /*verbose*/);
-        }
-        else
-          kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigma);
-        if (kernelScore == 0.0) std::cerr << "score of zero for examples " << i << " and "  << j << std::endl;
-        matrixDInv[i-cl*100] += kernelScore;
+        kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigma);//optimalParameters[cl]);
+        kernelMatrix(i-cl*100,j-cl*100) = kernelScore;
+        
+//         matrixDInv[i-cl*100] += kernelScore;
         if (i != j)
-          matrixDInv[j-cl*100] += kernelScore; 
+//           matrixDInv[j-cl*100] += kernelScore; 
+            kernelMatrix(j-cl*100,i-cl*100) = kernelScore;
       }
     }  
     
-    std::cerr << "invert the main diagonal" << std::endl;
+    Timer tTrainPrecise;
+    tTrainPrecise.start();     
+    
+    NICE::Vector matrixDInv (nrOfExamplesPerClass, 0.0);
+    //compute D 
+    //start with adding some noise, if necessary
+    if (noiseParas[cl] != 0.0)
+      matrixDInv.set(noiseParas[cl]);
+    else
+      matrixDInv.set(0.0);    
+    
+    for (int i = 0; i < nrOfExamplesPerClass; i++)
+    {
+      for (int j = i; j < nrOfExamplesPerClass; j++)
+      {
+        matrixDInv[i] += kernelMatrix(i,j);
+        if (i != j)
+          matrixDInv[j] += kernelMatrix(i,j);
+      }
+    }
+    
+//     std::cerr << "invert the main diagonal" << std::endl;
     
     
     //compute its inverse
     for (int i = 0; i < nrOfExamplesPerClass; i++)
@@ -215,8 +212,13 @@ int main (int argc, char **argv)
       matrixDInv[i] = 1.0 / matrixDInv[i];
     }
     
     
-    std::cerr << "resulting D-Vector (or matrix :) ) " << std::endl;
-    std::cerr << matrixDInv << std::endl;
+    tTrainPrecise.stop(); 
+    std::cerr << "Precise time used for training class " << cl << ": " << tTrainPrecise.getLast() << std::endl;    
+    tTrain.stop();
+    std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl;    
+    
+//     std::cerr << "resulting D-Vector (or matrix :) ) " << std::endl;
+//     std::cerr << matrixDInv << std::endl;
     
     
     std::cerr << "training done - now perform the evaluation" << std::endl;
     std::cerr << "training done - now perform the evaluation" << std::endl;
 
 
@@ -226,35 +228,26 @@ int main (int argc, char **argv)
     ClassificationResults results;
     std::cerr << "Classification step ... with " << imageNetTest.getNumPreloadedExamples() << " examples" << std::endl;
     ProgressBar pb;
+    Timer tTest;
+    tTest.start();    
+    Timer tTestSingle;
+    double timeForSingleExamples(0.0);    
     for ( uint i = 0 ; i < (uint)imageNetTest.getNumPreloadedExamples(); i++ )
     {
       pb.update ( imageNetTest.getNumPreloadedExamples() );
 
       const SparseVector & svec = imageNetTest.getPreloadedExample ( i );
-//       SparseVector svec = imageNetTest.getPreloadedExample ( i );
-      
-      if ( i == 0)
-      {
-        std::cerr << "print first test example: " << std::endl;
-        std::cerr << "this is of class " << (int)imageNetTest.getPreloadedLabel ( i ) << std::endl;
-//         svec >> std::cerr; 
-        svec.store(std::cerr);
-      }      
+
       
       
-      double kernelSelf (measureDistance(svec,svec, kernelSigma) ) ;
+      double kernelSelf (measureDistance(svec,svec, kernelSigma) );
       NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0);
       
       for (int j = 0; j < nrOfExamplesPerClass; j++)
       {
         kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigma);
-      }
-      
-      if ( i == 0)
-      {
-        std::cerr << "print first kernel vector: " << kernelVector << std::endl;
-      }
-      
-    
+      }     
+
+      tTestSingle.start();
       NICE::Vector rightPart (nrOfExamplesPerClass);
       for (int j = 0; j < nrOfExamplesPerClass; j++)
       {
@@ -262,6 +255,8 @@ int main (int argc, char **argv)
       }
 
       double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
       
       
       FullVector scores ( 2 );
       scores[0] = 0.0;
@@ -278,6 +273,12 @@ int main (int argc, char **argv)
       
       
       results.push_back ( r );
     }
+    
+    tTest.stop();
+    std::cerr << "Time used for evaluating class " << cl << ": " << tTest.getLast() << std::endl;       
+    
+    timeForSingleExamples/= imageNetTest.getNumPreloadedExamples();
+    std::cerr << "Time used for evaluation single elements of class " << cl << " : " << timeForSingleExamples << std::endl;    
 
 
 //     std::cerr << "Writing results to " << resultsfile << std::endl;
 //     results.writeWEKA ( resultsfile, 1 );
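Taken together, the reworked training/testing path above computes the full kernel matrix of each class once, replaces K plus the noise term by the diagonal matrix D of its row sums (matching the new @brief), and scores a test example as kernelSelf minus kernelVector^T D^{-1} kernelVector; the body of the loop filling rightPart lies outside the shown hunks, but from the surrounding code it presumably multiplies each kernelVector entry by the corresponding matrixDInv entry. A minimal standalone sketch of that approximation, independent of the NICE containers (approxVariance and its signature are illustrative only, not part of the commit):

#include <vector>
#include <cstddef>

// Row-sum (diagonal) approximation of the predictive variance:
//   D_jj = noise + sum_i K(j,i),   var ~= k(x,x) - sum_j kStar_j^2 / D_jj
double approxVariance(const std::vector<std::vector<double> > & K,  // n x n training kernel matrix
                      const std::vector<double> & kStar,            // k(x_*, x_j) for all training examples
                      const double kSelf,                           // k(x_*, x_*)
                      const double noise)
{
  const std::size_t n = K.size();
  double variance = kSelf;
  for (std::size_t j = 0; j < n; j++)
  {
    double rowSum = noise;                       // start with the noise term on the diagonal
    for (std::size_t i = 0; i < n; i++)
      rowSum += K[j][i];                         // D_jj = noise + row sum of the kernel matrix
    variance -= (kStar[j] * kStar[j]) / rowSum;  // accumulate kStar^T D^{-1} kStar
  }
  return variance;
}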

+ 273 - 0
progs/testImageNetBinaryGPBaseline.cpp

@@ -0,0 +1,273 @@
+/** 
+* @file testImageNetBinaryGPBaseline.cpp
+* @brief perform ImageNet tests with binary tasks for OCC using the baseline GP
+* @author Alexander Lütz
+* @date 29-05-2012 (dd-mm-yyyy)
+
+*/
+#include "core/basics/Config.h"
+#include "core/basics/Timer.h"
+#include "core/vector/SparseVectorT.h"
+#include "core/algebra/CholeskyRobust.h"
+#include "core/vector/Algorithms.h"
+
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include "vislearning/baselib/ProgressBar.h"
+
+#include "fast-hik/tools.h"
+#include "fast-hik/MatFileIO.h"
+#include "fast-hik/ImageNetData.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)//, const bool & verbose = false)
+{
+  double inner_sum(0.0);
+
+  double d;    
+    
+  //new version, where we needed on average 0.001707 s for each test sample
+  NICE::SparseVector::const_iterator aIt = a.begin();
+  NICE::SparseVector::const_iterator bIt = b.begin();
+   
+  while ( (aIt != a.end()) && (bIt != b.end()) )
+  {
+    if (aIt->first == bIt->first)
+    {
+      d = ( aIt->second - bIt->second );      
+      inner_sum += d * d;
+      aIt++;
+      bIt++;
+    }
+    else if ( aIt->first < bIt->first)
+    {
+      inner_sum += aIt->second * aIt->second;
+      aIt++;      
+    }
+    else
+    {
+      inner_sum += bIt->second * bIt->second;
+      bIt++;       
+    }
+  }
+  
+  //compute remaining values, if b reached the end but not a
+  while (aIt != a.end())
+  {
+    inner_sum += aIt->second * aIt->second;
+    aIt++; 
+  }
+  //compute remaining values, if a reached the end but not b
+  while (bIt != b.end())
+  {
+    inner_sum += bIt->second * bIt->second;
+    bIt++; 
+  }  
+  inner_sum /= (2.0*sigma*sigma);
+  
+  return exp(-inner_sum); //expValue;
+}
+
+void readParameters(const string & filename, const int & size, NICE::Vector & parameterVector)
+{
+  parameterVector.resize(size);
+  parameterVector.set(0.0);
+  
+  ifstream is(filename.c_str());
+  if ( !is.good() )
+    fthrow(IOException, "Unable to read parameters.");  
+//
+  string tmp;
+  int cnt(0);
+  while (! is.eof())
+  {
+    is >> tmp;
+    parameterVector[cnt] = atof(tmp.c_str());
+    cnt++;
+  }
+//   
+  is.close(); 
+}
+
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  double kernelSigma = conf.gD("main", "kernelSigma", 2.0);
+  int nrOfExamplesPerClass = conf.gI("main", "nrOfExamplesPerClass", 50);
+  nrOfExamplesPerClass = std::min(nrOfExamplesPerClass, 100); // we do not have more than 100 examples per class
+  int nrOfClassesToConcidere = conf.gI("main", "nrOfClassesToConcidere", 1000);
+  nrOfClassesToConcidere = std::min(nrOfClassesToConcidere, 1000); //we do not have more than 1000 classes
+
+  string sigmaFile = conf.gS("main", "sigmaFile", "approxVarSigma.txt");  
+  string noiseFile = conf.gS("main", "noiseFile", "approxVarNoise.txt");  
+  
+  
+  NICE::Vector sigmaParas(nrOfClassesToConcidere,kernelSigma);
+  NICE::Vector noiseParas(nrOfClassesToConcidere,0.0);
+  
+  std::cerr << "try to read optimal sigmas from " << sigmaFile << std::endl;
+  readParameters(sigmaFile,nrOfClassesToConcidere, sigmaParas);
+  //------------
+  std::cerr << "try to read optimal noises from " << noiseFile << std::endl;
+  readParameters(noiseFile,nrOfClassesToConcidere, noiseParas);
+  
+  std::vector<SparseVector> trainingData;
+  NICE::Vector y;
+  
+  std::cerr << "Reading ImageNet data ..." << std::endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNetTrain ( imageNetPath + "demo/" );
+
+  imageNetTrain.preloadData( "train", "training" );
+  trainingData = imageNetTrain.getPreloadedData();
+  y = imageNetTrain.getPreloadedLabels();
+    
+  std::cerr << "Reading of training data finished" << std::endl;
+  std::cerr << "trainingData.size(): " << trainingData.size() << std::endl;
+  std::cerr << "y.size(): " << y.size() << std::endl;
+  
+  std::cerr << "Reading ImageNet test data files (takes some seconds)..." << std::endl;
+  ImageNetData imageNetTest ( imageNetPath + "demo/" );
+  imageNetTest.preloadData ( "val", "testing" );
+  imageNetTest.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );  
+  
+  double OverallPerformance(0.0);  
+  
+  for (int cl = 0; cl < nrOfClassesToConcidere; cl++)
+  {
+    std::cerr << "run for class " << cl << std::endl;
+    int positiveClass = cl+1;
+    // ------------------------------ TRAINING ------------------------------
+  
+    kernelSigma = sigmaParas[cl];
+    
+    std::cerr << "using sigma: " << kernelSigma << " and noise " << noiseParas[cl] << std::endl;
+    Timer tTrain;
+    tTrain.start();
+    NICE::Matrix kernelMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+      
+    //now compute the kernelScores for every element
+    double kernelScore(0.0);
+    for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++)
+    {
+      for (int j = i; j < cl*100+nrOfExamplesPerClass; j++)
+      {
+        kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigma);//optimalParameters[cl]);
+        kernelMatrix(i-cl*100,j-cl*100) = kernelScore;
+        if (i != j)
+          kernelMatrix(j-cl*100,i-cl*100) = kernelScore;
+      }
+    }  
+    
+    //adding some noise, if necessary
+    if (noiseParas[cl] != 0.0)
+    {
+      kernelMatrix.addIdentity(noiseParas[cl]);
+    }
+    else
+    {
+      //zero was already set
+    }    
+   
+    //compute its inverse
+    //noise is already added :)
+    Timer tTrainPrecise;
+    tTrainPrecise.start();    
+    CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+    
+    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);      
+    cr.robustChol ( kernelMatrix, choleskyMatrix );    
+    
+    tTrainPrecise.stop();
+    std::cerr << "Precise time used for training class " << cl << ": " << tTrainPrecise.getLast() << std::endl;        
+    
+    tTrain.stop();
+    std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl;    
+       
+    std::cerr << "training done - now perform the evaluation" << std::endl;
+
+
+    // ------------------------------ TESTING ------------------------------
+   
+    ClassificationResults results;
+    std::cerr << "Classification step ... with " << imageNetTest.getNumPreloadedExamples() << " examples" << std::endl;
+    ProgressBar pb;
+    Timer tTest;
+    tTest.start();    
+    Timer tTestSingle;
+    double timeForSingleExamples(0.0);
+    for ( uint i = 0 ; i < (uint)imageNetTest.getNumPreloadedExamples(); i++ )
+    {
+      pb.update ( imageNetTest.getNumPreloadedExamples() );
+
+      const SparseVector & svec = imageNetTest.getPreloadedExample ( i );
+      
+      double kernelSelf (measureDistance(svec,svec, kernelSigma) );
+      NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0);
+      
+      for (int j = 0; j < nrOfExamplesPerClass; j++)
+      {
+        kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigma);
+      }     
+      
+      tTestSingle.start();
+      NICE::Vector rightPart (nrOfExamplesPerClass);
+      choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );
+        
+      double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = 1.0 - uncertainty;
+
+      ClassificationResult r ( scores[1]<0.5 ? 0 : 1, scores );    
+      
+      // set ground truth label
+      r.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      
+//       std::cerr << "scores: " << std::endl;
+//       scores >> std::cerr;
+//       std::cerr << "gt: " <<  r.classno_groundtruth << " -- " << r.classno << std::endl;
+      
+      results.push_back ( r );
+    }
+    
+    tTest.stop();
+    std::cerr << "Time used for evaluating class " << cl << ": " << tTest.getLast() << std::endl;       
+    
+    timeForSingleExamples/= imageNetTest.getNumPreloadedExamples();
+    std::cerr << "Time used for evaluation single elements of class " << cl << " : " << timeForSingleExamples << std::endl;
+    
+
+//     std::cerr << "Writing results to " << resultsfile << std::endl;
+//     results.writeWEKA ( resultsfile, 1 );
+    double perfvalue = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+
+    std::cerr << "Performance: " << perfvalue << std::endl;
+    
+    OverallPerformance += perfvalue;    
+  }
+  
+  OverallPerformance /= nrOfClassesToConcidere;
+  
+  std::cerr << "overall performance: " << OverallPerformance << std::endl;
+  
+  return 0;
+}
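For comparison, the new testImageNetBinaryGPBaseline.cpp evaluates the exact GP baseline instead of the row-sum approximation: with K the per-class training kernel matrix, sigma_n^2 the class noise and k_* the test kernel vector, the uncertainty it computes is, in LaTeX notation,

\sigma_{*}^{2} = k(x_{*}, x_{*}) - k_{*}^{\top} \left( K + \sigma_{n}^{2} I \right)^{-1} k_{*}

The inverse is never formed explicitly: CholeskyRobust factorizes K + sigma_n^2 I once per class, and choleskySolveLargeScale then solves for (K + sigma_n^2 I)^{-1} k_* for every test example. Training therefore costs roughly O(n^3) and each test solve O(n^2), versus O(n^2) and O(n) for the diagonal approximation in testImageNetBinaryBruteForce.cpp; the tTrainPrecise and tTestSingle timers added in both programs appear intended to make exactly this difference measurable.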