Przeglądaj źródła

framework restructured (option to repeat training and testing, option to specify static parameters, ...

Alexander Luetz 13 lat temu
rodzic
commit
8a4946e687
3 zmienionych plików z 300 dodań i 273 usunięć
  1. 18 13
      progs/ImagenetBinary.conf
  2. 1 1
      progs/approxVarNoise.txt
  3. 281 259
      progs/testImageNetBinaryBruteForce.cpp

+ 18 - 13
progs/ImagenetBinary.conf

@@ -1,27 +1,32 @@
 [main]
 # whether to use eriks folder (only works on dionysos)
 imageNetLocal = false
+shareParameters = true
+noise = 0.01
+sigma = 1.0
 
 #GP variance approximation
-sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
-noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/gpvarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/gpvarNoise.txt
 #GP variance
-sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
-noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+sigmaGPVarFile = /home/luetz/code/nice/vislearning/progs/gpvarSigma.txt
+noiseGPVarFile = /home/luetz/code/nice/vislearning/progs/gpvarNoise.txt
 #GP mean approximation
-sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
-noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+sigmaGPMeanApproxFile = /home/luetz/code/nice/vislearning/progs/gpvarSigma.txt
+noiseGPMeanApproxFile = /home/luetz/code/nice/vislearning/progs/gpvarNoise.txt
 #GP mean
-sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
-noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+sigmaGPMeanFile = /home/luetz/code/nice/vislearning/progs/gpvarSigma.txt
+noiseGPMeanFile = /home/luetz/code/nice/vislearning/progs/gpvarNoise.txt
 #Parzen
-sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
-noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+sigmaParzenFile = /home/luetz/code/nice/vislearning/progs/gpvarSigma.txt
+noiseParzenFile = /home/luetz/code/nice/vislearning/progs/gpvarNoise.txt
 #SVDD
-sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
-noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+sigmaSVDDFile = /home/luetz/code/nice/vislearning/progs/gpvarSigma.txt
+noiseSVDDFile = /home/luetz/code/nice/vislearning/progs/gpvarNoise.txt
 
 indexOfFirstClass = 0
-indexOfLastClass = 999
+indexOfLastClass = 0
+runsPerClassToAverageTraining = 1000
+runsPerClassToAverageTesting = 1000
 
 nrOfExamplesPerClass = 50

+ 1 - 1
progs/approxVarNoise.txt

@@ -997,4 +997,4 @@
 0.2
 0.025
 0.05
-0.025
+0.025

+ 281 - 259
progs/testImageNetBinaryBruteForce.cpp

@@ -77,14 +77,15 @@ double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector
   return exp(-inner_sum);
 }
 
-void readParameters(const string & filename, const int & size, NICE::Vector & parameterVector)
+void readParameters(const string & filename, const int & size, NICE::Vector & parameterVector)
 {
   parameterVector.resize(size);
   parameterVector.set(0.0);
   
   ifstream is(filename.c_str());
+
   if ( !is.good() )
-    fthrow(IOException, "Unable to read parameters.");  
+      fthrow(IOException, "Unable to read parameters.");  
 //
   string tmp;
   int cnt(0);
@@ -100,304 +101,291 @@ void readParameters(const string & filename, const int & size, NICE::Vector & pa
 
 //------------------- TRAINING METHODS --------------------
 
-void inline trainGPVarApprox(NICE::Vector & matrixDInv, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+void inline trainGPVarApprox(NICE::Vector & matrixDInv, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
 {
 
     std::cerr << "nrOfExamplesPerClass : " << nrOfExamplesPerClass << std::endl;
   
     Timer tTrainPreciseTimer;
     tTrainPreciseTimer.start();     
-    
-//     time_t time;
-//     std::cerr <<
-    std::cerr << time(NULL) << std::endl;
-    
-    //tic tTrainPrecise
-    clock_t  tTrainPreciseStart = clock() * CLOCKS_PER_SEC;    
-    
-    usleep(35);
-    
-    matrixDInv.resize(nrOfExamplesPerClass);
-    matrixDInv.set(0.0);
-    //compute D 
-    //start with adding some noise, if necessary
-    if (noise != 0.0)
-      matrixDInv.set(noise);
-    else
-      matrixDInv.set(0.0);    
-    
-    for (int i = 0; i < nrOfExamplesPerClass; i++)
+
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
     {
-      for (int j = i; j < nrOfExamplesPerClass; j++)
+      matrixDInv.resize(nrOfExamplesPerClass);
+      matrixDInv.set(0.0);
+      //compute D 
+      //start with adding some noise, if necessary
+      if (noise != 0.0)
+        matrixDInv.set(noise);
+      else
+        matrixDInv.set(0.0);    
+      
+      for (int i = 0; i < nrOfExamplesPerClass; i++)
       {
-        matrixDInv[i] += kernelMatrix(i,j);
-        if (i != j)
-          matrixDInv[j] += kernelMatrix(i,j);
+        for (int j = i; j < nrOfExamplesPerClass; j++)
+        {
+          matrixDInv[i] += kernelMatrix(i,j);
+          if (i != j)
+            matrixDInv[j] += kernelMatrix(i,j);
+        }
+      }
+      
+      //compute its inverse
+      for (int i = 0; i < nrOfExamplesPerClass; i++)
+      {
+        matrixDInv[i] = 1.0 / matrixDInv[i];
       }
-    }
-    
-    //compute its inverse
-    for (int i = 0; i < nrOfExamplesPerClass; i++)
-    {
-      matrixDInv[i] = 1.0 / matrixDInv[i];
     }
     
     tTrainPreciseTimer.stop(); 
-    std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPreciseTimer.getLast() << std::endl;    
-    //toc tTrainPrecise
-    clock_t  currentTime = clock() * CLOCKS_PER_SEC;
-    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
-    
-    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
-    std::cerr << "current time: " << currentTime << std::endl;
-    std::cerr << "Precise time used for GPVarApprox training class " << classNumber << ": " << currentTime-tTrainPreciseStart << std::endl;
-    
-    std::cerr << "final time in system clock whatever:" << std::endl;
-    std::cerr << time(NULL) << std::endl;
+    std::cerr << "Precise time used for GPVarApprox training class " << classNumber << ": " << tTrainPreciseTimer.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
 }
 
-void inline trainGPVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+void inline trainGPVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
 {
 
-/*    Timer tTrainPrecise;
-    tTrainPrecise.start();  */   
+    Timer tTrainPrecise;
+    tTrainPrecise.start();     
     
-    //tic tTrainPrecise
-    time_t  tTrainPreciseStart = clock();    
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
+    {  
     
-    CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
-    
-    choleskyMatrix.resize(nrOfExamplesPerClass, nrOfExamplesPerClass);
-    choleskyMatrix.set(0.0);      
-    cr.robustChol ( kernelMatrix, choleskyMatrix );      
+      CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+      
+      choleskyMatrix.resize(nrOfExamplesPerClass, nrOfExamplesPerClass);
+      choleskyMatrix.set(0.0);      
+      cr.robustChol ( kernelMatrix, choleskyMatrix );   
+    }
  
-//     tTrainPrecise.stop(); 
-//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
-    //toc tTrainPrecise
-    time_t  currentTime = clock();
-    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
-    
-    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
-    std::cerr << "current time: " << currentTime << std::endl;
-    std::cerr << "Precise time used for GPVar training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+    tTrainPrecise.stop(); 
+    std::cerr << "Precise time used for GPVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
 }
 
-void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
 {
 
-/*    Timer tTrainPrecise;
-    tTrainPrecise.start();  */   
-    
-    //tic tTrainPrecise
-    time_t  tTrainPreciseStart = clock();    
-    
-    NICE::Vector matrixDInv(nrOfExamplesPerClass,0.0);
-    //compute D 
-    //start with adding some noise, if necessary
-    if (noise != 0.0)
-      matrixDInv.set(noise);
-    else
-      matrixDInv.set(0.0);    
-    
-    for (int i = 0; i < nrOfExamplesPerClass; i++)
-    {
-      for (int j = i; j < nrOfExamplesPerClass; j++)
+    Timer tTrainPrecise;
+    tTrainPrecise.start();     
+    
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
+    {  
+      NICE::Vector matrixDInv(nrOfExamplesPerClass,0.0);
+      //compute D 
+      //start with adding some noise, if necessary
+      if (noise != 0.0)
+        matrixDInv.set(noise);
+      else
+        matrixDInv.set(0.0);    
+      
+      for (int i = 0; i < nrOfExamplesPerClass; i++)
       {
-        matrixDInv[i] += kernelMatrix(i,j);
-        if (i != j)
-          matrixDInv[j] += kernelMatrix(i,j);
+        for (int j = i; j < nrOfExamplesPerClass; j++)
+        {
+          matrixDInv[i] += kernelMatrix(i,j);
+          if (i != j)
+            matrixDInv[j] += kernelMatrix(i,j);
+        }
       }
+      
+      //compute its inverse (and multiply every element with the label vector, which contains only one-entries...)
+      GPMeanApproxRightPart.resize(nrOfExamplesPerClass);    
+      for (int i = 0; i < nrOfExamplesPerClass; i++)
+      {
+        GPMeanApproxRightPart[i] = 1.0 / matrixDInv[i];
+      } 
     }
     
-    //compute its inverse (and multiply every element with the label vector, which contains only one-entries...)
-    GPMeanApproxRightPart.resize(nrOfExamplesPerClass);    
-    for (int i = 0; i < nrOfExamplesPerClass; i++)
-    {
-      GPMeanApproxRightPart[i] = 1.0 / matrixDInv[i];
-    } 
-    
     
-//     tTrainPrecise.stop(); 
-//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
-    //toc tTrainPrecise
-    time_t  currentTime = clock();
-    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
-    
-    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
-    std::cerr << "current time: " << currentTime << std::endl;
-    std::cerr << "Precise time used for GPMeanApprox training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+    tTrainPrecise.stop(); 
+    std::cerr << "Precise time used for GPMeanApprox training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
 }
     
-void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
 {
 
-/*    Timer tTrainPrecise;
-    tTrainPrecise.start();  */   
-    
-    //tic tTrainPrecise
-    time_t  tTrainPreciseStart = clock();    
-    
-    CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
-    
-    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
-    cr.robustChol ( kernelMatrix, choleskyMatrix );  
-    
-    GPMeanRightPart.resize(nrOfExamplesPerClass);
-    GPMeanRightPart.set(0.0);
+    Timer tTrainPrecise;
+    tTrainPrecise.start();      
+
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
+    {  
     
-    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
-    choleskySolveLargeScale ( choleskyMatrix, y, GPMeanRightPart );
+      CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+      
+      NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+      cr.robustChol ( kernelMatrix, choleskyMatrix );  
+      
+      GPMeanRightPart.resize(nrOfExamplesPerClass);
+      GPMeanRightPart.set(0.0);
+      
+      NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
+      choleskySolveLargeScale ( choleskyMatrix, y, GPMeanRightPart );
+    }
  
-//     tTrainPrecise.stop(); 
-//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
-    //toc tTrainPrecise
-    time_t  currentTime = clock();
-    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
-    
-    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
-    std::cerr << "current time: " << currentTime << std::endl;
-    std::cerr << "Precise time used for GPMean training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+    tTrainPrecise.stop(); 
+    std::cerr << "Precise time used for GPMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
 }    
 
-KCMinimumEnclosingBall *trainSVDD( const double & noise, const NICE::Matrix kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+KCMinimumEnclosingBall *trainSVDD( const double & noise, const NICE::Matrix kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
 {
  
     Config conf;
     // set the outlier ratio (Paul optimized this paramter FIXME)
     conf.sD( "SVDD", "outlier_fraction", 0.1 );
+      
     KCMinimumEnclosingBall *svdd = new KCMinimumEnclosingBall ( &conf, NULL /* no kernel function */, "SVDD" /* config section */ );
 
     KernelData kernelData ( &conf, kernelMatrix, "Kernel" );
  
- /*    Timer tTrainPrecise;
-    tTrainPrecise.start();  */   
-  
-    //tic tTrainPrecise
-    time_t  tTrainPreciseStart = clock();  
-    
-//     tTrainPrecise.stop(); 
-//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
-    //toc tTrainPrecise
-    time_t  currentTime = clock();
-    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
-    
+    Timer tTrainPrecise;
+    tTrainPrecise.start();     
 
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
+    {     
     
-    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
-    // KCMinimumEnclosingBall does not store the kernel data object, therefore, we are save with passing a local copy
-    svdd->teach ( &kernelData, y );
+      NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
+      // KCMinimumEnclosingBall does not store the kernel data object, therefore, we are safe with passing a local copy
+      svdd->teach ( &kernelData, y );
+    }
     
-    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
-    std::cerr << "current time: " << currentTime << std::endl;
-    std::cerr << "Precise time used for SVDD training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+    tTrainPrecise.stop(); 
+    std::cerr << "Precise time used for SVDD training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;        
 
     return svdd;
 }
 
 // ------------- EVALUATION METHODS ---------------------
-void inline evaluateGPVarApprox(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Vector & matrixDInv, ClassificationResult & r, double & timeForSingleExamples)
+void inline evaluateGPVarApprox(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Vector & matrixDInv, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
-      Timer tTestSingle;
-      tTestSingle.start();
+    double uncertainty;
+    
+    Timer tTestSingle;
+    tTestSingle.start();
+      
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {       
       NICE::Vector rightPart (kernelVector.size());
       for (int j = 0; j < kernelVector.size(); j++)
       {
         rightPart[j] = kernelVector[j] * matrixDInv[j];
       }
 
-      double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
-      
-      tTestSingle.stop();
-      timeForSingleExamples += tTestSingle.getLast();      
+      uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+    }
       
-      FullVector scores ( 2 );
-      scores[0] = 0.0;
-      scores[1] = 1.0 - uncertainty;
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = 1.0 - uncertainty;
 
-      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
 }
 
-void inline evaluateGPVar(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples)
+void inline evaluateGPVar(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
-      Timer tTestSingle;
-      tTestSingle.start();
+    double uncertainty;
+    
+    Timer tTestSingle;
+    tTestSingle.start();
+      
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {       
       NICE::Vector rightPart (kernelVector.size(),0.0);
       
       choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );
       
-      double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
-      
-      tTestSingle.stop();
-      timeForSingleExamples += tTestSingle.getLast();      
+      uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+    }
       
-      FullVector scores ( 2 );
-      scores[0] = 0.0;
-      scores[1] = 1.0 - uncertainty;
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = 1.0 - uncertainty;
 
-      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
 }
 
-void inline evaluateGPMeanApprox(const NICE::Vector & kernelVector, const NICE::Vector & rightPart, ClassificationResult & r, double & timeForSingleExamples)
+void inline evaluateGPMeanApprox(const NICE::Vector & kernelVector, const NICE::Vector & rightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
-      Timer tTestSingle;
-      tTestSingle.start();
-
-      double mean = kernelVector.scalarProduct ( rightPart );
+    double mean;
+  
+    Timer tTestSingle;
+    tTestSingle.start();
       
-      tTestSingle.stop();
-      timeForSingleExamples += tTestSingle.getLast();      
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {           
+      mean = kernelVector.scalarProduct ( rightPart );
+    }
       
-      FullVector scores ( 2 );
-      scores[0] = 0.0;
-      scores[1] = mean;
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = mean;
 
-      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
 }
 
-void inline evaluateGPMean(const NICE::Vector & kernelVector,  const NICE::Vector & GPMeanRightPart, ClassificationResult & r, double & timeForSingleExamples)
+void inline evaluateGPMean(const NICE::Vector & kernelVector,  const NICE::Vector & GPMeanRightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
-      Timer tTestSingle;
-      tTestSingle.start();
-      
-      double mean = kernelVector.scalarProduct ( GPMeanRightPart );
-      
-      tTestSingle.stop();
-      timeForSingleExamples += tTestSingle.getLast();      
-      
-      FullVector scores ( 2 );
-      scores[0] = 0.0;
-      scores[1] = mean;
+    double mean;
+    
+    Timer tTestSingle;
+    tTestSingle.start();
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {       
+      mean = kernelVector.scalarProduct ( GPMeanRightPart );
+    }
+
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = mean;
 
-      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
 }
 
-void inline evaluateParzen(const NICE::Vector & kernelVector,  ClassificationResult & r, double & timeForSingleExamples)
+void inline evaluateParzen(const NICE::Vector & kernelVector,  ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
-      Timer tTestSingle;
-      tTestSingle.start();
-      
-      double score( kernelVector.Sum() / (double) kernelVector.size() ); //maybe we could directly call kernelVector.Mean()      
-      
-      tTestSingle.stop();
-      timeForSingleExamples += tTestSingle.getLast();      
+    double score;
+    
+    Timer tTestSingle;
+    tTestSingle.start();
+    
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {       
+      score = kernelVector.Sum() / (double) kernelVector.size(); //maybe we could directly call kernelVector.Mean()
+    }
       
-      FullVector scores ( 2 );
-      scores[0] = 0.0;
-      scores[1] = score;
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = score;
 
-      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
 }
 
-void inline evaluateSVDD( KCMinimumEnclosingBall *svdd, const NICE::Vector & kernelVector,  ClassificationResult & r, double & timeForSingleExamples)
+void inline evaluateSVDD( KCMinimumEnclosingBall *svdd, const NICE::Vector & kernelVector,  ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
-      Timer tTestSingle;
-      tTestSingle.start();
+    Timer tTestSingle;
+    tTestSingle.start();
       
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {        
       // In the following, we assume that we are using a Gaussian kernel
       r = svdd->classifyKernel ( kernelVector, 1.0 /* kernel self */ );
+    }
 
-      tTestSingle.stop();
-      timeForSingleExamples += tTestSingle.getLast();      
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
 }
 
 /** 
@@ -418,27 +406,12 @@ int main (int argc, char **argv)
   indexOfLastClass = std::min(indexOfLastClass, 999); //we do not have more than 1000 classes
   
  int nrOfClassesToConcidere =  (indexOfLastClass - indexOfFirstClass)+1;
-
-  //read the optimal parameters for the different methods
   
-  // GP variance approximation
-  string sigmaGPVarApproxFile = conf.gS("main", "sigmaGPVarApproxFile", "approxVarSigma.txt");  
-  string noiseGPVarApproxFile = conf.gS("main", "noiseGPVarApproxFile", "approxVarNoise.txt");   
-  // GP variance
-  string sigmaGPVarFile = conf.gS("main", "sigmaGPVarFile", "approxVarSigma.txt");  
-  string noiseGPVarFile = conf.gS("main", "noiseGPVarFile", "approxVarNoise.txt");  
-  //GP mean approximation
-  string sigmaGPMeanApproxFile = conf.gS("main", "sigmaGPMeanApproxFile", "approxVarSigma.txt");  
-  string noiseGPMeanApproxFile = conf.gS("main", "noiseGPMeanApproxFile", "approxVarNoise.txt");    
-  //GP mean
-  string sigmaGPMeanFile = conf.gS("main", "sigmaGPMeanFile", "approxVarSigma.txt");  
-  string noiseGPMeanFile = conf.gS("main", "noiseGPMeanFile", "approxVarNoise.txt");      
-  //Parzen
-  string sigmaParzenFile = conf.gS("main", "sigmaParzenFile", "approxVarSigma.txt");  
-  string noiseParzenFile = conf.gS("main", "noiseParzenFile", "approxVarNoise.txt");    
-  //SVDD
-  string sigmaSVDDFile = conf.gS("main", "sigmaSVDDFile", "approxVarSigma.txt");  
-  string noiseSVDDFile = conf.gS("main", "noiseSVDDFile", "approxVarNoise.txt");      
+  int runsPerClassToAverageTraining = conf.gI( "main", "runsPerClassToAverageTraining", 1 );
+  int runsPerClassToAverageTesting = conf.gI( "main", "runsPerClassToAverageTesting", 1 );
+  
+  bool shareParameters = conf.gB("main" , "shareParameters", true);
+
   
   // GP variance approximation  
   NICE::Vector sigmaGPVarApproxParas(nrOfClassesToConcidere,0.0);
@@ -457,26 +430,73 @@ int main (int argc, char **argv)
   NICE::Vector noiseParzenParas(nrOfClassesToConcidere,0.0);
   //SVDD  
   NICE::Vector sigmaSVDDParas(nrOfClassesToConcidere,0.0);
-  NICE::Vector noiseSVDDParas(nrOfClassesToConcidere,0.0); 
-
-  // GP variance approximation    
-  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarApproxParas);
-  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarApproxParas);  
-  // GP variance    
-  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarParas);
-  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarParas);  
-  //GP mean approximation   
-  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPMeanApproxParas);
-  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPMeanApproxParas);  
-  //GP mean  
-  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPMeanParas);
-  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPMeanParas); 
-  //Parzen    
-  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaParzenParas);
-  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseParzenParas);  
-  //SVDD    
-  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaSVDDParas);
-  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseSVDDParas);   
+  NICE::Vector noiseSVDDParas(nrOfClassesToConcidere,0.0);
+    
+  if (!shareParameters)
+  {
+    //read the optimal parameters for the different methods
+    
+    // GP variance approximation
+    string sigmaGPVarApproxFile = conf.gS("main", "sigmaGPVarApproxFile", "approxVarSigma.txt");  
+    string noiseGPVarApproxFile = conf.gS("main", "noiseGPVarApproxFile", "approxVarNoise.txt");   
+    // GP variance
+    string sigmaGPVarFile = conf.gS("main", "sigmaGPVarFile", "approxVarSigma.txt");  
+    string noiseGPVarFile = conf.gS("main", "noiseGPVarFile", "approxVarNoise.txt");  
+    //GP mean approximation
+    string sigmaGPMeanApproxFile = conf.gS("main", "sigmaGPMeanApproxFile", "approxVarSigma.txt");  
+    string noiseGPMeanApproxFile = conf.gS("main", "noiseGPMeanApproxFile", "approxVarNoise.txt");    
+    //GP mean
+    string sigmaGPMeanFile = conf.gS("main", "sigmaGPMeanFile", "approxVarSigma.txt");  
+    string noiseGPMeanFile = conf.gS("main", "noiseGPMeanFile", "approxVarNoise.txt");      
+    //Parzen
+    string sigmaParzenFile = conf.gS("main", "sigmaParzenFile", "approxVarSigma.txt");  
+    string noiseParzenFile = conf.gS("main", "noiseParzenFile", "approxVarNoise.txt");    
+    //SVDD
+    string sigmaSVDDFile = conf.gS("main", "sigmaSVDDFile", "approxVarSigma.txt");  
+    string noiseSVDDFile = conf.gS("main", "noiseSVDDFile", "approxVarNoise.txt");      
+
+    // GP variance approximation    
+    readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarApproxParas);
+    readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarApproxParas);  
+    // GP variance    
+    readParameters(sigmaGPVarFile,nrOfClassesToConcidere, sigmaGPVarParas);
+    readParameters(noiseGPVarFile,nrOfClassesToConcidere, noiseGPVarParas);  
+    //GP mean approximation   
+    readParameters(sigmaGPMeanApproxFile,nrOfClassesToConcidere, sigmaGPMeanApproxParas);
+    readParameters(noiseGPMeanApproxFile,nrOfClassesToConcidere, noiseGPMeanApproxParas);  
+    //GP mean  
+    readParameters(sigmaGPMeanFile,nrOfClassesToConcidere, sigmaGPMeanParas);
+    readParameters(noiseGPMeanFile,nrOfClassesToConcidere, noiseGPMeanParas); 
+    //Parzen    
+    readParameters(sigmaParzenFile,nrOfClassesToConcidere, sigmaParzenParas);
+    readParameters(noiseParzenFile,nrOfClassesToConcidere, noiseParzenParas);  
+    //SVDD    
+    readParameters(sigmaSVDDFile,nrOfClassesToConcidere, sigmaSVDDParas);
+    readParameters(noiseSVDDFile,nrOfClassesToConcidere, noiseSVDDParas);   
+  }
+  else
+  {
+    double noise = conf.gD( "main", "noise", 0.01 );
+    double sigma = conf.gD( "main", "sigma", 1.0 );
+    
+    sigmaGPVarApproxParas.set(sigma);
+    noiseGPVarApproxParas.set(noise);
+    // GP variance  
+    sigmaGPVarParas.set(sigma);
+    noiseGPVarParas.set(noise);
+    //GP mean approximation  
+    sigmaGPMeanApproxParas.set(sigma);
+    noiseGPMeanApproxParas.set(noise);
+    //GP mean  
+    sigmaGPMeanParas.set(sigma);
+    noiseGPMeanParas.set(noise);
+    //Parzen  
+    sigmaParzenParas.set(sigma);
+    noiseParzenParas.set(noise);
+    //SVDD  
+    sigmaSVDDParas.set(sigma);
+    noiseSVDDParas.set(noise);    
+  }
   
   
   // -------- optimal parameters read --------------  
@@ -522,7 +542,7 @@ int main (int argc, char **argv)
   double kernelSigmaParzen;
   double kernelSigmaSVDD;
   
-  for (int cl = indexOfFirstClass; cl < indexOfLastClass; cl++)
+  for (int cl = indexOfFirstClass; cl <= indexOfLastClass; cl++)
   {
     std::cerr << "run for class " << cl << std::endl;
     int positiveClass = cl+1; //labels are from 1 to 1000, but our indices from 0 to 999
@@ -571,27 +591,29 @@ int main (int argc, char **argv)
     }  
     
     //train GP Var Approx
+    
     NICE::Vector matrixDInv;
-    trainGPVarApprox(matrixDInv, noiseGPVarApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);
+    trainGPVarApprox(matrixDInv, noiseGPVarApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );
     
     //train GP Var
     NICE::Matrix GPVarCholesky;
-    trainGPVar(GPVarCholesky, noiseGPVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);    
+    trainGPVar(GPVarCholesky, noiseGPVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );    
     
     //train GP Mean Approx
     NICE::Vector GPMeanApproxRightPart;
-    trainGPMeanApprox(GPMeanApproxRightPart, noiseGPMeanApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);
+    trainGPMeanApprox(GPMeanApproxRightPart, noiseGPMeanApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );
     
     //train GP Mean
     NICE::Vector GPMeanRightPart;
-    trainGPMean(GPMeanRightPart, noiseGPMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);    
+    trainGPMean(GPMeanRightPart, noiseGPMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );    
     
     //train Parzen 
     //nothing to do :)
     
     //train SVDD
     //TODO what do we need here?
-    KCMinimumEnclosingBall *svdd = trainSVDD(noiseSVDDParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);
+    KCMinimumEnclosingBall *svdd = trainSVDD(noiseSVDDParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );
   
     tTrain.stop();
     std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl;      
@@ -639,27 +661,27 @@ int main (int argc, char **argv)
       
       //evaluate GP Var Approx
       ClassificationResult rGPVarApprox;      
-      evaluateGPVarApprox(kernelVector, kernelSelf, matrixDInv, rGPVarApprox, timeForSingleExamplesGPVarApprox);
+      evaluateGPVarApprox( kernelVector, kernelSelf, matrixDInv, rGPVarApprox, timeForSingleExamplesGPVarApprox, runsPerClassToAverageTesting );
       
       //evaluate GP Var
       ClassificationResult rGPVar;
-      evaluateGPVar(kernelVector, kernelSelf, GPVarCholesky, rGPVar, timeForSingleExamplesGPVar);      
+      evaluateGPVar( kernelVector, kernelSelf, GPVarCholesky, rGPVar, timeForSingleExamplesGPVar, runsPerClassToAverageTesting );      
       
       //evaluate GP Mean Approx
       ClassificationResult rGPMeanApprox;      
-      evaluateGPMeanApprox(kernelVector, matrixDInv, rGPMeanApprox, timeForSingleExamplesGPMeanApprox);
+      evaluateGPMeanApprox( kernelVector, matrixDInv, rGPMeanApprox, timeForSingleExamplesGPMeanApprox, runsPerClassToAverageTesting );
       
       //evaluate GP Mean
       ClassificationResult rGPMean;
-      evaluateGPMean(kernelVector, GPMeanRightPart, rGPMean, timeForSingleExamplesGPMean);       
+      evaluateGPMean( kernelVector, GPMeanRightPart, rGPMean, timeForSingleExamplesGPMean, runsPerClassToAverageTesting );       
       
       //evaluate Parzen
       ClassificationResult rParzen;
-      evaluateParzen(kernelVector, rParzen, timeForSingleExamplesParzen); 
+      evaluateParzen( kernelVector, rParzen, timeForSingleExamplesParzen, runsPerClassToAverageTesting ); 
       
       //evaluate SVDD
       ClassificationResult rSVDD;
-      evaluateSVDD(svdd, kernelVector, rSVDD, timeForSingleExamplesSVDD);       
+      evaluateSVDD( svdd, kernelVector, rSVDD, timeForSingleExamplesSVDD, runsPerClassToAverageTesting );       
 
       
       // set ground truth label