浏览代码

testImageNetBinaryBruteForce modified

bodesheim 12 年之前
父节点
当前提交
f5049117c3
共有 1 个文件被更改,包括 389 次插入2 次删除
  1. 389 2
      progs/testImageNetBinaryBruteForce.cpp

+ 389 - 2
progs/testImageNetBinaryBruteForce.cpp

@@ -394,6 +394,244 @@ void inline trainGPSRVar(NICE::Matrix & choleskyMatrix, const double & noise, co
     std::cerr << "Precise time used for GPSRVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
 }
 
+// GP FITC approx
+void inline trainGPFITCMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples )
+{
+  std::vector<int> examplesToChoose;
+  indicesOfChosenExamples.clear();
+  
+  //add all examples for possible choice
+  for (int i = 0; i < nrOfExamplesPerClass; i++)
+  {
+    examplesToChoose.push_back(i);
+  }
+  
+  //now chose randomly some examples as active subset
+  int index;
+  for (int i = 0; i < std::min(nrOfRegressors,nrOfExamplesPerClass); i++)
+  {
+    index = rand() % examplesToChoose.size();
+    indicesOfChosenExamples.push_back(examplesToChoose[index]);
+    examplesToChoose.erase(examplesToChoose.begin() + index);
+  }
+  
+  NICE::Vector diagK (nrOfExamplesPerClass, 0.0);
+  //set every element
+  for (int i = 0; i < nrOfExamplesPerClass; i++ )
+  {
+    diagK(i) = kernelMatrix(i,i);
+  }
+  
+  NICE::Matrix Ku (indicesOfChosenExamples.size(), nrOfExamplesPerClass, 0.0);
+  int rowCnt(0);
+  //set every row
+  for (uint i = 0; i < indicesOfChosenExamples.size(); i++, rowCnt++ )
+  {
+    //set every element of this row
+    NICE::Vector col = kernelMatrix.getRow(indicesOfChosenExamples[i]);
+    for (int j = 0; j < nrOfExamplesPerClass; j++)
+    {
+      Ku(rowCnt,j) = col(j);
+    }
+  }
+  
+  //we could speed this up if we would order the indices
+  NICE::Matrix Kuu (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+  double tmp(0.0);
+  for (uint i = 0; i < indicesOfChosenExamples.size(); i++ )
+  {
+    for (uint j = i; j < indicesOfChosenExamples.size(); j++ )
+    {
+      tmp = kernelMatrix(indicesOfChosenExamples[i], indicesOfChosenExamples[j]);
+      Kuu(i,j) = tmp;
+      if (i != j)
+        Kuu(j,i) = tmp;
+    }
+  }
+  
+  NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :) 
+  
+    Timer tTrainPrecise;
+    tTrainPrecise.start();      
+
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
+    {       
+      
+//       NICE::Vector projectedLabels;
+//       projectedLabels.multiply(Kmn,y);
+      
+      CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+      
+      NICE::Matrix Luu (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+      cr.robustChol ( Kuu, Luu );  
+      
+      NICE::Matrix V (Ku);
+      choleskySolveMatrixLargeScale( Luu, V);
+      
+      NICE::Vector dg (diagK);
+      NICE::Vector sumV (diagK.size(),0.0);
+      for (uint i=0; i<V.cols(); i++)
+      {
+        for (uint j=0; j<V.rows(); j++)
+        {
+          sumV(i) += V(j,i)*V(j,i);
+        }
+        sumV(i) += noise;
+      }
+      dg += sumV;
+      
+      for (uint i=0; i<V.cols(); i++)
+      {
+        for (uint j=0; j<V.rows(); j++)
+        {
+          V(j,i) /= sqrt(dg(i));
+        }
+      }     
+      
+      NICE::Matrix Lu (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+      NICE::Matrix tmpVV (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+      tmpVV.multiply(V,V,false,true);
+      tmpVV.addIdentity(1.0);
+      cr.robustChol ( tmpVV, Lu );
+      
+      NICE::Vector r (dg);
+      for (uint i=0; i<r.size(); i++)
+      {
+        r(i) = 1.0/sqrt(r(i));
+      }
+      
+      NICE::Vector be (indicesOfChosenExamples.size(), 0.0);
+      choleskySolveLargeScale (Lu, V*r, be);
+      choleskySolveLargeScale (Lu.transpose(), be, be);
+        
+      GPMeanRightPart.resize(indicesOfChosenExamples.size());
+      GPMeanRightPart.set(0.0);
+      
+      choleskySolveLargeScale ( Luu.transpose(), be, GPMeanRightPart );
+    }
+ 
+    tTrainPrecise.stop(); 
+    std::cerr << "Precise time used for GPFITCMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
+}
+
+// GP FITC approx
+void inline trainGPFITCVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples )
+{
+  std::vector<int> examplesToChoose;
+  indicesOfChosenExamples.clear();
+  
+  //add all examples for possible choice
+  for (int i = 0; i < nrOfExamplesPerClass; i++)
+  {
+    examplesToChoose.push_back(i);
+  }
+  
+  //now chose randomly some examples as active subset
+  int index;
+  for (int i = 0; i < std::min(nrOfRegressors,nrOfExamplesPerClass); i++)
+  {
+    index = rand() % examplesToChoose.size();
+    indicesOfChosenExamples.push_back(examplesToChoose[index]);
+    examplesToChoose.erase(examplesToChoose.begin() + index);
+  }
+  
+  NICE::Vector diagK (nrOfExamplesPerClass, 0.0);
+  //set every element
+  for (int i = 0; i < nrOfExamplesPerClass; i++ )
+  {
+    diagK(i) = kernelMatrix(i,i);
+  }
+  
+  NICE::Matrix Ku (indicesOfChosenExamples.size(), nrOfExamplesPerClass, 0.0);
+  int rowCnt(0);
+  //set every row
+  for (uint i = 0; i < indicesOfChosenExamples.size(); i++, rowCnt++ )
+  {
+    //set every element of this row
+    NICE::Vector col = kernelMatrix.getRow(indicesOfChosenExamples[i]);
+    for (int j = 0; j < nrOfExamplesPerClass; j++)
+    {
+      Ku(rowCnt,j) = col(j);
+    }
+  }
+  
+  //we could speed this up if we would order the indices
+  NICE::Matrix Kuu (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+  double tmp(0.0);
+  for (uint i = 0; i < indicesOfChosenExamples.size(); i++ )
+  {
+    for (uint j = i; j < indicesOfChosenExamples.size(); j++ )
+    {
+      tmp = kernelMatrix(indicesOfChosenExamples[i], indicesOfChosenExamples[j]);
+      Kuu(i,j) = tmp;
+      if (i != j)
+        Kuu(j,i) = tmp;
+    }
+  }
+  
+  NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :) 
+  
+    Timer tTrainPrecise;
+    tTrainPrecise.start();      
+
+    for (int run = 0; run < runsPerClassToAverageTraining; run++)
+    {       
+      
+//       NICE::Vector projectedLabels;
+//       projectedLabels.multiply(Kmn,y);
+      
+      CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+      
+      NICE::Matrix Luu (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+      cr.robustChol ( Kuu, Luu );  
+      
+      NICE::Matrix V (Ku);
+      choleskySolveMatrixLargeScale( Luu, V);
+      
+      NICE::Vector dg (diagK);
+      NICE::Vector sumV (diagK.size(),0.0);
+      for (uint i=0; i<V.cols(); i++)
+      {
+        for (uint j=0; j<V.rows(); j++)
+        {
+          sumV(i) += V(j,i)*V(j,i);
+        }
+        sumV(i) += noise;
+      }
+      dg += sumV;
+      
+      for (uint i=0; i<V.cols(); i++)
+      {
+        for (uint j=0; j<V.rows(); j++)
+        {
+          V(j,i) /= sqrt(dg(i));
+        }
+      }     
+      
+      NICE::Matrix Lu (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+      NICE::Matrix tmpVV (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+      tmpVV.multiply(V,V,false,true);
+      tmpVV.addIdentity(1.0);
+      cr.robustChol ( tmpVV, Lu );
+      
+      NICE::Matrix iKuu (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+      iKuu.addIdentity(1.0);
+      choleskySolveMatrixLargeScale ( Luu.transpose(), iKuu );
+      choleskySolveMatrixLargeScale ( Luu, iKuu );
+      
+      NICE::Matrix LuLuu (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+      LuLuu.multiply(Lu,Luu);
+      choleskyMatrix.setIdentity();
+      choleskySolveMatrixLargeScale ( LuLuu.transpose(), choleskyMatrix);
+      choleskySolveMatrixLargeScale ( LuLuu, choleskyMatrix);
+      
+      choleskyMatrix -= iKuu;
+    }
+ 
+    tTrainPrecise.stop();  
+    std::cerr << "Precise time used for GPFITCVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;    
+}
+
 void inline trainGPOptMean(NICE::Vector & rightPartGPOptMean, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
 {
     DiagonalMatrixApprox diagApprox ( true /*verbose*/ );
@@ -697,6 +935,71 @@ void inline evaluateGPSRVar(const NICE::Vector & kernelVector,  const NICE::Matr
     r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
 }
 
+void inline evaluateGPFITCMean(const NICE::Vector & kernelVector,  const NICE::Vector & GPFITCMeanRightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting, const int & nrOfRegressors, const std::vector<int> & indicesOfChosenExamples)
+{
+    double mean;
+    
+    //grep the entries corresponding to the active set
+    NICE::Vector kernelVectorM;
+    kernelVectorM.resize(nrOfRegressors);
+    for (int i = 0; i < nrOfRegressors; i++)
+    {
+      kernelVectorM[i] = kernelVector[indicesOfChosenExamples[i]];
+    }
+
+    Timer tTestSingle;
+    tTestSingle.start();
+    
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {
+      // \mean = \k_*^T \cdot K^{-1} \cdot y    
+      mean = kernelVectorM.scalarProduct ( GPFITCMeanRightPart );
+    }
+
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = mean;
+
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+void inline evaluateGPFITCVar(const NICE::Vector & kernelVector,  const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples, const double & noise)
+{
+    double uncertainty;
+    
+    //grep the entries corresponding to the active set
+    NICE::Vector kernelVectorM;
+    kernelVectorM.resize(nrOfRegressors);
+    for (int i = 0; i < nrOfRegressors; i++)
+    {
+      kernelVectorM[i] = kernelVector[indicesOfChosenExamples[i]];
+    } 
+    
+    Timer tTestSingle;
+    tTestSingle.start();
+    
+    for (int run = 0; run < runsPerClassToAverageTesting; run++)
+    {
+      NICE::Vector tmp (nrOfRegressors,0.0);  
+      tmp = choleskyMatrix*kernelVectorM;
+      tmp *= kernelVectorM;
+           
+      uncertainty = 1.0 + tmp.Sum();
+    }
+
+    tTestSingle.stop();
+    timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;      
+    
+    FullVector scores ( 2 );
+    scores[0] = 0.0;
+    scores[1] = 1.0 - uncertainty;
+
+    r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
 //this method is completely the same as evaluateGPMeanApprox, but for convenience, it is its own method
 void inline evaluateGPOptMean(const NICE::Vector & kernelVector, const NICE::Vector & rightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
 {
@@ -824,6 +1127,8 @@ int main (int argc, char **argv)
   bool GPVar = conf.gB( "main", "GPVar", false);
   bool GPSRMean = conf.gB( "main", "GPSRMean", false);
   bool GPSRVar = conf.gB( "main", "GPSRVar", false);  
+  bool GPFITCMean = conf.gB( "main", "GPFITCMean", false);
+  bool GPFITCVar = conf.gB( "main", "GPFITCVar", false); 
   bool GPOptMean = conf.gB( "main", "GPOptMean", false);
   bool GPOptVar = conf.gB( "main", "GPOptVar", false);    
   bool Parzen = conf.gB( "main", "Parzen", false);
@@ -853,6 +1158,14 @@ int main (int argc, char **argv)
     std::cerr << "GPSRVar used" << std::endl;
   else 
     std::cerr << "GPSRVar not used" << std::endl;
+  if (GPFITCMean)
+    std::cerr << "GPFITCMean used" << std::endl;
+  else 
+    std::cerr << "GPFITCMean not used" << std::endl;
+  if (GPFITCVar)
+    std::cerr << "GPFITCVar used" << std::endl;
+  else 
+    std::cerr << "GPFITCVar not used" << std::endl; 
   if (GPOptMean)
     std::cerr << "GPOptMean used" << std::endl;
   else 
@@ -889,6 +1202,12 @@ int main (int argc, char **argv)
   //GP SR var
   NICE::Vector sigmaGPSRVarParas(nrOfClassesToConcidere,0.0);
   NICE::Vector noiseGPSRVarParas(nrOfClassesToConcidere,0.0);
+  //GP FITC mean  
+  NICE::Vector sigmaGPFITCMeanParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPFITCMeanParas(nrOfClassesToConcidere,0.0);
+  //GP FITC var
+  NICE::Vector sigmaGPFITCVarParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPFITCVarParas(nrOfClassesToConcidere,0.0);  
   //GP Opt mean  
   NICE::Vector sigmaGPOptMeanParas(nrOfClassesToConcidere,0.0);
   NICE::Vector noiseGPOptMeanParas(nrOfClassesToConcidere,0.0);
@@ -943,6 +1262,12 @@ int main (int argc, char **argv)
     //GP SR var  
     readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPSRVarParas);
     readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPSRVarParas); 
+    //GP FITC mean  
+    readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPFITCMeanParas);
+    readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPFITCMeanParas);
+    //GP FITC var  
+    readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPFITCVarParas);
+    readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPFITCVarParas);      
     //GP Opt mean  
     readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPOptMeanParas);
     readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPOptMeanParas);
@@ -979,6 +1304,12 @@ int main (int argc, char **argv)
     //GP SR var  
     sigmaGPSRVarParas.set(sigma);
     noiseGPSRVarParas.set(noise); 
+    //GP FITC mean  
+    sigmaGPFITCMeanParas.set(sigma);
+    noiseGPFITCMeanParas.set(noise);
+    //GP FITC var  
+    sigmaGPFITCVarParas.set(sigma);
+    noiseGPFITCVarParas.set(noise);
     //GP Opt mean  
     sigmaGPOptMeanParas.set(sigma);
     noiseGPOptMeanParas.set(noise);
@@ -1028,6 +1359,8 @@ int main (int argc, char **argv)
   double OverallPerformanceGPMean(0.0);
   double OverallPerformanceGPSRMean(0.0);
   double OverallPerformanceGPSRVar(0.0);  
+  double OverallPerformanceGPFITCMean(0.0);
+  double OverallPerformanceGPFITCVar(0.0); 
   double OverallPerformanceGPOptMean(0.0);
   double OverallPerformanceGPOptVar(0.0);   
   double OverallPerformanceParzen(0.0);
@@ -1040,6 +1373,8 @@ int main (int argc, char **argv)
   double kernelSigmaGPMean;
   double kernelSigmaGPSRMean;
   double kernelSigmaGPSRVar;
+  double kernelSigmaGPFITCMean;
+  double kernelSigmaGPFITCVar; 
   double kernelSigmaGPOptMean;
   double kernelSigmaGPOptVar;  
   double kernelSigmaParzen;
@@ -1057,6 +1392,8 @@ int main (int argc, char **argv)
     kernelSigmaGPMean = sigmaGPMeanParas[cl];
     kernelSigmaGPSRMean = sigmaGPSRMeanParas[cl];
     kernelSigmaGPSRVar = sigmaGPSRVarParas[cl];
+    kernelSigmaGPFITCMean = sigmaGPFITCMeanParas[cl];
+    kernelSigmaGPFITCVar = sigmaGPFITCVarParas[cl];    
     kernelSigmaGPOptMean = sigmaGPOptMeanParas[cl];
     kernelSigmaGPOptVar = sigmaGPOptVarParas[cl];    
     kernelSigmaParzen = sigmaParzenParas[cl];
@@ -1124,6 +1461,20 @@ int main (int argc, char **argv)
     if (GPSRVar)
       trainGPSRVar(GPSRVarCholesky, noiseGPSRVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPSRVar );  
     
+    //train GP FITC Mean
+    NICE::Vector GPFITCMeanRightPart;
+    std::vector<int> indicesOfChosenExamplesGPFITCMean;
+    int nrOfRegressors = conf.gI( "GPFITC", "nrOfRegressors", nrOfExamplesPerClass/5);
+    nrOfRegressors = std::min( nrOfRegressors, nrOfExamplesPerClass );
+    if (GPFITCMean)
+      trainGPFITCMean(GPFITCMeanRightPart, noiseGPFITCMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPFITCMean ); 
+
+     //train GP FITC Var
+    NICE::Matrix GPFITCVarCholesky;   
+    std::vector<int> indicesOfChosenExamplesGPFITCVar;
+    if (GPFITCVar)
+      trainGPFITCVar(GPFITCVarCholesky, noiseGPFITCVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPFITCVar );    
+    
     //train GP Opt Mean
     NICE::Vector GPOptMeanRightPart;
     if (GPOptMean)
@@ -1159,7 +1510,9 @@ int main (int argc, char **argv)
     ClassificationResults resultsGPMeanApprox;
     ClassificationResults resultsGPMean;
     ClassificationResults resultsGPSRMean;
-    ClassificationResults resultsGPSRVar;    
+    ClassificationResults resultsGPSRVar;  
+    ClassificationResults resultsGPFITCMean;
+    ClassificationResults resultsGPFITCVar;
     ClassificationResults resultsGPOptMean;
     ClassificationResults resultsGPOptVar;    
     ClassificationResults resultsParzen;
@@ -1176,6 +1529,8 @@ int main (int argc, char **argv)
     double timeForSingleExamplesGPMean(0.0);    
     double timeForSingleExamplesGPSRMean(0.0);
     double timeForSingleExamplesGPSRVar(0.0);
+    double timeForSingleExamplesGPFITCMean(0.0);
+    double timeForSingleExamplesGPFITCVar(0.0);
     double timeForSingleExamplesGPOptMean(0.0);
     double timeForSingleExamplesGPOptVar(0.0);    
     double timeForSingleExamplesParzen(0.0);    
@@ -1228,7 +1583,17 @@ int main (int argc, char **argv)
       //evaluate GP SR Var
       ClassificationResult rGPSRVar;
       if (GPSRVar)
-        evaluateGPSRVar( kernelVector, GPSRVarCholesky, rGPSRVar, timeForSingleExamplesGPSRVar, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPSRVar, noiseGPSRVarParas[cl] );       
+        evaluateGPSRVar( kernelVector, GPSRVarCholesky, rGPSRVar, timeForSingleExamplesGPSRVar, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPSRVar, noiseGPSRVarParas[cl] );  
+      
+      //evaluate GP FITC Mean
+      ClassificationResult rGPFITCMean;
+      if (GPFITCMean)
+        evaluateGPFITCMean( kernelVector, GPFITCMeanRightPart, rGPFITCMean, timeForSingleExamplesGPFITCMean, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPFITCMean );       
+      
+      //evaluate GP FITC Var
+      ClassificationResult rGPFITCVar;
+      if (GPFITCVar)
+        evaluateGPFITCVar( kernelVector, GPFITCVarCholesky, rGPFITCVar, timeForSingleExamplesGPFITCVar, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPFITCVar, noiseGPFITCVarParas[cl] );             
       
       //evaluate GP Opt Mean
       ClassificationResult rGPOptMean;
@@ -1258,6 +1623,8 @@ int main (int argc, char **argv)
       rGPMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
       rGPSRMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
       rGPSRVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rGPFITCMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rGPFITCVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
       rGPOptMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
       rGPOptVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;      
       rParzen.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
@@ -1270,6 +1637,8 @@ int main (int argc, char **argv)
       resultsGPMean.push_back ( rGPMean );
       resultsGPSRMean.push_back ( rGPSRMean );
       resultsGPSRVar.push_back ( rGPSRVar );
+      resultsGPFITCMean.push_back ( rGPFITCMean );
+      resultsGPFITCVar.push_back ( rGPFITCVar );
       resultsGPOptMean.push_back ( rGPOptMean );
       resultsGPOptVar.push_back ( rGPOptVar );      
       resultsParzen.push_back ( rParzen );
@@ -1285,6 +1654,8 @@ int main (int argc, char **argv)
     timeForSingleExamplesGPMean/= imageNetTest.getNumPreloadedExamples();
     timeForSingleExamplesGPSRMean/= imageNetTest.getNumPreloadedExamples();
     timeForSingleExamplesGPSRVar/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesGPFITCMean/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesGPFITCVar/= imageNetTest.getNumPreloadedExamples();
     timeForSingleExamplesGPOptMean/= imageNetTest.getNumPreloadedExamples();
     timeForSingleExamplesGPOptVar/= imageNetTest.getNumPreloadedExamples();    
     timeForSingleExamplesParzen/= imageNetTest.getNumPreloadedExamples();
@@ -1296,6 +1667,8 @@ int main (int argc, char **argv)
     std::cerr << "GPMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMean << std::endl;    
     std::cerr << "GPSRMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPSRMean << std::endl;    
     std::cerr << "GPSRVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPSRVar << std::endl;    
+    std::cerr << "GPFITCMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPFITCMean << std::endl;    
+    std::cerr << "GPFITCVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPFITCVar << std::endl; 
     std::cerr << "GPOptMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPOptMean << std::endl;    
     std::cerr << "GPOptVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPOptVar << std::endl;    
     std::cerr << "Parzen -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesParzen << std::endl;    
@@ -1308,6 +1681,8 @@ int main (int argc, char **argv)
     double perfvalueGPMean( 0.0 );
     double perfvalueGPSRMean( 0.0 );
     double perfvalueGPSRVar( 0.0 );
+    double perfvalueGPFITCMean( 0.0 );
+    double perfvalueGPFITCVar( 0.0 );    
     double perfvalueGPOptMean( 0.0 );
     double perfvalueGPOptVar( 0.0 );    
     double perfvalueParzen( 0.0 );
@@ -1325,6 +1700,10 @@ int main (int argc, char **argv)
       perfvalueGPSRMean = resultsGPSRMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
     if (GPSRVar)
       perfvalueGPSRVar = resultsGPSRVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    if (GPFITCMean)
+      perfvalueGPFITCMean = resultsGPFITCMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    if (GPFITCVar)
+      perfvalueGPFITCVar = resultsGPFITCVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );    
     if (GPOptMean)
       perfvalueGPOptMean = resultsGPOptMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
     if (GPOptVar)
@@ -1341,6 +1720,8 @@ int main (int argc, char **argv)
     std::cerr << "Performance GPMean: " << perfvalueGPMean << std::endl;
     std::cerr << "Performance GPSRMean: " << perfvalueGPSRMean << std::endl;
     std::cerr << "Performance GPSRVar: " << perfvalueGPSRVar << std::endl;
+    std::cerr << "Performance GPFITCMean: " << perfvalueGPFITCMean << std::endl;
+    std::cerr << "Performance GPFITCVar: " << perfvalueGPFITCVar << std::endl;    
     std::cerr << "Performance GPOptMean: " << perfvalueGPOptMean << std::endl;
     std::cerr << "Performance GPOptVar: " << perfvalueGPOptVar << std::endl;    
     std::cerr << "Performance Parzen: " << perfvalueParzen << std::endl;
@@ -1352,6 +1733,8 @@ int main (int argc, char **argv)
     OverallPerformanceGPMean += perfvalueGPMean;
     OverallPerformanceGPSRMean += perfvalueGPSRMean;
     OverallPerformanceGPSRVar += perfvalueGPSRVar;
+    OverallPerformanceGPFITCMean += perfvalueGPFITCMean;
+    OverallPerformanceGPFITCVar += perfvalueGPFITCVar;   
     OverallPerformanceGPOptMean += perfvalueGPOptMean;
     OverallPerformanceGPOptVar += perfvalueGPOptVar;    
     OverallPerformanceParzen += perfvalueParzen;
@@ -1368,6 +1751,8 @@ int main (int argc, char **argv)
   OverallPerformanceGPMean /= nrOfClassesToConcidere;
   OverallPerformanceGPSRMean /= nrOfClassesToConcidere;
   OverallPerformanceGPSRVar /= nrOfClassesToConcidere;
+  OverallPerformanceGPFITCMean /= nrOfClassesToConcidere;
+  OverallPerformanceGPFITCVar /= nrOfClassesToConcidere;
   OverallPerformanceGPOptMean /= nrOfClassesToConcidere;
   OverallPerformanceGPOptVar /= nrOfClassesToConcidere;  
   OverallPerformanceParzen /= nrOfClassesToConcidere;
@@ -1379,6 +1764,8 @@ int main (int argc, char **argv)
   std::cerr << "overall performance GPMean: " << OverallPerformanceGPMean << std::endl;
   std::cerr << "overall performance GPSRMean: " << OverallPerformanceGPSRMean << std::endl;
   std::cerr << "overall performance GPSRVar: " << OverallPerformanceGPSRVar << std::endl;
+  std::cerr << "overall performance GPFITCMean: " << OverallPerformanceGPFITCMean << std::endl;
+  std::cerr << "overall performance GPFITCVar: " << OverallPerformanceGPFITCVar << std::endl;
   std::cerr << "overall performance GPOptMean: " << OverallPerformanceGPOptMean << std::endl;
   std::cerr << "overall performance GPOptVar: " << OverallPerformanceGPOptVar << std::endl;  
   std::cerr << "overall performance Parzen: " << OverallPerformanceParzen << std::endl;