/** * @file testImageNetBinaryBruteForce.cpp * @brief perform ImageNet tests with binary tasks for OCC using GP mean and variance, sophisticated approximations of both, Parzen Density Estimation and SVDD * @author Alexander Lütz * @date 23-05-2012 (dd-mm-yyyy) */ #include #include #include "core/basics/Config.h" #include "core/basics/Timer.h" #include "core/algebra/CholeskyRobust.h" #include "core/vector/Algorithms.h" #include "core/vector/SparseVectorT.h" #include "vislearning/cbaselib/ClassificationResults.h" #include "vislearning/baselib/ProgressBar.h" #include "vislearning/classifier/kernelclassifier/KCMinimumEnclosingBall.h" #include "fast-hik/tools.h" #include "fast-hik/MatFileIO.h" #include "fast-hik/ImageNetData.h" using namespace std; using namespace NICE; using namespace OBJREC; // --------------- THE KERNEL FUNCTION ( exponential kernel with euclidian distance ) ---------------------- double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)//, const bool & verbose = false) { double inner_sum(0.0); double d; //new version, where we needed on average 0.001707 s for each test sample NICE::SparseVector::const_iterator aIt = a.begin(); NICE::SparseVector::const_iterator bIt = b.begin(); while ( (aIt != a.end()) && (bIt != b.end()) ) { if (aIt->first == bIt->first) { d = ( aIt->second - bIt->second ); inner_sum += d * d; aIt++; bIt++; } else if ( aIt->first < bIt->first) { inner_sum += aIt->second * aIt->second; aIt++; } else { inner_sum += bIt->second * bIt->second; bIt++; } } //compute remaining values, if b reached the end but not a while (aIt != a.end()) { inner_sum += aIt->second * aIt->second; aIt++; } //compute remaining values, if a reached the end but not b while (bIt != b.end()) { inner_sum += bIt->second * bIt->second; bIt++; } inner_sum /= (2.0*sigma*sigma); return exp(-inner_sum); } void readParameters(string & filename, const int & size, NICE::Vector & parameterVector) { parameterVector.resize(size); parameterVector.set(0.0); ifstream is(filename.c_str()); if ( !is.good() ) fthrow(IOException, "Unable to read parameters."); // string tmp; int cnt(0); while (! is.eof()) { is >> tmp; parameterVector[cnt] = atof(tmp.c_str()); cnt++; } // is.close(); } //------------------- TRAINING METHODS -------------------- void inline trainGPVarApprox(NICE::Vector & matrixDInv, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining ) { std::cerr << "nrOfExamplesPerClass : " << nrOfExamplesPerClass << std::endl; Timer tTrainPreciseTimer; tTrainPreciseTimer.start(); for (int run = 0; run < runsPerClassToAverageTraining; run++) { matrixDInv.resize(nrOfExamplesPerClass); matrixDInv.set(0.0); //compute D //start with adding some noise, if necessary if (noise != 0.0) matrixDInv.set(noise); else matrixDInv.set(0.0); for (int i = 0; i < nrOfExamplesPerClass; i++) { for (int j = i; j < nrOfExamplesPerClass; j++) { matrixDInv[i] += kernelMatrix(i,j); if (i != j) matrixDInv[j] += kernelMatrix(i,j); } } //compute its inverse for (int i = 0; i < nrOfExamplesPerClass; i++) { matrixDInv[i] = 1.0 / matrixDInv[i]; } } tTrainPreciseTimer.stop(); std::cerr << "Precise time used for GPVarApprox training class " << classNumber << ": " << tTrainPreciseTimer.getLast()/(double)runsPerClassToAverageTraining << std::endl; } void inline trainGPVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining ) { Timer tTrainPrecise; tTrainPrecise.start(); for (int run = 0; run < runsPerClassToAverageTraining; run++) { CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/); choleskyMatrix.resize(nrOfExamplesPerClass, nrOfExamplesPerClass); choleskyMatrix.set(0.0); cr.robustChol ( kernelMatrix, choleskyMatrix ); } tTrainPrecise.stop(); std::cerr << "Precise time used for GPVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl; } void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining ) { Timer tTrainPrecise; tTrainPrecise.start(); for (int run = 0; run < runsPerClassToAverageTraining; run++) { NICE::Vector matrixDInv(nrOfExamplesPerClass,0.0); //compute D //start with adding some noise, if necessary if (noise != 0.0) matrixDInv.set(noise); else matrixDInv.set(0.0); for (int i = 0; i < nrOfExamplesPerClass; i++) { for (int j = i; j < nrOfExamplesPerClass; j++) { matrixDInv[i] += kernelMatrix(i,j); if (i != j) matrixDInv[j] += kernelMatrix(i,j); } } //compute its inverse (and multiply every element with the label vector, which contains only one-entries...) GPMeanApproxRightPart.resize(nrOfExamplesPerClass); for (int i = 0; i < nrOfExamplesPerClass; i++) { GPMeanApproxRightPart[i] = 1.0 / matrixDInv[i]; } } tTrainPrecise.stop(); std::cerr << "Precise time used for GPMeanApprox training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl; } void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining ) { Timer tTrainPrecise; tTrainPrecise.start(); for (int run = 0; run < runsPerClassToAverageTraining; run++) { CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/); NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0); cr.robustChol ( kernelMatrix, choleskyMatrix ); GPMeanRightPart.resize(nrOfExamplesPerClass); GPMeanRightPart.set(0.0); NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :) choleskySolveLargeScale ( choleskyMatrix, y, GPMeanRightPart ); } tTrainPrecise.stop(); std::cerr << "Precise time used for GPMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl; } KCMinimumEnclosingBall *trainSVDD( const double & noise, const NICE::Matrix kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining ) { Config conf; // set the outlier ratio (Paul optimized this paramter FIXME) conf.sD( "SVDD", "outlier_fraction", 0.1 ); KCMinimumEnclosingBall *svdd = new KCMinimumEnclosingBall ( &conf, NULL /* no kernel function */, "SVDD" /* config section */, false /* update cholesky */ ); KernelData kernelData ( &conf, kernelMatrix, "Kernel" ); Timer tTrainPrecise; tTrainPrecise.start(); for (int run = 0; run < runsPerClassToAverageTraining; run++) { NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :) // KCMinimumEnclosingBall does not store the kernel data object, therefore, we are save with passing a local copy svdd->teach ( &kernelData, y ); } tTrainPrecise.stop(); std::cerr << "Precise time used for SVDD training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl; return svdd; } // ------------- EVALUATION METHODS --------------------- void inline evaluateGPVarApprox(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Vector & matrixDInv, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting) { double uncertainty; Timer tTestSingle; tTestSingle.start(); for (int run = 0; run < runsPerClassToAverageTesting; run++) { NICE::Vector rightPart (kernelVector.size()); for (int j = 0; j < kernelVector.size(); j++) { rightPart[j] = kernelVector[j] * matrixDInv[j]; } uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart ); } tTestSingle.stop(); timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting; FullVector scores ( 2 ); scores[0] = 0.0; scores[1] = 1.0 - uncertainty; r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores ); } void inline evaluateGPVar(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting) { double uncertainty; Timer tTestSingle; tTestSingle.start(); for (int run = 0; run < runsPerClassToAverageTesting; run++) { NICE::Vector rightPart (kernelVector.size(),0.0); choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart ); uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart ); } tTestSingle.stop(); timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting; FullVector scores ( 2 ); scores[0] = 0.0; scores[1] = 1.0 - uncertainty; r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores ); } void inline evaluateGPMeanApprox(const NICE::Vector & kernelVector, const NICE::Vector & rightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting) { double mean; Timer tTestSingle; tTestSingle.start(); for (int run = 0; run < runsPerClassToAverageTesting; run++) { mean = kernelVector.scalarProduct ( rightPart ); } tTestSingle.stop(); timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting; FullVector scores ( 2 ); scores[0] = 0.0; scores[1] = mean; r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores ); } void inline evaluateGPMean(const NICE::Vector & kernelVector, const NICE::Vector & GPMeanRightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting) { double mean; Timer tTestSingle; tTestSingle.start(); for (int run = 0; run < runsPerClassToAverageTesting; run++) { mean = kernelVector.scalarProduct ( GPMeanRightPart ); } tTestSingle.stop(); timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting; FullVector scores ( 2 ); scores[0] = 0.0; scores[1] = mean; r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores ); } void inline evaluateParzen(const NICE::Vector & kernelVector, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting) { double score; Timer tTestSingle; tTestSingle.start(); for (int run = 0; run < runsPerClassToAverageTesting; run++) { double score( kernelVector.Sum() / (double) kernelVector.size() ); //maybe we could directly call kernelVector.Mean() } tTestSingle.stop(); timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting; FullVector scores ( 2 ); scores[0] = 0.0; scores[1] = score; r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores ); } void inline evaluateSVDD( KCMinimumEnclosingBall *svdd, const NICE::Vector & kernelVector, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting) { Timer tTestSingle; tTestSingle.start(); for (int run = 0; run < runsPerClassToAverageTesting; run++) { // In the following, we assume that we are using a Gaussian kernel r = svdd->classifyKernel ( kernelVector, 1.0 /* kernel self */ ); } tTestSingle.stop(); timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting; } /** test the basic functionality of fast-hik hyperparameter optimization */ int main (int argc, char **argv) { std::set_terminate(__gnu_cxx::__verbose_terminate_handler); Config conf ( argc, argv ); string resultsfile = conf.gS("main", "results", "results.txt" ); int nrOfExamplesPerClass = conf.gI("main", "nrOfExamplesPerClass", 50); nrOfExamplesPerClass = std::min(nrOfExamplesPerClass, 100); // we do not have more than 100 examples per class int indexOfFirstClass = conf.gI("main", "indexOfFirstClass", 0); indexOfFirstClass = std::max(indexOfFirstClass, 0); //we do not have less than 0 classes int indexOfLastClass = conf.gI("main", "indexOfLastClass", 999); indexOfLastClass = std::min(indexOfLastClass, 999); //we do not have more than 1000 classes int nrOfClassesToConcidere = (indexOfLastClass - indexOfLastClass)+1; int runsPerClassToAverageTraining = conf.gI( "main", "runsPerClassToAverageTraining", 1 ); int runsPerClassToAverageTesting = conf.gI( "main", "runsPerClassToAverageTesting", 1 ); bool shareParameters = conf.gB("main" , "shareParameters", true); // GP variance approximation NICE::Vector sigmaGPVarApproxParas(nrOfClassesToConcidere,0.0); NICE::Vector noiseGPVarApproxParas(nrOfClassesToConcidere,0.0); // GP variance NICE::Vector sigmaGPVarParas(nrOfClassesToConcidere,0.0); NICE::Vector noiseGPVarParas(nrOfClassesToConcidere,0.0); //GP mean approximation NICE::Vector sigmaGPMeanApproxParas(nrOfClassesToConcidere,0.0); NICE::Vector noiseGPMeanApproxParas(nrOfClassesToConcidere,0.0); //GP mean NICE::Vector sigmaGPMeanParas(nrOfClassesToConcidere,0.0); NICE::Vector noiseGPMeanParas(nrOfClassesToConcidere,0.0); //Parzen NICE::Vector sigmaParzenParas(nrOfClassesToConcidere,0.0); NICE::Vector noiseParzenParas(nrOfClassesToConcidere,0.0); //SVDD NICE::Vector sigmaSVDDParas(nrOfClassesToConcidere,0.0); NICE::Vector noiseSVDDParas(nrOfClassesToConcidere,0.0); if (!shareParameters) { //read the optimal parameters for the different methods // GP variance approximation string sigmaGPVarApproxFile = conf.gS("main", "sigmaGPVarApproxFile", "approxVarSigma.txt"); string noiseGPVarApproxFile = conf.gS("main", "noiseGPVarApproxFile", "approxVarNoise.txt"); // GP variance string sigmaGPVarFile = conf.gS("main", "sigmaGPVarFile", "approxVarSigma.txt"); string noiseGPVarFile = conf.gS("main", "noiseGPVarFile", "approxVarNoise.txt"); //GP mean approximation string sigmaGPMeanApproxFile = conf.gS("main", "sigmaGPMeanApproxFile", "approxVarSigma.txt"); string noiseGPMeanApproxFile = conf.gS("main", "noiseGPMeanApproxFile", "approxVarNoise.txt"); //GP mean string sigmaGPMeanFile = conf.gS("main", "sigmaGPMeanFile", "approxVarSigma.txt"); string noiseGPMeanFile = conf.gS("main", "noiseGPMeanFile", "approxVarNoise.txt"); //Parzen string sigmaParzenFile = conf.gS("main", "sigmaParzenFile", "approxVarSigma.txt"); string noiseParzenFile = conf.gS("main", "noiseParzenFile", "approxVarNoise.txt"); //SVDD string sigmaSVDDFile = conf.gS("main", "sigmaSVDDFile", "approxVarSigma.txt"); string noiseSVDDFile = conf.gS("main", "noiseSVDDFile", "approxVarNoise.txt"); // GP variance approximation readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarApproxParas); readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarApproxParas); // GP variance readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarParas); readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarParas); //GP mean approximation readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPMeanApproxParas); readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPMeanApproxParas); //GP mean readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPMeanParas); readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPMeanParas); //Parzen readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaParzenParas); readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseParzenParas); //SVDD readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaSVDDParas); readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseSVDDParas); } else { double noise = conf.gD( "main", "noise", 0.01 ); double sigma = conf.gD( "main", "sigma", 1.0 ); sigmaGPVarApproxParas.set(sigma); noiseGPVarApproxParas.set(noise); // GP variance sigmaGPVarParas.set(sigma); noiseGPVarParas.set(noise); //GP mean approximation sigmaGPMeanApproxParas.set(sigma); noiseGPMeanApproxParas.set(noise); //GP mean sigmaGPMeanParas.set(sigma); noiseGPMeanParas.set(noise); //Parzen sigmaParzenParas.set(sigma); noiseParzenParas.set(noise); //SVDD sigmaSVDDParas.set(sigma); noiseSVDDParas.set(noise); } // -------- optimal parameters read -------------- std::vector trainingData; NICE::Vector y; std::cerr << "Reading ImageNet data ..." << std::endl; bool imageNetLocal = conf.gB("main", "imageNetLocal" , false); string imageNetPath; if (imageNetLocal) imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/"; else imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/"; ImageNetData imageNetTrain ( imageNetPath + "demo/" ); imageNetTrain.preloadData( "train", "training" ); trainingData = imageNetTrain.getPreloadedData(); y = imageNetTrain.getPreloadedLabels(); std::cerr << "Reading of training data finished" << std::endl; std::cerr << "trainingData.size(): " << trainingData.size() << std::endl; std::cerr << "y.size(): " << y.size() << std::endl; std::cerr << "Reading ImageNet test data files (takes some seconds)..." << std::endl; ImageNetData imageNetTest ( imageNetPath + "demo/" ); imageNetTest.preloadData ( "val", "testing" ); imageNetTest.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" ); double OverallPerformanceGPVarApprox(0.0); double OverallPerformanceGPVar(0.0); double OverallPerformanceGPMeanApprox(0.0); double OverallPerformanceGPMean(0.0); double OverallPerformanceParzen(0.0); double OverallPerformanceSVDD(0.0); double kernelSigmaGPVarApprox; double kernelSigmaGPVar; double kernelSigmaGPMeanApprox; double kernelSigmaGPMean; double kernelSigmaParzen; double kernelSigmaSVDD; for (int cl = indexOfFirstClass; cl <= indexOfLastClass; cl++) { std::cerr << "run for class " << cl << std::endl; int positiveClass = cl+1; //labels are from 1 to 1000, but our indices from 0 to 999 // ------------------------------ TRAINING ------------------------------ kernelSigmaGPVarApprox = sigmaGPVarApproxParas[cl]; kernelSigmaGPVar = sigmaGPVarParas[cl]; kernelSigmaGPMeanApprox = sigmaGPMeanApproxParas[cl]; kernelSigmaGPMean = sigmaGPMeanParas[cl]; kernelSigmaParzen = sigmaParzenParas[cl]; kernelSigmaSVDD = sigmaSVDDParas[cl]; Timer tTrain; tTrain.start(); NICE::Matrix kernelMatrix(nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0); //TODO in theory we have to compute a single kernel Matrix for every method, since every method may have its own optimal parameter // I'm sure, we can speed it up a bit and compute it only for every different parameter //nonetheless, it's not as nice as we originally thought (same matrix for every method) //NOTE since we're only interested in runtimes, we can ignore this (and still do some further code optimization...) //TODO /* //adding some noise, if necessary if (noiseParas[cl] != 0.0) { kernelMatrix.addIdentity(noiseParas[cl]); } else { //zero was already set } */ //now sum up all entries of each row in the original kernel matrix double kernelScore(0.0); for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++) { for (int j = i; j < cl*100+nrOfExamplesPerClass; j++) { kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigmaGPVarApprox); kernelMatrix(i-cl*100,j-cl*100) = kernelScore; if (i != j) kernelMatrix(j-cl*100,i-cl*100) = kernelScore; } } //train GP Var Approx NICE::Vector matrixDInv; for (int i = 0; i < runsPerClassToAverageTraining; i++) trainGPVarApprox(matrixDInv, noiseGPVarApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining ); //train GP Var NICE::Matrix GPVarCholesky; trainGPVar(GPVarCholesky, noiseGPVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining ); //train GP Mean Approx NICE::Vector GPMeanApproxRightPart; trainGPMeanApprox(GPMeanApproxRightPart, noiseGPMeanApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining ); //train GP Mean NICE::Vector GPMeanRightPart; trainGPMean(GPMeanRightPart, noiseGPMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining ); //train Parzen //nothing to do :) //train SVDD //TODO what do we need here? KCMinimumEnclosingBall *svdd = trainSVDD(noiseSVDDParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining ); tTrain.stop(); std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl; std::cerr << "training done - now perform the evaluation" << std::endl; // ------------------------------ TESTING ------------------------------ std::cerr << "Classification step ... with " << imageNetTest.getNumPreloadedExamples() << " examples" << std::endl; ClassificationResults resultsGPVarApprox; ClassificationResults resultsGPVar; ClassificationResults resultsGPMeanApprox; ClassificationResults resultsGPMean; ClassificationResults resultsParzen; ClassificationResults resultsSVDD; ProgressBar pb; Timer tTest; tTest.start(); Timer tTestSingle; double timeForSingleExamplesGPVarApprox(0.0); double timeForSingleExamplesGPVar(0.0); double timeForSingleExamplesGPMeanApprox(0.0); double timeForSingleExamplesGPMean(0.0); double timeForSingleExamplesParzen(0.0); double timeForSingleExamplesSVDD(0.0); for ( uint i = 0 ; i < (uint)imageNetTest.getNumPreloadedExamples(); i++ ) { pb.update ( imageNetTest.getNumPreloadedExamples() ); const SparseVector & svec = imageNetTest.getPreloadedExample ( i ); //TODO: again we should use method-specific optimal parameters. If we're only interested in the runtimes, this doesn't matter double kernelSelf (measureDistance(svec,svec, kernelSigmaGPVarApprox) ); NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0); for (int j = 0; j < nrOfExamplesPerClass; j++) { kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigmaGPVarApprox); } //evaluate GP Var Approx ClassificationResult rGPVarApprox; evaluateGPVarApprox( kernelVector, kernelSelf, matrixDInv, rGPVarApprox, timeForSingleExamplesGPVarApprox, runsPerClassToAverageTesting ); //evaluate GP Var ClassificationResult rGPVar; evaluateGPVar( kernelVector, kernelSelf, GPVarCholesky, rGPVar, timeForSingleExamplesGPVar, runsPerClassToAverageTesting ); //evaluate GP Mean Approx ClassificationResult rGPMeanApprox; evaluateGPMeanApprox( kernelVector, matrixDInv, rGPMeanApprox, timeForSingleExamplesGPMeanApprox, runsPerClassToAverageTesting ); //evaluate GP Mean ClassificationResult rGPMean; evaluateGPMean( kernelVector, GPMeanRightPart, rGPMean, timeForSingleExamplesGPMean, runsPerClassToAverageTesting ); //evaluate Parzen ClassificationResult rParzen; evaluateParzen( kernelVector, rParzen, timeForSingleExamplesParzen, runsPerClassToAverageTesting ); //evaluate SVDD ClassificationResult rSVDD; evaluateSVDD( svdd, kernelVector, rSVDD, timeForSingleExamplesSVDD, runsPerClassToAverageTesting ); // set ground truth label rGPVarApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0; rGPVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0; rGPMeanApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0; rGPMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0; rParzen.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0; rSVDD.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0; // std::cerr << "scores: " << std::endl; // scores >> std::cerr; // std::cerr << "gt: " << r.classno_groundtruth << " -- " << r.classno << std::endl; resultsGPVarApprox.push_back ( rGPVarApprox ); resultsGPVar.push_back ( rGPVar ); resultsGPMeanApprox.push_back ( rGPMeanApprox ); resultsGPMean.push_back ( rGPMean ); resultsParzen.push_back ( rParzen ); resultsSVDD.push_back ( rSVDD ); } tTest.stop(); std::cerr << "Time used for evaluating class " << cl << ": " << tTest.getLast() << std::endl; timeForSingleExamplesGPVarApprox/= imageNetTest.getNumPreloadedExamples(); timeForSingleExamplesGPVar/= imageNetTest.getNumPreloadedExamples(); timeForSingleExamplesGPMeanApprox/= imageNetTest.getNumPreloadedExamples(); timeForSingleExamplesGPMean/= imageNetTest.getNumPreloadedExamples(); timeForSingleExamplesParzen/= imageNetTest.getNumPreloadedExamples(); timeForSingleExamplesSVDD/= imageNetTest.getNumPreloadedExamples(); std::cerr << "GPVarApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVarApprox << std::endl; std::cerr << "GPVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVar << std::endl; std::cerr << "GPMeanApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMeanApprox << std::endl; std::cerr << "GPMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMean << std::endl; std::cerr << "Parzen -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesParzen << std::endl; std::cerr << "SVDD -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesSVDD << std::endl; // std::cerr << "Writing results to " << resultsfile << std::endl; // results.writeWEKA ( resultsfile, 1 ); double perfvalueGPVarApprox = resultsGPVarApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC ); double perfvalueGPVar = resultsGPVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC ); double perfvalueGPMeanApprox = resultsGPMeanApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC ); double perfvalueGPMean = resultsGPMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC ); double perfvalueParzen = resultsParzen.getBinaryClassPerformance( ClassificationResults::PERF_AUC ); double perfvalueSVDD = resultsSVDD.getBinaryClassPerformance( ClassificationResults::PERF_AUC ); std::cerr << "Performance GPVarApprox: " << perfvalueGPVarApprox << std::endl; std::cerr << "Performance GPVar: " << perfvalueGPVar << std::endl; std::cerr << "Performance GPMeanApprox: " << perfvalueGPMeanApprox << std::endl; std::cerr << "Performance GPMean: " << perfvalueGPMean << std::endl; std::cerr << "Performance Parzen: " << perfvalueParzen << std::endl; std::cerr << "Performance SVDD: " << perfvalueSVDD << std::endl; OverallPerformanceGPVarApprox += perfvalueGPVar; OverallPerformanceGPVar += perfvalueGPVarApprox; OverallPerformanceGPMeanApprox += perfvalueGPMeanApprox; OverallPerformanceGPMean += perfvalueGPMean; OverallPerformanceParzen += perfvalueParzen; OverallPerformanceSVDD += perfvalueSVDD; // clean up memory used by SVDD delete svdd; } OverallPerformanceGPVarApprox /= nrOfClassesToConcidere; OverallPerformanceGPVar /= nrOfClassesToConcidere; OverallPerformanceGPMeanApprox /= nrOfClassesToConcidere; OverallPerformanceGPMean /= nrOfClassesToConcidere; OverallPerformanceParzen /= nrOfClassesToConcidere; OverallPerformanceSVDD /= nrOfClassesToConcidere; std::cerr << "overall performance GPVarApprox: " << OverallPerformanceGPVarApprox << std::endl; std::cerr << "overall performance GPVar: " << OverallPerformanceGPVar << std::endl; std::cerr << "overall performance GPMeanApprox: " << OverallPerformanceGPMeanApprox << std::endl; std::cerr << "overall performance GPMean: " << OverallPerformanceGPMean << std::endl; std::cerr << "overall performance Parzen: " << OverallPerformanceParzen << std::endl; std::cerr << "overall performance SVDD: " << OverallPerformanceSVDD << std::endl; return 0; }