@@ -28,7 +28,7 @@ using namespace NICE;
using namespace OBJREC;


// --------------- THE KERNEL FUNCTION ( exponential kernel with Euclidean distance ) ----------------------
-double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)//, const bool & verbose = false)
+double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)
{
  double inner_sum(0.0);
@@ -38,6 +38,7 @@ double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector
  NICE::SparseVector::const_iterator aIt = a.begin();
  NICE::SparseVector::const_iterator bIt = b.begin();

+  //compute the euclidean distance between both feature vectors (given as SparseVectors)
  while ( (aIt != a.end()) && (bIt != b.end()) )
  {
    if (aIt->first == bIt->first)
@@ -72,13 +73,18 @@ double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector
    bIt++;
  }

+  //normalization of the exponent
  inner_sum /= (2.0*sigma*sigma);

+  //finally, compute the RBF-kernel score (RBF = radial basis function)
  return exp(-inner_sum);
}
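+// For reference, the score computed above is the standard RBF kernel
+//   k(a,b) = exp( -||a-b||^2 / (2*sigma^2) ),
+// so identical inputs obtain the maximal similarity k(a,a) = exp(0) = 1.0.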

+// --------------- INPUT METHOD ----------------------
void readParameters(string & filename, const int & size, NICE::Vector & parameterVector)
{
+  //we read the parameters produced by a Matlab script (each line contains a single number, which is the optimal parameter for this class)
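+  // such a file simply looks like this (illustrative values):
+  //   0.65
+  //   1.20
+  //   0.98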
+
  parameterVector.resize(size);
  parameterVector.set(0.0);
@@ -120,6 +126,8 @@ void inline trainGPVarApprox(NICE::Vector & matrixDInv, const double & noise, co
  else
    matrixDInv.set(0.0);

+  // the approximation creates a diagonal matrix (which is easy to invert)
+  // with entries equal to the row sums of the original kernel matrix
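+  // (i.e. D_ii = sum_j K_ij, so applying D^{-1} is just an element-wise division)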
  for (int i = 0; i < nrOfExamplesPerClass; i++)
  {
    for (int j = i; j < nrOfExamplesPerClass; j++)
@@ -149,11 +157,12 @@ void inline trainGPVar(NICE::Matrix & choleskyMatrix, const double & noise, cons

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
-
    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);

    choleskyMatrix.resize(nrOfExamplesPerClass, nrOfExamplesPerClass);
    choleskyMatrix.set(0.0);

+
+    //compute the Cholesky decomposition of K in order to compute K^{-1} \cdot k_* for new test samples
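+    // (robustChol presumably adds a small step to the diagonal if K is not
+    //  numerically positive definite; K^{-1} \cdot k_* then reduces to two triangular solves)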
    cr.robustChol ( kernelMatrix, choleskyMatrix );
  }
@@ -177,6 +186,8 @@ void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double
  else
    matrixDInv.set(0.0);

+  // the approximation creates a diagonal matrix (which is easy to invert)
+  // with entries equal to the row sums of the original kernel matrix
  for (int i = 0; i < nrOfExamplesPerClass; i++)
  {
    for (int j = i; j < nrOfExamplesPerClass; j++)
@@ -187,7 +198,7 @@ void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double
    }
  }

-  //compute its inverse (and multiply every element with the label vector, which contains only one-entries...)
+  //compute its inverse (and multiply by the label vector, which contains only ones and can therefore be skipped...)
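+  // since y = (1,...,1)^T, this presumably boils down to GPMeanApproxRightPart[i] = 1.0 / D_ii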
  GPMeanApproxRightPart.resize(nrOfExamplesPerClass);
  for (int i = 0; i < nrOfExamplesPerClass; i++)
  {
@@ -212,12 +223,16 @@ void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, co
    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);

    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);

+
+    //compute the Cholesky decomposition of K in order to compute K^{-1} \cdot y
    cr.robustChol ( kernelMatrix, choleskyMatrix );

    GPMeanRightPart.resize(nrOfExamplesPerClass);
    GPMeanRightPart.set(0.0);

    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)

+
+    // pre-compute K^{-1} \cdot y, which is the same for every new test sample
    choleskySolveLargeScale ( choleskyMatrix, y, GPMeanRightPart );
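+    // at test time the predictive mean is then just a dot product:
+    //   mean(x_*) = k_*^T \cdot GPMeanRightPart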
  }
@@ -225,14 +240,167 @@ void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, co
  std::cerr << "Precise time used for GPMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}

+// GP subset of regressors
+void inline trainGPSRMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples )
+{
+  std::vector<int> examplesToChoose;
+  indicesOfChosenExamples.clear();
+
+  //add all examples for possible choice
+  for (int i = 0; i < nrOfExamplesPerClass; i++)
+  {
+    examplesToChoose.push_back(i);
+  }
+
+  //now randomly choose some examples as the active subset
+  int index;
+  for (int i = 0; i < std::min(nrOfRegressors,nrOfExamplesPerClass); i++)
+  {
+    index = rand() % examplesToChoose.size();
+    indicesOfChosenExamples.push_back(examplesToChoose[index]);
+    examplesToChoose.erase(examplesToChoose.begin() + index);
+  }
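+  // (this is sampling without replacement; each erase() is linear in the
+  //  remaining candidates, which is fine for the problem sizes used here)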
+
+  NICE::Matrix Kmn (indicesOfChosenExamples.size(), nrOfExamplesPerClass, 0.0);
+  int rowCnt(0);
+  //set every row
+  for (int i = 0; i < indicesOfChosenExamples.size(); i++, rowCnt++ )
+  {
+    //set every element of this row
+    NICE::Vector col = kernelMatrix.getRow(indicesOfChosenExamples[i]);
+    for (int j = 0; j < nrOfExamplesPerClass; j++)
+    {
+      Kmn(rowCnt,j) = col(j);
+    }
+  }
+
+  //we could speed this up if we ordered the indices
+  NICE::Matrix Kmm (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+  double tmp(0.0);
+  for (int i = 0; i < indicesOfChosenExamples.size(); i++ )
+  {
+    for (int j = i; j < indicesOfChosenExamples.size(); j++ )
+    {
+      tmp = kernelMatrix(indicesOfChosenExamples[i], indicesOfChosenExamples[j]);
+      Kmm(i,j) = tmp;
+      if (i != j)
+        Kmm(j,i) = tmp;
+    }
+  }
+
+  Timer tTrainPrecise;
+  tTrainPrecise.start();
+
+  for (int run = 0; run < runsPerClassToAverageTraining; run++)
+  {
+    NICE::Matrix innerMatrix;
+    innerMatrix.multiply(Kmn, Kmn, true /* transpose first matrix*/, false /* transpose second matrix*/);
+
+    innerMatrix.addScaledMatrix( noise, Kmm );
+
+    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
+    NICE::Vector projectedLabels;
+    projectedLabels.multiply(Kmn,y);
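+    // subset-of-regressors training: the weight vector alpha solves
+    //   (Kmn * Kmn^T + noise * Kmm) * alpha = Kmn * y,
+    // with Kmn (m x n) holding the kernel values between the m chosen
+    // regressors and all n training examples; innerMatrix must thus be m x m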
+
+    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+
+    NICE::Matrix choleskyMatrix (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+
+    //compute the Cholesky decomposition of the inner matrix in order to obtain the weight vector
+    cr.robustChol ( innerMatrix, choleskyMatrix );
+
+    GPMeanRightPart.resize(indicesOfChosenExamples.size());
+    GPMeanRightPart.set(0.0);
+
+    // pre-compute the weight vector, which is the same for every new test sample
+    choleskySolveLargeScale ( choleskyMatrix, projectedLabels, GPMeanRightPart );
+  }
+
+  tTrainPrecise.stop();
+  std::cerr << "Precise time used for GPSRMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
+}
+
+// GP subset of regressors
+void inline trainGPSRVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples )
+{
+  std::vector<int> examplesToChoose;
+  indicesOfChosenExamples.clear();
+
+  //add all examples for possible choice
+  for (int i = 0; i < nrOfExamplesPerClass; i++)
+  {
+    examplesToChoose.push_back(i);
+  }
+
+  //now randomly choose some examples as the active subset
+  int index;
+  for (int i = 0; i < std::min(nrOfRegressors,nrOfExamplesPerClass); i++)
+  {
+    index = rand() % examplesToChoose.size();
+    indicesOfChosenExamples.push_back(examplesToChoose[index]);
+    examplesToChoose.erase(examplesToChoose.begin() + index);
+  }
+
+  NICE::Matrix Kmn (indicesOfChosenExamples.size(), nrOfExamplesPerClass, 0.0);
+  int rowCnt(0);
+  //set every row
+  for (int i = 0; i < indicesOfChosenExamples.size(); i++, rowCnt++ )
+  {
+    //set every element of this row
+    NICE::Vector col = kernelMatrix.getRow(indicesOfChosenExamples[i]);
+    for (int j = 0; j < nrOfExamplesPerClass; j++)
+    {
+      Kmn(rowCnt,j) = col(j);
+    }
+  }
+
+  //we could speed this up if we ordered the indices
+  NICE::Matrix Kmm (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
+  double tmp(0.0);
+  for (int i = 0; i < indicesOfChosenExamples.size(); i++ )
+  {
+    for (int j = i; j < indicesOfChosenExamples.size(); j++ )
+    {
+      tmp = kernelMatrix(indicesOfChosenExamples[i], indicesOfChosenExamples[j]);
+      Kmm(i,j) = tmp;
+      if (i != j)
+        Kmm(j,i) = tmp;
+    }
+  }
+
+  Timer tTrainPrecise;
+  tTrainPrecise.start();
+
+  for (int run = 0; run < runsPerClassToAverageTraining; run++)
+  {
+    NICE::Matrix innerMatrix;
+    innerMatrix.multiply(Kmn, Kmn, true /* transpose first matrix*/, false /* transpose second matrix*/);
+
+    innerMatrix.addScaledMatrix( noise, Kmm );
+
+    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+
+    choleskyMatrix.resize( indicesOfChosenExamples.size(), indicesOfChosenExamples.size() );
+    choleskyMatrix.set( 0.0 );
+
+    //compute the Cholesky decomposition of the inner matrix for the variance evaluations at test time
+    cr.robustChol ( innerMatrix, choleskyMatrix );
+  }
+
+  tTrainPrecise.stop();
+  std::cerr << "Precise time used for GPSRVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
+}
+
KCMinimumEnclosingBall *trainSVDD( const double & noise, const NICE::Matrix kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
{
  Config conf;
  // set the outlier ratio (Paul optimized this parameter FIXME)
  conf.sD( "SVDD", "outlier_fraction", 0.1 );
+  conf.sB( "SVDD", "verbose", false );
  KCMinimumEnclosingBall *svdd = new KCMinimumEnclosingBall ( &conf, NULL /* no kernel function */, "SVDD" /* config section */);
-
  KernelData kernelData ( &conf, kernelMatrix, "Kernel" , false /* update cholesky */ );

  Timer tTrainPrecise;
@@ -262,6 +430,8 @@ void inline evaluateGPVarApprox(const NICE::Vector & kernelVector, const double

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
+    // uncertainty = k_{**} - k_*^T \cdot D^{-1} \cdot k_* where D is our nice approximation of K
+
    NICE::Vector rightPart (kernelVector.size());
    for (int j = 0; j < kernelVector.size(); j++)
    {
@@ -290,8 +460,9 @@ void inline evaluateGPVar(const NICE::Vector & kernelVector, const double & kern

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
-    NICE::Vector rightPart (kernelVector.size(),0.0);
+    // uncertainty = k_{**} - k_*^T \cdot K^{-1} \cdot k_*
+    NICE::Vector rightPart (kernelVector.size(),0.0);
    choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );

    uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
@@ -315,7 +486,8 @@ void inline evaluateGPMeanApprox(const NICE::Vector & kernelVector, const NICE::
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
-  {
+  {
+    // mean = k_*^T \cdot D^{-1} \cdot y where D is our nice approximation of K
    mean = kernelVector.scalarProduct ( rightPart );
  }
|
|
|
|
@@ -335,8 +507,10 @@ void inline evaluateGPMean(const NICE::Vector & kernelVector, const NICE::Vecto
|
|
|
|
|
|
Timer tTestSingle;
|
|
Timer tTestSingle;
|
|
tTestSingle.start();
|
|
tTestSingle.start();
|
|
|
|
+
|
|
for (int run = 0; run < runsPerClassToAverageTesting; run++)
|
|
for (int run = 0; run < runsPerClassToAverageTesting; run++)
|
|
- {
|
|
|
|
|
|
+ {
|
|
|
|
+ // \mean = \k_*^T \cdot K^{-1} \cdot y
|
|
mean = kernelVector.scalarProduct ( GPMeanRightPart );
|
|
mean = kernelVector.scalarProduct ( GPMeanRightPart );
|
|
}
|
|
}
|
|
|
|
|
|
@@ -350,6 +524,70 @@ void inline evaluateGPMean(const NICE::Vector & kernelVector, const NICE::Vecto
  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}

+void inline evaluateGPSRMean(const NICE::Vector & kernelVector, const NICE::Vector & GPSRMeanRightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting, const int & nrOfRegressors, const std::vector<int> & indicesOfChosenExamples)
+{
+  double mean;
+
+  //grab the entries corresponding to the active set
+  NICE::Vector kernelVectorM;
+  kernelVectorM.resize(nrOfRegressors);
+  for (int i = 0; i < nrOfRegressors; i++)
+  {
+    kernelVectorM[i] = kernelVector[indicesOfChosenExamples[i]];
+  }
+
+  Timer tTestSingle;
+  tTestSingle.start();
+
+  for (int run = 0; run < runsPerClassToAverageTesting; run++)
+  {
+    // mean = k_m^T \cdot alpha with the pre-computed SR weight vector alpha
+    mean = kernelVectorM.scalarProduct ( GPSRMeanRightPart );
+  }
+
+  tTestSingle.stop();
+  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;
+
+  FullVector scores ( 2 );
+  scores[0] = 0.0;
+  scores[1] = mean;
+
+  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
+}
+
+void inline evaluateGPSRVar(const NICE::Vector & kernelVector, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples, const double & noise)
+{
+  double uncertainty;
+
+  //grab the entries corresponding to the active set
+  NICE::Vector kernelVectorM;
+  kernelVectorM.resize(nrOfRegressors);
+  for (int i = 0; i < nrOfRegressors; i++)
+  {
+    kernelVectorM[i] = kernelVector[indicesOfChosenExamples[i]];
+  }
+
+  Timer tTestSingle;
+  tTestSingle.start();
+
+  for (int run = 0; run < runsPerClassToAverageTesting; run++)
+  {
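+    // SR predictive variance: uncertainty = noise * k_m^T * A^{-1} * k_m,
+    // where A = Kmn * Kmn^T + noise * Kmm is the matrix factorized during training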
+    NICE::Vector rightPart (nrOfRegressors,0.0);
+    choleskySolveLargeScale ( choleskyMatrix, kernelVectorM, rightPart );
+
+    uncertainty = noise*kernelVectorM.scalarProduct ( rightPart );
+  }
+
+  tTestSingle.stop();
+  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;
+
+  FullVector scores ( 2 );
+  scores[0] = 0.0;
+  scores[1] = 1.0 - uncertainty;
+
+  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
+}
+
void inline evaluateParzen(const NICE::Vector & kernelVector, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  double score;
@@ -358,8 +596,9 @@ void inline evaluateParzen(const NICE::Vector & kernelVector, ClassificationRes
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
-  {
-    double score( kernelVector.Sum() / (double) kernelVector.size() ); //maybe we could directly call kernelVector.Mean()
+  {
+    //the Parzen score is nothing but the average similarity to all training samples
+    score = kernelVector.Sum() / (double) kernelVector.size(); //maybe we could directly call kernelVector.Mean() here
  }

  tTestSingle.stop();
@@ -399,6 +638,7 @@ int main (int argc, char **argv)
  int nrOfExamplesPerClass = conf.gI("main", "nrOfExamplesPerClass", 50);
  nrOfExamplesPerClass = std::min(nrOfExamplesPerClass, 100); // we do not have more than 100 examples per class

+  //which classes to consider? we assume consecutive class numbers
  int indexOfFirstClass = conf.gI("main", "indexOfFirstClass", 0);
  indexOfFirstClass = std::max(indexOfFirstClass, 0); //we do not have fewer than 0 classes
  int indexOfLastClass = conf.gI("main", "indexOfLastClass", 999);
@@ -406,9 +646,11 @@ int main (int argc, char **argv)

  int nrOfClassesToConcidere = (indexOfLastClass - indexOfFirstClass)+1;

-  int runsPerClassToAverageTraining = conf.gI( "main", "runsPerClassToAverageTraining", 1 );
+  //repetitions for every class to achieve reliable time evaluations
+  int runsPerClassToAverageTraining = conf.gI( "main", "runsPerClassToAverageTraining", 1 );
  int runsPerClassToAverageTesting = conf.gI( "main", "runsPerClassToAverageTesting", 1 );

+  // share parameters among methods and classes?
  bool shareParameters = conf.gB("main" , "shareParameters", true);
@@ -424,6 +666,12 @@ int main (int argc, char **argv)
  //GP mean
  NICE::Vector sigmaGPMeanParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPMeanParas(nrOfClassesToConcidere,0.0);
+  //GP SR mean
+  NICE::Vector sigmaGPSRMeanParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPSRMeanParas(nrOfClassesToConcidere,0.0);
+  //GP SR var
+  NICE::Vector sigmaGPSRVarParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPSRVarParas(nrOfClassesToConcidere,0.0);
  //Parzen
  NICE::Vector sigmaParzenParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseParzenParas(nrOfClassesToConcidere,0.0);
@@ -466,6 +714,12 @@ int main (int argc, char **argv)
  //GP mean
  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPMeanParas);
  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPMeanParas);
+  //GP SR mean
+  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPSRMeanParas);
+  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPSRMeanParas);
+  //GP SR var
+  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPSRVarParas);
+  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPSRVarParas);
  //Parzen
  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaParzenParas);
  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseParzenParas);
@@ -475,6 +729,7 @@ int main (int argc, char **argv)
  }
  else
  {
+    //use the same parameter values for all methods and classes
    double noise = conf.gD( "main", "noise", 0.01 );
    double sigma = conf.gD( "main", "sigma", 1.0 );
@@ -489,6 +744,12 @@ int main (int argc, char **argv)
    //GP mean
    sigmaGPMeanParas.set(sigma);
    noiseGPMeanParas.set(noise);
+    //GP SR mean
+    sigmaGPSRMeanParas.set(sigma);
+    noiseGPSRMeanParas.set(noise);
+    //GP SR var
+    sigmaGPSRVarParas.set(sigma);
+    noiseGPSRVarParas.set(noise);
    //Parzen
    sigmaParzenParas.set(sigma);
    noiseParzenParas.set(noise);
@@ -530,6 +791,8 @@ int main (int argc, char **argv)
  double OverallPerformanceGPVar(0.0);
  double OverallPerformanceGPMeanApprox(0.0);
  double OverallPerformanceGPMean(0.0);
+  double OverallPerformanceGPSRMean(0.0);
+  double OverallPerformanceGPSRVar(0.0);
  double OverallPerformanceParzen(0.0);
  double OverallPerformanceSVDD(0.0);
@@ -538,6 +801,8 @@ int main (int argc, char **argv)
  double kernelSigmaGPVar;
  double kernelSigmaGPMeanApprox;
  double kernelSigmaGPMean;
+  double kernelSigmaGPSRMean;
+  double kernelSigmaGPSRVar;
  double kernelSigmaParzen;
  double kernelSigmaSVDD;
@@ -551,30 +816,23 @@ int main (int argc, char **argv)
    kernelSigmaGPVar = sigmaGPVarParas[cl];
    kernelSigmaGPMeanApprox = sigmaGPMeanApproxParas[cl];
    kernelSigmaGPMean = sigmaGPMeanParas[cl];
+    kernelSigmaGPSRMean = sigmaGPSRMeanParas[cl];
+    kernelSigmaGPSRVar = sigmaGPSRVarParas[cl];
    kernelSigmaParzen = sigmaParzenParas[cl];
    kernelSigmaSVDD = sigmaSVDDParas[cl];

    Timer tTrain;
    tTrain.start();
-
+
+    //compute the kernel matrix, which will be shared among all methods in this scenario
    NICE::Matrix kernelMatrix(nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);

-    //TODO in theory we have to compute a single kernel Matrix for every method, since every method may have its own optimal parameter
+    //NOTE in theory we have to compute a separate kernel matrix for every method, since every method may have its own optimal parameter
    // I'm sure we can speed it up a bit and compute it only once for every distinct parameter
    //nonetheless, it's not as nice as we originally thought (same matrix for every method)

-    //NOTE since we're only interested in runtimes, we can ignore this (and still do some further code optimization...) //TODO
-
-/* //adding some noise, if necessary
-    if (noiseParas[cl] != 0.0)
-    {
-      kernelMatrix.addIdentity(noiseParas[cl]);
-    }
-    else
-    {
-      //zero was already set
-    } */
-
+    //NOTE Nonetheless, since we're only interested in runtimes, we can ignore this
+
    //now sum up all entries of each row in the original kernel matrix
    double kernelScore(0.0);
    for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++)
@@ -589,10 +847,10 @@
    }
  }

-  //train GP Var Approx
+  // now call the individual training methods
+
+  //train GP Var Approx
  NICE::Vector matrixDInv;
-  for (int i = 0; i < runsPerClassToAverageTraining; i++)
  trainGPVarApprox(matrixDInv, noiseGPVarApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

  //train GP Var
@@ -607,11 +865,22 @@ int main (int argc, char **argv)
  NICE::Vector GPMeanRightPart;
  trainGPMean(GPMeanRightPart, noiseGPMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

+  //train GP SR Mean
+  NICE::Vector GPSRMeanRightPart;
+  std::vector<int> indicesOfChosenExamplesGPSRMean;
+  int nrOfRegressors = conf.gI( "GPSR", "nrOfRegressors", nrOfExamplesPerClass/2);
+  nrOfRegressors = std::min( nrOfRegressors, nrOfExamplesPerClass );
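+  // (using only m regressors instead of all n examples reduces the training cost
+  //  from O(n^3) to roughly O(n*m^2 + m^3); m defaults to half the examples here)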
+  trainGPSRMean(GPSRMeanRightPart, noiseGPSRMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPSRMean );
+
+  //train GP SR Var
+  NICE::Matrix GPSRVarCholesky;
+  std::vector<int> indicesOfChosenExamplesGPSRVar;
+  trainGPSRVar(GPSRVarCholesky, noiseGPSRVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPSRVar );
+
  //train Parzen
  //nothing to do :)

  //train SVDD
-  //TODO what do we need here?
  KCMinimumEnclosingBall *svdd = trainSVDD(noiseSVDDParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

  tTrain.stop();
@@ -627,7 +896,9 @@ int main (int argc, char **argv)
  ClassificationResults resultsGPVarApprox;
  ClassificationResults resultsGPVar;
  ClassificationResults resultsGPMeanApprox;
-  ClassificationResults resultsGPMean;
+  ClassificationResults resultsGPMean;
+  ClassificationResults resultsGPSRMean;
+  ClassificationResults resultsGPSRVar;
  ClassificationResults resultsParzen;
  ClassificationResults resultsSVDD;
@@ -640,6 +911,8 @@ int main (int argc, char **argv)
  double timeForSingleExamplesGPVar(0.0);
  double timeForSingleExamplesGPMeanApprox(0.0);
  double timeForSingleExamplesGPMean(0.0);
+  double timeForSingleExamplesGPSRMean(0.0);
+  double timeForSingleExamplesGPSRVar(0.0);
  double timeForSingleExamplesParzen(0.0);
  double timeForSingleExamplesSVDD(0.0);
@@ -649,7 +922,9 @@ int main (int argc, char **argv)

  const SparseVector & svec = imageNetTest.getPreloadedExample ( i );

-  //TODO: again we should use method-specific optimal parameters. If we're only interested in the runtimes, this doesn't matter
+  //NOTE: again we should use method-specific optimal parameters. If we're only interested in the runtimes, this doesn't matter
+
+  //compute (self) similarities
  double kernelSelf (measureDistance(svec,svec, kernelSigmaGPVarApprox) );
  NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0);
@@ -658,6 +933,8 @@ int main (int argc, char **argv)
    kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigmaGPVarApprox);
  }

+  //call the individual test-methods
+
  //evaluate GP Var Approx
  ClassificationResult rGPVarApprox;
  evaluateGPVarApprox( kernelVector, kernelSelf, matrixDInv, rGPVarApprox, timeForSingleExamplesGPVarApprox, runsPerClassToAverageTesting );
@@ -673,6 +950,15 @@ int main (int argc, char **argv)
  //evaluate GP Mean
  ClassificationResult rGPMean;
  evaluateGPMean( kernelVector, GPMeanRightPart, rGPMean, timeForSingleExamplesGPMean, runsPerClassToAverageTesting );
+
+  //evaluate GP SR Mean
+  ClassificationResult rGPSRMean;
+  evaluateGPSRMean( kernelVector, GPSRMeanRightPart, rGPSRMean, timeForSingleExamplesGPSRMean, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPSRMean );
+
+  //evaluate GP SR Var
+  ClassificationResult rGPSRVar;
+  evaluateGPSRVar( kernelVector, GPSRVarCholesky, rGPSRVar, timeForSingleExamplesGPSRVar, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPSRVar, noiseGPSRVarParas[cl] );
+
  //evaluate Parzen
  ClassificationResult rParzen;
@@ -688,17 +974,18 @@ int main (int argc, char **argv)
  rGPVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
  rGPMeanApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
  rGPMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+  rGPSRMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+  rGPSRVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
  rParzen.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
  rSVDD.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
-
-// std::cerr << "scores: " << std::endl;
-// scores >> std::cerr;
-// std::cerr << "gt: " << r.classno_groundtruth << " -- " << r.classno << std::endl;
-
+
+  //remember the results for the evaluation later on
  resultsGPVarApprox.push_back ( rGPVarApprox );
  resultsGPVar.push_back ( rGPVar );
  resultsGPMeanApprox.push_back ( rGPMeanApprox );
  resultsGPMean.push_back ( rGPMean );
+  resultsGPSRMean.push_back ( rGPSRMean );
+  resultsGPSRVar.push_back ( rGPSRVar );
  resultsParzen.push_back ( rParzen );
  resultsSVDD.push_back ( rSVDD );
  }
@@ -710,6 +997,8 @@ int main (int argc, char **argv)
  timeForSingleExamplesGPVar/= imageNetTest.getNumPreloadedExamples();
  timeForSingleExamplesGPMeanApprox/= imageNetTest.getNumPreloadedExamples();
  timeForSingleExamplesGPMean/= imageNetTest.getNumPreloadedExamples();
+  timeForSingleExamplesGPSRMean/= imageNetTest.getNumPreloadedExamples();
+  timeForSingleExamplesGPSRVar/= imageNetTest.getNumPreloadedExamples();
  timeForSingleExamplesParzen/= imageNetTest.getNumPreloadedExamples();
  timeForSingleExamplesSVDD/= imageNetTest.getNumPreloadedExamples();
@@ -717,15 +1006,18 @@ int main (int argc, char **argv)
  std::cerr << "GPVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVar << std::endl;
  std::cerr << "GPMeanApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMeanApprox << std::endl;
  std::cerr << "GPMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMean << std::endl;
+  std::cerr << "GPSRMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPSRMean << std::endl;
+  std::cerr << "GPSRVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPSRVar << std::endl;
  std::cerr << "Parzen -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesParzen << std::endl;
  std::cerr << "SVDD -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesSVDD << std::endl;

-// std::cerr << "Writing results to " << resultsfile << std::endl;
-// results.writeWEKA ( resultsfile, 1 );
+  // run the AUC-evaluation
  double perfvalueGPVarApprox = resultsGPVarApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
  double perfvalueGPVar = resultsGPVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
  double perfvalueGPMeanApprox = resultsGPMeanApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
  double perfvalueGPMean = resultsGPMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+  double perfvalueGPSRMean = resultsGPSRMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+  double perfvalueGPSRVar = resultsGPSRVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
  double perfvalueParzen = resultsParzen.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
  double perfvalueSVDD = resultsSVDD.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
@@ -733,6 +1025,8 @@ int main (int argc, char **argv)
  std::cerr << "Performance GPVar: " << perfvalueGPVar << std::endl;
  std::cerr << "Performance GPMeanApprox: " << perfvalueGPMeanApprox << std::endl;
  std::cerr << "Performance GPMean: " << perfvalueGPMean << std::endl;
+  std::cerr << "Performance GPSRMean: " << perfvalueGPSRMean << std::endl;
+  std::cerr << "Performance GPSRVar: " << perfvalueGPSRVar << std::endl;
  std::cerr << "Performance Parzen: " << perfvalueParzen << std::endl;
  std::cerr << "Performance SVDD: " << perfvalueSVDD << std::endl;
@@ -740,6 +1034,8 @@ int main (int argc, char **argv)
  OverallPerformanceGPVar += perfvalueGPVar;
  OverallPerformanceGPMeanApprox += perfvalueGPMeanApprox;
  OverallPerformanceGPMean += perfvalueGPMean;
+  OverallPerformanceGPSRMean += perfvalueGPSRMean;
+  OverallPerformanceGPSRVar += perfvalueGPSRVar;
  OverallPerformanceParzen += perfvalueParzen;
  OverallPerformanceSVDD += perfvalueSVDD;
@@ -751,6 +1047,8 @@ int main (int argc, char **argv)
  OverallPerformanceGPVar /= nrOfClassesToConcidere;
  OverallPerformanceGPMeanApprox /= nrOfClassesToConcidere;
  OverallPerformanceGPMean /= nrOfClassesToConcidere;
+  OverallPerformanceGPSRMean /= nrOfClassesToConcidere;
+  OverallPerformanceGPSRVar /= nrOfClassesToConcidere;
  OverallPerformanceParzen /= nrOfClassesToConcidere;
  OverallPerformanceSVDD /= nrOfClassesToConcidere;
@@ -758,6 +1056,8 @@ int main (int argc, char **argv)
  std::cerr << "overall performance GPVar: " << OverallPerformanceGPVar << std::endl;
  std::cerr << "overall performance GPMeanApprox: " << OverallPerformanceGPMeanApprox << std::endl;
  std::cerr << "overall performance GPMean: " << OverallPerformanceGPMean << std::endl;
+  std::cerr << "overall performance GPSRMean: " << OverallPerformanceGPSRMean << std::endl;
+  std::cerr << "overall performance GPSRVar: " << OverallPerformanceGPSRVar << std::endl;
  std::cerr << "overall performance Parzen: " << OverallPerformanceParzen << std::endl;
  std::cerr << "overall performance SVDD: " << OverallPerformanceSVDD << std::endl;