
/**
* @file testImageNetBinaryBruteForce.cpp
* @brief perform ImageNet tests with binary tasks for OCC using GP mean and variance, sophisticated approximations of both, Parzen Density Estimation and SVDD
* @author Alexander Lütz
* @date 23-05-2012 (dd-mm-yyyy)
*/

#include <ctime>
#include <time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <cstdlib>

#include "core/basics/Config.h"
#include "core/basics/Timer.h"
#include "core/algebra/CholeskyRobust.h"
#include "core/vector/Algorithms.h"
#include "core/vector/SparseVectorT.h"

#include "vislearning/cbaselib/ClassificationResults.h"
#include "vislearning/baselib/ProgressBar.h"
#include "vislearning/classifier/kernelclassifier/KCMinimumEnclosingBall.h"

#include "fast-hik/tools.h"
#include "fast-hik/MatFileIO.h"
#include "fast-hik/ImageNetData.h"

using namespace std;
using namespace NICE;
using namespace OBJREC;

// --------------- THE KERNEL FUNCTION ( exponential kernel with Euclidean distance ) ----------------------
double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0 )
{
  double inner_sum(0.0);
  double d;

  //new version, where we needed on average 0.001707 s for each test sample
  NICE::SparseVector::const_iterator aIt = a.begin();
  NICE::SparseVector::const_iterator bIt = b.begin();

  //compute the squared Euclidean distance between both feature vectors (given as SparseVectors)
  while ( (aIt != a.end()) && (bIt != b.end()) )
  {
    if (aIt->first == bIt->first)
    {
      d = ( aIt->second - bIt->second );
      inner_sum += d * d;
      aIt++;
      bIt++;
    }
    else if ( aIt->first < bIt->first)
    {
      inner_sum += aIt->second * aIt->second;
      aIt++;
    }
    else
    {
      inner_sum += bIt->second * bIt->second;
      bIt++;
    }
  }

  //compute remaining values, if b reached the end but not a
  while (aIt != a.end())
  {
    inner_sum += aIt->second * aIt->second;
    aIt++;
  }
  //compute remaining values, if a reached the end but not b
  while (bIt != b.end())
  {
    inner_sum += bIt->second * bIt->second;
    bIt++;
  }

  //normalization of the exponent
  inner_sum /= (2.0*sigma*sigma);

  //finally, compute the RBF-kernel score (RBF = radial basis function)
  return exp(-inner_sum);
}
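
// With the squared Euclidean distance accumulated above, measureDistance returns the
// RBF kernel value
//    k(a,b) = exp( - ||a-b||^2 / (2*sigma^2) ),
// i.e. a similarity in (0,1], not a distance, despite the function's name.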
// --------------- INPUT METHOD ----------------------
void readParameters(string & filename, const int & size, NICE::Vector & parameterVector)
{
  //we read the parameters which are given from a Matlab-Script (each line contains a single number, which is the optimal parameter for this class)
  parameterVector.resize(size);
  parameterVector.set(0.0);

  ifstream is(filename.c_str());
  if ( !is.good() )
    fthrow(IOException, "Unable to read parameters.");

  string tmp;
  int cnt(0);
  //read at most 'size' values, one per line
  while ( (cnt < size) && (is >> tmp) )
  {
    parameterVector[cnt] = atof(tmp.c_str());
    cnt++;
  }

  is.close();
}
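
// readParameters expects plain text files with one value per line (one line per class);
// an illustrative sigma file for three classes could look like:
//    1.0
//    0.5
//    2.0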
//------------------- TRAINING METHODS --------------------
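
// trainGPVarApprox replaces the kernel matrix K by a diagonal matrix D with
//    D_ii = noise + sum_j K_ij   (noise plus the row sums of K),
// so that the predictive variance can later be approximated cheaply as
//    var(x_*) ~= k(x_*,x_*) - k_*^T D^{-1} k_*    (see evaluateGPVarApprox).
// Only the inverted diagonal entries are stored in matrixDInv.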
void inline trainGPVarApprox(NICE::Vector & matrixDInv, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
{
  std::cerr << "nrOfExamplesPerClass : " << nrOfExamplesPerClass << std::endl;

  Timer tTrainPreciseTimer;
  tTrainPreciseTimer.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    matrixDInv.resize(nrOfExamplesPerClass);
    matrixDInv.set(0.0);

    //compute D
    //start with adding some noise, if necessary
    if (noise != 0.0)
      matrixDInv.set(noise);
    else
      matrixDInv.set(0.0);

    // the approximation creates a diagonal matrix (which is easy to invert)
    // with entries equal to the row sums of the original kernel matrix
    for (int i = 0; i < nrOfExamplesPerClass; i++)
    {
      for (int j = i; j < nrOfExamplesPerClass; j++)
      {
        matrixDInv[i] += kernelMatrix(i,j);
        if (i != j)
          matrixDInv[j] += kernelMatrix(i,j);
      }
    }

    //compute its inverse
    for (int i = 0; i < nrOfExamplesPerClass; i++)
    {
      matrixDInv[i] = 1.0 / matrixDInv[i];
    }
  }
  tTrainPreciseTimer.stop();

  std::cerr << "Precise time used for GPVarApprox training class " << classNumber << ": " << tTrainPreciseTimer.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}
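
// trainGPVar keeps the full kernel matrix and computes a robust Cholesky factorization
// of K; the factor is later used to solve K^{-1} k_* for the exact predictive variance
//    var(x_*) = k(x_*,x_*) - k_*^T K^{-1} k_*    (see evaluateGPVar).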
void inline trainGPVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
{
  Timer tTrainPrecise;
  tTrainPrecise.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);

    choleskyMatrix.resize(nrOfExamplesPerClass, nrOfExamplesPerClass);
    choleskyMatrix.set(0.0);

    //compute the Cholesky decomposition of K in order to compute K^{-1} \cdot k_* for new test samples
    cr.robustChol ( kernelMatrix, choleskyMatrix );
  }
  tTrainPrecise.stop();

  std::cerr << "Precise time used for GPVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}
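
// trainGPMeanApprox uses the same diagonal substitute D as trainGPVarApprox. Since the
// OCC label vector y consists only of ones, the pre-computed right-hand side is simply
//    D^{-1} y = ( 1/D_11, ..., 1/D_nn )^T,
// and the predictive mean is approximated as
//    mean(x_*) ~= k_*^T D^{-1} y    (see evaluateGPMeanApprox).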
void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
{
  Timer tTrainPrecise;
  tTrainPrecise.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    NICE::Vector matrixDInv(nrOfExamplesPerClass,0.0);

    //compute D
    //start with adding some noise, if necessary
    if (noise != 0.0)
      matrixDInv.set(noise);
    else
      matrixDInv.set(0.0);

    // the approximation creates a diagonal matrix (which is easy to invert)
    // with entries equal to the row sums of the original kernel matrix
    for (int i = 0; i < nrOfExamplesPerClass; i++)
    {
      for (int j = i; j < nrOfExamplesPerClass; j++)
      {
        matrixDInv[i] += kernelMatrix(i,j);
        if (i != j)
          matrixDInv[j] += kernelMatrix(i,j);
      }
    }

    //compute its inverse (the multiplication with the label vector can be skipped, since it contains only ones)
    GPMeanApproxRightPart.resize(nrOfExamplesPerClass);
    for (int i = 0; i < nrOfExamplesPerClass; i++)
    {
      GPMeanApproxRightPart[i] = 1.0 / matrixDInv[i];
    }
  }
  tTrainPrecise.stop();

  std::cerr << "Precise time used for GPMeanApprox training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}
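
// trainGPMean pre-computes the exact GP regression weights alpha = K^{-1} y (with y = 1
// in the OCC setting) via a robust Cholesky factorization, so that testing only requires
// the inner product  mean(x_*) = k_*^T alpha    (see evaluateGPMean).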
void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
{
  Timer tTrainPrecise;
  tTrainPrecise.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);

    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
    //compute the Cholesky decomposition of K in order to compute K^{-1} \cdot y
    cr.robustChol ( kernelMatrix, choleskyMatrix );

    GPMeanRightPart.resize(nrOfExamplesPerClass);
    GPMeanRightPart.set(0.0);

    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
    // pre-compute K^{-1} \cdot y, which is the same for every new test sample
    choleskySolveLargeScale ( choleskyMatrix, y, GPMeanRightPart );
  }
  tTrainPrecise.stop();

  std::cerr << "Precise time used for GPMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}
// GP subset of regressors
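// Subset of regressors (SR): a random active set M of at most nrOfRegressors training
// examples is chosen, and the weights are obtained from the reduced system
//    alpha_M = ( K_MN K_NM + noise * K_MM )^{-1} K_MN y ,
// so that the predictive mean only needs the kernel values to the active set,
//    mean(x_*) ~= k_M(x_*)^T alpha_M    (see evaluateGPSRMean).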
void inline trainGPSRMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples )
{
  std::vector<int> examplesToChoose;
  indicesOfChosenExamples.clear();

  //add all examples for possible choice
  for (int i = 0; i < nrOfExamplesPerClass; i++)
  {
    examplesToChoose.push_back(i);
  }

  //now randomly choose some examples as active subset
  int index;
  for (int i = 0; i < std::min(nrOfRegressors,nrOfExamplesPerClass); i++)
  {
    index = rand() % examplesToChoose.size();
    indicesOfChosenExamples.push_back(examplesToChoose[index]);
    examplesToChoose.erase(examplesToChoose.begin() + index);
  }

  NICE::Matrix Kmn (indicesOfChosenExamples.size(), nrOfExamplesPerClass, 0.0);
  int rowCnt(0);
  //set every row
  for (int i = 0; i < indicesOfChosenExamples.size(); i++, rowCnt++ )
  {
    //set every element of this row
    NICE::Vector col = kernelMatrix.getRow(indicesOfChosenExamples[i]);
    for (int j = 0; j < nrOfExamplesPerClass; j++)
    {
      Kmn(rowCnt,j) = col(j);
    }
  }

  //we could speed this up if we would order the indices
  NICE::Matrix Kmm (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
  double tmp(0.0);
  for (int i = 0; i < indicesOfChosenExamples.size(); i++ )
  {
    for (int j = i; j < indicesOfChosenExamples.size(); j++ )
    {
      tmp = kernelMatrix(indicesOfChosenExamples[i], indicesOfChosenExamples[j]);
      Kmm(i,j) = tmp;
      if (i != j)
        Kmm(j,i) = tmp;
    }
  }

  Timer tTrainPrecise;
  tTrainPrecise.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    NICE::Matrix innerMatrix;
    innerMatrix.multiply(Kmn, Kmn, true /* transpose first matrix*/, false /* transpose second matrix*/);
    innerMatrix.addScaledMatrix( noise, Kmm );

    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
    NICE::Vector projectedLabels;
    projectedLabels.multiply(Kmn,y);

    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);

    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
    //compute the Cholesky decomposition of the regularized inner matrix in order to solve for the SR weights
    cr.robustChol ( innerMatrix, choleskyMatrix );

    GPMeanRightPart.resize(indicesOfChosenExamples.size());
    GPMeanRightPart.set(0.0);

    // pre-compute the SR weight vector, which is the same for every new test sample
    choleskySolveLargeScale ( choleskyMatrix, projectedLabels, GPMeanRightPart );
  }
  tTrainPrecise.stop();

  std::cerr << "Precise time used for GPSRMean training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}
// GP subset of regressors
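// Subset of regressors (SR) variance: with the same kind of random active set M and the
// same regularized inner matrix  A = K_MN K_NM + noise * K_MM , the predictive variance
// is approximated as
//    var(x_*) ~= noise * k_M(x_*)^T A^{-1} k_M(x_*)    (see evaluateGPSRVar),
// so only the Cholesky factor of A has to be kept from training.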
void inline trainGPSRVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples )
{
  std::vector<int> examplesToChoose;
  indicesOfChosenExamples.clear();

  //add all examples for possible choice
  for (int i = 0; i < nrOfExamplesPerClass; i++)
  {
    examplesToChoose.push_back(i);
  }

  //now randomly choose some examples as active subset
  int index;
  for (int i = 0; i < std::min(nrOfRegressors,nrOfExamplesPerClass); i++)
  {
    index = rand() % examplesToChoose.size();
    indicesOfChosenExamples.push_back(examplesToChoose[index]);
    examplesToChoose.erase(examplesToChoose.begin() + index);
  }

  NICE::Matrix Kmn (indicesOfChosenExamples.size(), nrOfExamplesPerClass, 0.0);
  int rowCnt(0);
  //set every row
  for (int i = 0; i < indicesOfChosenExamples.size(); i++, rowCnt++ )
  {
    //set every element of this row
    NICE::Vector col = kernelMatrix.getRow(indicesOfChosenExamples[i]);
    for (int j = 0; j < nrOfExamplesPerClass; j++)
    {
      Kmn(rowCnt,j) = col(j);
    }
  }

  //we could speed this up if we would order the indices
  NICE::Matrix Kmm (indicesOfChosenExamples.size(), indicesOfChosenExamples.size(), 0.0);
  double tmp(0.0);
  for (int i = 0; i < indicesOfChosenExamples.size(); i++ )
  {
    for (int j = i; j < indicesOfChosenExamples.size(); j++ )
    {
      tmp = kernelMatrix(indicesOfChosenExamples[i], indicesOfChosenExamples[j]);
      Kmm(i,j) = tmp;
      if (i != j)
        Kmm(j,i) = tmp;
    }
  }

  Timer tTrainPrecise;
  tTrainPrecise.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    NICE::Matrix innerMatrix;
    innerMatrix.multiply(Kmn, Kmn, true /* transpose first matrix*/, false /* transpose second matrix*/);
    innerMatrix.addScaledMatrix( noise, Kmm );

    CholeskyRobust cr ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);

    choleskyMatrix.resize( nrOfExamplesPerClass, nrOfExamplesPerClass );
    choleskyMatrix.set( 0.0 );
    //compute the Cholesky decomposition of the regularized inner matrix, needed for the variance of new test samples
    cr.robustChol ( innerMatrix, choleskyMatrix );
  }
  tTrainPrecise.stop();

  std::cerr << "Precise time used for GPSRVar training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;
}
KCMinimumEnclosingBall *trainSVDD( const double & noise, const NICE::Matrix kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber, const int & runsPerClassToAverageTraining )
{
  Config conf;
  // set the outlier ratio (Paul optimized this parameter FIXME)
  conf.sD( "SVDD", "outlier_fraction", 0.1 );
  conf.sB( "SVDD", "verbose", false );

  KCMinimumEnclosingBall *svdd = new KCMinimumEnclosingBall ( &conf, NULL /* no kernel function */, "SVDD" /* config section */);
  KernelData kernelData ( &conf, kernelMatrix, "Kernel" , false /* update cholesky */ );

  Timer tTrainPrecise;
  tTrainPrecise.start();

  for (int run = 0; run < runsPerClassToAverageTraining; run++)
  {
    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
    // KCMinimumEnclosingBall does not store the kernel data object, therefore we are safe passing a local copy
    svdd->teach ( &kernelData, y );
  }
  tTrainPrecise.stop();

  std::cerr << "Precise time used for SVDD training class " << classNumber << ": " << tTrainPrecise.getLast()/(double)runsPerClassToAverageTraining << std::endl;

  return svdd;
}
// ------------- EVALUATION METHODS ---------------------
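
// All evaluation methods below fill a two-entry score vector: scores[0] is a fixed
// reference value of 0.0 and scores[1] is the membership score of the test example
// (predictive mean, 1 - predictive variance, Parzen density, ...). The hard decision is
// class 1 (target class) if scores[1] >= 0.5 and class 0 otherwise; for the AUC values
// computed in main only the ranking induced by scores[1] matters.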
void inline evaluateGPVarApprox(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Vector & matrixDInv, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  double uncertainty;

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    // uncertainty = k_{**} - k_*^T \cdot D^{-1} \cdot k_*  where D is our nice approximation of K
    NICE::Vector rightPart (kernelVector.size());
    for (int j = 0; j < kernelVector.size(); j++)
    {
      rightPart[j] = kernelVector[j] * matrixDInv[j];
    }

    uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = 1.0 - uncertainty;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateGPVar(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  double uncertainty;

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    // uncertainty = k_{**} - k_*^T \cdot K^{-1} \cdot k_*
    NICE::Vector rightPart (kernelVector.size(),0.0);
    choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );

    uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = 1.0 - uncertainty;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateGPMeanApprox(const NICE::Vector & kernelVector, const NICE::Vector & rightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  double mean;

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    // mean = k_*^T \cdot D^{-1} \cdot y  where D is our nice approximation of K
    mean = kernelVector.scalarProduct ( rightPart );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = mean;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateGPMean(const NICE::Vector & kernelVector, const NICE::Vector & GPMeanRightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  double mean;

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    // mean = k_*^T \cdot K^{-1} \cdot y
    mean = kernelVector.scalarProduct ( GPMeanRightPart );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = mean;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateGPSRMean(const NICE::Vector & kernelVector, const NICE::Vector & GPSRMeanRightPart, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting, const int & nrOfRegressors, const std::vector<int> & indicesOfChosenExamples)
{
  double mean;

  //grab the entries corresponding to the active set
  NICE::Vector kernelVectorM;
  kernelVectorM.resize(nrOfRegressors);
  for (int i = 0; i < nrOfRegressors; i++)
  {
    kernelVectorM[i] = kernelVector[indicesOfChosenExamples[i]];
  }

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    // mean = k_M(x_*)^T \cdot alpha_M  with the pre-computed SR weights alpha_M
    mean = kernelVectorM.scalarProduct ( GPSRMeanRightPart );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = mean;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateGPSRVar(const NICE::Vector & kernelVector, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting, const int & nrOfRegressors, std::vector<int> & indicesOfChosenExamples, const double & noise)
{
  double uncertainty;

  //grab the entries corresponding to the active set
  NICE::Vector kernelVectorM;
  kernelVectorM.resize(nrOfRegressors);
  for (int i = 0; i < nrOfRegressors; i++)
  {
    kernelVectorM[i] = kernelVector[indicesOfChosenExamples[i]];
  }

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    NICE::Vector rightPart (nrOfRegressors,0.0);
    choleskySolveLargeScale ( choleskyMatrix, kernelVectorM, rightPart );

    uncertainty = noise*kernelVectorM.scalarProduct ( rightPart );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = 1.0 - uncertainty;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateParzen(const NICE::Vector & kernelVector, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  double score;

  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    //the Parzen score is nothing but the averaged similarity to every training sample
    score = kernelVector.Sum() / (double) kernelVector.size(); //maybe we could directly call kernelVector.Mean() here
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;

  FullVector scores ( 2 );
  scores[0] = 0.0;
  scores[1] = score;

  r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );
}
void inline evaluateSVDD( KCMinimumEnclosingBall *svdd, const NICE::Vector & kernelVector, ClassificationResult & r, double & timeForSingleExamples, const int & runsPerClassToAverageTesting)
{
  Timer tTestSingle;
  tTestSingle.start();

  for (int run = 0; run < runsPerClassToAverageTesting; run++)
  {
    // In the following, we assume that we are using a Gaussian kernel
    r = svdd->classifyKernel ( kernelVector, 1.0 /* kernel self */ );
  }
  tTestSingle.stop();
  timeForSingleExamples += tTestSingle.getLast()/(double)runsPerClassToAverageTesting;
}
/**
* run the binary one-class (OCC) evaluation on ImageNet for all methods above and report per-class AUC values as well as averaged training and testing times
*/
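
// The program is controlled entirely via the NICE Config object. An illustrative
// (not exhaustive) configuration could look like this:
//
//    [main]
//    nrOfExamplesPerClass = 50
//    indexOfFirstClass = 0
//    indexOfLastClass = 999
//    runsPerClassToAverageTraining = 1
//    runsPerClassToAverageTesting = 1
//    shareParameters = true
//    noise = 0.01
//    sigma = 1.0
//    imageNetLocal = false
//
//    [GPSR]
//    nrOfRegressors = 25
//
// If shareParameters is set to false, the per-method *File entries (e.g.
// sigmaGPVarApproxFile) have to point to parameter files in the format expected by
// readParameters.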
int main (int argc, char **argv)
{
  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);

  Config conf ( argc, argv );
  string resultsfile = conf.gS("main", "results", "results.txt" );
  int nrOfExamplesPerClass = conf.gI("main", "nrOfExamplesPerClass", 50);
  nrOfExamplesPerClass = std::min(nrOfExamplesPerClass, 100); // we do not have more than 100 examples per class

  //which classes to consider? we assume consecutive class numbers
  int indexOfFirstClass = conf.gI("main", "indexOfFirstClass", 0);
  indexOfFirstClass = std::max(indexOfFirstClass, 0); //we do not have less than 0 classes
  int indexOfLastClass = conf.gI("main", "indexOfLastClass", 999);
  indexOfLastClass = std::min(indexOfLastClass, 999); //we do not have more than 1000 classes

  int nrOfClassesToConcidere = (indexOfLastClass - indexOfFirstClass)+1;

  //repetitions for every class to achieve reliable time evaluations
  int runsPerClassToAverageTraining = conf.gI( "main", "runsPerClassToAverageTraining", 1 );
  int runsPerClassToAverageTesting = conf.gI( "main", "runsPerClassToAverageTesting", 1 );

  // share parameters among methods and classes?
  bool shareParameters = conf.gB("main" , "shareParameters", true);

  // GP variance approximation
  NICE::Vector sigmaGPVarApproxParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPVarApproxParas(nrOfClassesToConcidere,0.0);
  // GP variance
  NICE::Vector sigmaGPVarParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPVarParas(nrOfClassesToConcidere,0.0);
  //GP mean approximation
  NICE::Vector sigmaGPMeanApproxParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPMeanApproxParas(nrOfClassesToConcidere,0.0);
  //GP mean
  NICE::Vector sigmaGPMeanParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPMeanParas(nrOfClassesToConcidere,0.0);
  //GP SR mean
  NICE::Vector sigmaGPSRMeanParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPSRMeanParas(nrOfClassesToConcidere,0.0);
  //GP SR var
  NICE::Vector sigmaGPSRVarParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseGPSRVarParas(nrOfClassesToConcidere,0.0);
  //Parzen
  NICE::Vector sigmaParzenParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseParzenParas(nrOfClassesToConcidere,0.0);
  //SVDD
  NICE::Vector sigmaSVDDParas(nrOfClassesToConcidere,0.0);
  NICE::Vector noiseSVDDParas(nrOfClassesToConcidere,0.0);
  if (!shareParameters)
  {
    //read the optimal parameters for the different methods
    // GP variance approximation
    string sigmaGPVarApproxFile = conf.gS("main", "sigmaGPVarApproxFile", "approxVarSigma.txt");
    string noiseGPVarApproxFile = conf.gS("main", "noiseGPVarApproxFile", "approxVarNoise.txt");
    // GP variance
    string sigmaGPVarFile = conf.gS("main", "sigmaGPVarFile", "approxVarSigma.txt");
    string noiseGPVarFile = conf.gS("main", "noiseGPVarFile", "approxVarNoise.txt");
    //GP mean approximation
    string sigmaGPMeanApproxFile = conf.gS("main", "sigmaGPMeanApproxFile", "approxVarSigma.txt");
    string noiseGPMeanApproxFile = conf.gS("main", "noiseGPMeanApproxFile", "approxVarNoise.txt");
    //GP mean
    string sigmaGPMeanFile = conf.gS("main", "sigmaGPMeanFile", "approxVarSigma.txt");
    string noiseGPMeanFile = conf.gS("main", "noiseGPMeanFile", "approxVarNoise.txt");
    //Parzen
    string sigmaParzenFile = conf.gS("main", "sigmaParzenFile", "approxVarSigma.txt");
    string noiseParzenFile = conf.gS("main", "noiseParzenFile", "approxVarNoise.txt");
    //SVDD
    string sigmaSVDDFile = conf.gS("main", "sigmaSVDDFile", "approxVarSigma.txt");
    string noiseSVDDFile = conf.gS("main", "noiseSVDDFile", "approxVarNoise.txt");
    // GP variance approximation
    readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarApproxParas);
    readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarApproxParas);
    // GP variance
    readParameters(sigmaGPVarFile,nrOfClassesToConcidere, sigmaGPVarParas);
    readParameters(noiseGPVarFile,nrOfClassesToConcidere, noiseGPVarParas);
    //GP mean approximation
    readParameters(sigmaGPMeanApproxFile,nrOfClassesToConcidere, sigmaGPMeanApproxParas);
    readParameters(noiseGPMeanApproxFile,nrOfClassesToConcidere, noiseGPMeanApproxParas);
    //GP mean
    readParameters(sigmaGPMeanFile,nrOfClassesToConcidere, sigmaGPMeanParas);
    readParameters(noiseGPMeanFile,nrOfClassesToConcidere, noiseGPMeanParas);
    //GP SR mean (no separate parameter files, re-use the GP variance approximation values)
    readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPSRMeanParas);
    readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPSRMeanParas);
    //GP SR var (no separate parameter files, re-use the GP variance approximation values)
    readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPSRVarParas);
    readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPSRVarParas);
    //Parzen
    readParameters(sigmaParzenFile,nrOfClassesToConcidere, sigmaParzenParas);
    readParameters(noiseParzenFile,nrOfClassesToConcidere, noiseParzenParas);
    //SVDD
    readParameters(sigmaSVDDFile,nrOfClassesToConcidere, sigmaSVDDParas);
    readParameters(noiseSVDDFile,nrOfClassesToConcidere, noiseSVDDParas);
  }
  else
  {
    //use static variables for all methods and classes
    double noise = conf.gD( "main", "noise", 0.01 );
    double sigma = conf.gD( "main", "sigma", 1.0 );

    // GP variance approximation
    sigmaGPVarApproxParas.set(sigma);
    noiseGPVarApproxParas.set(noise);
    // GP variance
    sigmaGPVarParas.set(sigma);
    noiseGPVarParas.set(noise);
    //GP mean approximation
    sigmaGPMeanApproxParas.set(sigma);
    noiseGPMeanApproxParas.set(noise);
    //GP mean
    sigmaGPMeanParas.set(sigma);
    noiseGPMeanParas.set(noise);
    //GP SR mean
    sigmaGPSRMeanParas.set(sigma);
    noiseGPSRMeanParas.set(noise);
    //GP SR var
    sigmaGPSRVarParas.set(sigma);
    noiseGPSRVarParas.set(noise);
    //Parzen
    sigmaParzenParas.set(sigma);
    noiseParzenParas.set(noise);
    //SVDD
    sigmaSVDDParas.set(sigma);
    noiseSVDDParas.set(noise);
  }
  // -------- optimal parameters read --------------

  std::vector<SparseVector> trainingData;
  NICE::Vector y;

  std::cerr << "Reading ImageNet data ..." << std::endl;
  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
  string imageNetPath;
  if (imageNetLocal)
    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
  else
    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";

  ImageNetData imageNetTrain ( imageNetPath + "demo/" );

  imageNetTrain.preloadData( "train", "training" );
  trainingData = imageNetTrain.getPreloadedData();
  y = imageNetTrain.getPreloadedLabels();

  std::cerr << "Reading of training data finished" << std::endl;
  std::cerr << "trainingData.size(): " << trainingData.size() << std::endl;
  std::cerr << "y.size(): " << y.size() << std::endl;

  std::cerr << "Reading ImageNet test data files (takes some seconds)..." << std::endl;
  ImageNetData imageNetTest ( imageNetPath + "demo/" );
  imageNetTest.preloadData ( "val", "testing" );
  imageNetTest.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );

  double OverallPerformanceGPVarApprox(0.0);
  double OverallPerformanceGPVar(0.0);
  double OverallPerformanceGPMeanApprox(0.0);
  double OverallPerformanceGPMean(0.0);
  double OverallPerformanceGPSRMean(0.0);
  double OverallPerformanceGPSRVar(0.0);
  double OverallPerformanceParzen(0.0);
  double OverallPerformanceSVDD(0.0);

  double kernelSigmaGPVarApprox;
  double kernelSigmaGPVar;
  double kernelSigmaGPMeanApprox;
  double kernelSigmaGPMean;
  double kernelSigmaGPSRMean;
  double kernelSigmaGPSRVar;
  double kernelSigmaParzen;
  double kernelSigmaSVDD;
  for (int cl = indexOfFirstClass; cl <= indexOfLastClass; cl++)
  {
    std::cerr << "run for class " << cl << std::endl;
    int positiveClass = cl+1; //labels are from 1 to 1000, but our indices from 0 to 999

    // ------------------------------ TRAINING ------------------------------

    kernelSigmaGPVarApprox = sigmaGPVarApproxParas[cl];
    kernelSigmaGPVar = sigmaGPVarParas[cl];
    kernelSigmaGPMeanApprox = sigmaGPMeanApproxParas[cl];
    kernelSigmaGPMean = sigmaGPMeanParas[cl];
    kernelSigmaGPSRMean = sigmaGPSRMeanParas[cl];
    kernelSigmaGPSRVar = sigmaGPSRVarParas[cl];
    kernelSigmaParzen = sigmaParzenParas[cl];
    kernelSigmaSVDD = sigmaSVDDParas[cl];

    Timer tTrain;
    tTrain.start();

    //compute the kernel matrix, which will be shared among all methods in this scenario
    NICE::Matrix kernelMatrix(nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);

    //NOTE in theory we have to compute a separate kernel matrix for every method, since every method may have its own optimal parameter
    // I'm sure we can speed it up a bit and compute it only for every different parameter
    //nonetheless, it's not as nice as we originally thought (same matrix for every method)
    //NOTE Nonetheless, since we're only interested in runtimes, we can ignore this

    //fill the kernel matrix (training examples of class cl occupy the block [cl*100, cl*100+nrOfExamplesPerClass) of the training data)
    double kernelScore(0.0);
    for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++)
    {
      for (int j = i; j < cl*100+nrOfExamplesPerClass; j++)
      {
        kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigmaGPVarApprox);
        kernelMatrix(i-cl*100,j-cl*100) = kernelScore;

        if (i != j)
          kernelMatrix(j-cl*100,i-cl*100) = kernelScore;
      }
    }
    // now call the individual training methods

    //train GP Var Approx
    NICE::Vector matrixDInv;
    trainGPVarApprox(matrixDInv, noiseGPVarApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

    //train GP Var
    NICE::Matrix GPVarCholesky;
    trainGPVar(GPVarCholesky, noiseGPVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

    //train GP Mean Approx
    NICE::Vector GPMeanApproxRightPart;
    trainGPMeanApprox(GPMeanApproxRightPart, noiseGPMeanApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

    //train GP Mean
    NICE::Vector GPMeanRightPart;
    trainGPMean(GPMeanRightPart, noiseGPMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

    //train GP SR Mean
    NICE::Vector GPSRMeanRightPart;
    std::vector<int> indicesOfChosenExamplesGPSRMean;
    int nrOfRegressors = conf.gI( "GPSR", "nrOfRegressors", nrOfExamplesPerClass/2);
    nrOfRegressors = std::min( nrOfRegressors, nrOfExamplesPerClass );
    trainGPSRMean(GPSRMeanRightPart, noiseGPSRMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPSRMean );

    //train GP SR Var
    NICE::Matrix GPSRVarCholesky;
    std::vector<int> indicesOfChosenExamplesGPSRVar;
    trainGPSRVar(GPSRVarCholesky, noiseGPSRVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining, nrOfRegressors, indicesOfChosenExamplesGPSRVar );

    //train Parzen
    //nothing to do :)

    //train SVDD
    KCMinimumEnclosingBall *svdd = trainSVDD(noiseSVDDParas[cl], kernelMatrix, nrOfExamplesPerClass, cl, runsPerClassToAverageTraining );

    tTrain.stop();
    std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl;

    std::cerr << "training done - now perform the evaluation" << std::endl;
    // ------------------------------ TESTING ------------------------------

    std::cerr << "Classification step ... with " << imageNetTest.getNumPreloadedExamples() << " examples" << std::endl;

    ClassificationResults resultsGPVarApprox;
    ClassificationResults resultsGPVar;
    ClassificationResults resultsGPMeanApprox;
    ClassificationResults resultsGPMean;
    ClassificationResults resultsGPSRMean;
    ClassificationResults resultsGPSRVar;
    ClassificationResults resultsParzen;
    ClassificationResults resultsSVDD;

    ProgressBar pb;
    Timer tTest;
    tTest.start();
    Timer tTestSingle;

    double timeForSingleExamplesGPVarApprox(0.0);
    double timeForSingleExamplesGPVar(0.0);
    double timeForSingleExamplesGPMeanApprox(0.0);
    double timeForSingleExamplesGPMean(0.0);
    double timeForSingleExamplesGPSRMean(0.0);
    double timeForSingleExamplesGPSRVar(0.0);
    double timeForSingleExamplesParzen(0.0);
    double timeForSingleExamplesSVDD(0.0);

    for ( uint i = 0 ; i < (uint)imageNetTest.getNumPreloadedExamples(); i++ )
    {
      pb.update ( imageNetTest.getNumPreloadedExamples() );

      const SparseVector & svec = imageNetTest.getPreloadedExample ( i );

      //NOTE: again we should use method-specific optimal parameters. If we're only interested in the runtimes, this doesn't matter
      //compute (self) similarities
      double kernelSelf (measureDistance(svec,svec, kernelSigmaGPVarApprox) );
      NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0);

      for (int j = 0; j < nrOfExamplesPerClass; j++)
      {
        kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigmaGPVarApprox);
      }
      //call the individual test-methods

      //evaluate GP Var Approx
      ClassificationResult rGPVarApprox;
      evaluateGPVarApprox( kernelVector, kernelSelf, matrixDInv, rGPVarApprox, timeForSingleExamplesGPVarApprox, runsPerClassToAverageTesting );

      //evaluate GP Var
      ClassificationResult rGPVar;
      evaluateGPVar( kernelVector, kernelSelf, GPVarCholesky, rGPVar, timeForSingleExamplesGPVar, runsPerClassToAverageTesting );

      //evaluate GP Mean Approx
      ClassificationResult rGPMeanApprox;
      evaluateGPMeanApprox( kernelVector, matrixDInv, rGPMeanApprox, timeForSingleExamplesGPMeanApprox, runsPerClassToAverageTesting );

      //evaluate GP Mean
      ClassificationResult rGPMean;
      evaluateGPMean( kernelVector, GPMeanRightPart, rGPMean, timeForSingleExamplesGPMean, runsPerClassToAverageTesting );

      //evaluate GP SR Mean
      ClassificationResult rGPSRMean;
      evaluateGPSRMean( kernelVector, GPSRMeanRightPart, rGPSRMean, timeForSingleExamplesGPSRMean, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPSRMean );

      //evaluate GP SR Var
      ClassificationResult rGPSRVar;
      evaluateGPSRVar( kernelVector, GPSRVarCholesky, rGPSRVar, timeForSingleExamplesGPSRVar, runsPerClassToAverageTesting, nrOfRegressors, indicesOfChosenExamplesGPSRVar, noiseGPSRVarParas[cl] );

      //evaluate Parzen
      ClassificationResult rParzen;
      evaluateParzen( kernelVector, rParzen, timeForSingleExamplesParzen, runsPerClassToAverageTesting );

      //evaluate SVDD
      ClassificationResult rSVDD;
      evaluateSVDD( svdd, kernelVector, rSVDD, timeForSingleExamplesSVDD, runsPerClassToAverageTesting );

      // set ground truth label (1 if the test example belongs to the positive class, 0 otherwise)
      rGPVarApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rGPVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rGPMeanApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rGPMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rGPSRMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rGPSRVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rParzen.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
      rSVDD.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;

      //remember the results for the evaluation later on
      resultsGPVarApprox.push_back ( rGPVarApprox );
      resultsGPVar.push_back ( rGPVar );
      resultsGPMeanApprox.push_back ( rGPMeanApprox );
      resultsGPMean.push_back ( rGPMean );
      resultsGPSRMean.push_back ( rGPSRMean );
      resultsGPSRVar.push_back ( rGPSRVar );
      resultsParzen.push_back ( rParzen );
      resultsSVDD.push_back ( rSVDD );
    }
    tTest.stop();
    std::cerr << "Time used for evaluating class " << cl << ": " << tTest.getLast() << std::endl;

    timeForSingleExamplesGPVarApprox /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesGPVar /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesGPMeanApprox /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesGPMean /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesGPSRMean /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesGPSRVar /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesParzen /= imageNetTest.getNumPreloadedExamples();
    timeForSingleExamplesSVDD /= imageNetTest.getNumPreloadedExamples();

    std::cerr << "GPVarApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVarApprox << std::endl;
    std::cerr << "GPVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVar << std::endl;
    std::cerr << "GPMeanApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMeanApprox << std::endl;
    std::cerr << "GPMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMean << std::endl;
    std::cerr << "GPSRMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPSRMean << std::endl;
    std::cerr << "GPSRVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPSRVar << std::endl;
    std::cerr << "Parzen -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesParzen << std::endl;
    std::cerr << "SVDD -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesSVDD << std::endl;

    // run the AUC-evaluation
    double perfvalueGPVarApprox = resultsGPVarApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueGPVar = resultsGPVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueGPMeanApprox = resultsGPMeanApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueGPMean = resultsGPMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueGPSRMean = resultsGPSRMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueGPSRVar = resultsGPSRVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueParzen = resultsParzen.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
    double perfvalueSVDD = resultsSVDD.getBinaryClassPerformance( ClassificationResults::PERF_AUC );

    std::cerr << "Performance GPVarApprox: " << perfvalueGPVarApprox << std::endl;
    std::cerr << "Performance GPVar: " << perfvalueGPVar << std::endl;
    std::cerr << "Performance GPMeanApprox: " << perfvalueGPMeanApprox << std::endl;
    std::cerr << "Performance GPMean: " << perfvalueGPMean << std::endl;
    std::cerr << "Performance GPSRMean: " << perfvalueGPSRMean << std::endl;
    std::cerr << "Performance GPSRVar: " << perfvalueGPSRVar << std::endl;
    std::cerr << "Performance Parzen: " << perfvalueParzen << std::endl;
    std::cerr << "Performance SVDD: " << perfvalueSVDD << std::endl;

    OverallPerformanceGPVarApprox += perfvalueGPVarApprox;
    OverallPerformanceGPVar += perfvalueGPVar;
    OverallPerformanceGPMeanApprox += perfvalueGPMeanApprox;
    OverallPerformanceGPMean += perfvalueGPMean;
    OverallPerformanceGPSRMean += perfvalueGPSRMean;
    OverallPerformanceGPSRVar += perfvalueGPSRVar;
    OverallPerformanceParzen += perfvalueParzen;
    OverallPerformanceSVDD += perfvalueSVDD;

    // clean up memory used by SVDD
    delete svdd;
  }
  OverallPerformanceGPVarApprox /= nrOfClassesToConcidere;
  OverallPerformanceGPVar /= nrOfClassesToConcidere;
  OverallPerformanceGPMeanApprox /= nrOfClassesToConcidere;
  OverallPerformanceGPMean /= nrOfClassesToConcidere;
  OverallPerformanceGPSRMean /= nrOfClassesToConcidere;
  OverallPerformanceGPSRVar /= nrOfClassesToConcidere;
  OverallPerformanceParzen /= nrOfClassesToConcidere;
  OverallPerformanceSVDD /= nrOfClassesToConcidere;

  std::cerr << "overall performance GPVarApprox: " << OverallPerformanceGPVarApprox << std::endl;
  std::cerr << "overall performance GPVar: " << OverallPerformanceGPVar << std::endl;
  std::cerr << "overall performance GPMeanApprox: " << OverallPerformanceGPMeanApprox << std::endl;
  std::cerr << "overall performance GPMean: " << OverallPerformanceGPMean << std::endl;
  std::cerr << "overall performance GPSRMean: " << OverallPerformanceGPSRMean << std::endl;
  std::cerr << "overall performance GPSRVar: " << OverallPerformanceGPSRVar << std::endl;
  std::cerr << "overall performance Parzen: " << OverallPerformanceParzen << std::endl;
  std::cerr << "overall performance SVDD: " << OverallPerformanceSVDD << std::endl;

  return 0;
}
  853. }