// IL_NewExamples_Comparison.cpp
/**
* @file IL_NewExamples_Comparison.cpp
* @brief Large GP-IL test setup: compares incremental against batch (re)training of a GP-HIK classifier
* @author Alexander Freytag
* @date 09-05-2012
*/
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>
//----------
#include <core/basics/Config.h>
#include <core/basics/StringTools.h>
#include <core/vector/SparseVectorT.h>
#include <core/vector/VectorT.h>
//----------
#include "vislearning/baselib/ProgressBar.h"
#include <vislearning/baselib/Globals.h>
#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
#include "vislearning/cbaselib/MultiDataset.h"
#include <vislearning/cbaselib/LabeledSet.h>
#include "vislearning/cbaselib/ClassificationResults.h"
#include <vislearning/math/kernels/KernelData.h>
//----------
#include "gp-hik-exp/progs/datatools.h"
#include "gp-hik-exp/GPHIKClassifierNICE.h"
//----------
// #include <incrementallearning/IL_Framework_Generic.h>
//
using namespace std;
using namespace NICE;
using namespace OBJREC;
  34. enum verbose_level {NONE = 0, LOW = 1, MEDIUM = 2, EVERYTHING = 3};
  35. /**
  36. Computes from randomly or deterministically choosen trainimages kernelmatrizes and evaluates their performance, using ROI-optimization
  37. */
  38. int main ( int argc, char **argv )
  39. {
  40. std::cout.precision ( 5 );
  41. std::cerr.precision ( 5 );
  42. NICE::Config conf ( argc, argv );
  43. int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
  44. int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);
  45. int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);
  46. int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );
  47. bool do_classification = conf.gB ( "GP_IL", "do_classification", true );
  48. string featureLocation = conf.gS( "GP_IL", "featureLocation", "toyExampleLargeLargeScale.data");
  49. int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
  50. verbose_level verbose ( NONE );
  51. switch ( verbose_int )
  52. {
  53. case 0:
  54. verbose = NONE;
  55. break;
  56. case 1:
  57. verbose = LOW;
  58. break;
  59. case 2:
  60. verbose = MEDIUM;
  61. break;
  62. case 3:
  63. verbose = EVERYTHING;
  64. break;
  65. }
  66. /* initialize random seed: */
  67. srand ( time ( NULL ) ); //with 0 for reproductive results
  68. // srand ( 0 ); //with 0 for reproductive results
  69. // =========================== INIT ===========================
  70. //these classes are the basic knowledge we have at the beginning
  71. set<int> classesForTraining;
  72. classesForTraining.insert(0);
  73. classesForTraining.insert(1);
  74. classesForTraining.insert(2);
  75. classesForTraining.insert(3);
  76. classesForTraining.insert(4);
  77. classesForTraining.insert(5);
  78. classesForTraining.insert(6);
  79. classesForTraining.insert(7);
  80. classesForTraining.insert(8);
  81. classesForTraining.insert(9);
  82. classesForTraining.insert(10);
  83. classesForTraining.insert(11);
  84. classesForTraining.insert(12);
  85. classesForTraining.insert(13);
  86. classesForTraining.insert(14);
  87. // //these classes will be added iteratively to our training set
  88. // std::set<int> classesForIncrementalTraining;
  89. std::vector<std::vector<double> > recognitionsRatesBatch(nrOfIncrements+1);
  90. std::vector<std::vector<double> > recognitionsRatesIL(nrOfIncrements+1);
  91. std::vector<std::vector<float> > trainingTimesBatch(nrOfIncrements+1);
  92. std::vector<std::vector<float> > trainingTimesIL(nrOfIncrements+1);
  93. for ( int run = 0; run < num_runs; run++ )
  94. {
  95. std::cerr << "run: " << run << std::endl;
  96. //15-scenes settings
  97. std::string ext = conf.gS("main", "ext", ".txt");
  98. std::cerr << "Using cache extension: " << ext << std::endl;
  99. OBJREC::MultiDataset md ( &conf );
  100. const ClassNames & classNamesTrain = md.getClassNames("train");
  101. // read training set
  102. vector< NICE::Vector > trainDataOrig;
  103. Vector y;
  104. const LabeledSet *train = md["train"];
  105. readData< std::vector< NICE::Vector >, NICE::Vector > ( conf, *train, trainDataOrig, y, ext );
  106. std::vector<double> labelsStd;
  107. int datasize_all ( trainDataOrig.size() );
  108. std::set<int> classesAvailable;
  109. for ( uint i = 0; i < y.size(); i++)
  110. {
  111. //automatically check for duplicates
  112. classesAvailable.insert(y[i]);
  113. }
  114. int numberOfClasses = classesAvailable.size();
  115. std::map<int,int> nrExamplesPerClassInDataset;
  116. std::map<int,std::vector<int> > examplesPerClassInDataset;
  117. for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
  118. {
  119. nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
  120. examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
  121. }
  122. for ( uint i = 0; i < y.size(); i++ )
  123. {
  124. (examplesPerClassInDataset.find(y[i])->second).push_back(i);
  125. }
  126. for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
  127. {
  128. nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
  129. }
  130. for ( std::map<int,int>::const_iterator it = nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
  131. {
  132. cerr << it->first << ": " << it->second << endl;
  133. }
  134. Examples examples;
  135. std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);
  136. //chose examples for every class used for training
  137. for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
  138. {
  139. std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
  140. // std::cerr << "pick training examples for class " << *clIt << std::endl;
  141. for (int i = 0; i < trainExPerClass; i++)
  142. {
  143. // std::cerr << "i: " << i << std::endl;
  144. int exampleIndex ( rand() % ( exIt->second.size() ) );
  145. // std::cerr << "exampleIndex: " << exampleIndex << std::endl;
  146. Example example;
  147. NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
  148. example.svec = new SparseVector(xTrain);
  149. examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
  150. exIt->second.erase(exIt->second.begin()+exampleIndex);
  151. }
  152. }
  153. std::cerr << "start training " << std::endl;
  154. time_t prep_start_time = clock();
  155. GPHIKClassifierNICE * classifierBatch = new GPHIKClassifierNICE( &conf ); //we don't need this one in the first round
  156. GPHIKClassifierNICE * classifierIL = new GPHIKClassifierNICE( &conf );
  157. FeaturePool fp; // will be ignored
  158. classifierIL->train ( fp, examples );
  159. float time_preparation = ( float ) ( clock() - prep_start_time ) ;
  160. int classesUsed(classesForTraining.size());
  161. std::cerr << "training done " << std::endl;
  162. // ------------------ TESTING
  163. const LabeledSet *test = md["test"];
  164. VVector testData;
  165. Vector yTest;
  166. readData< VVector, Vector > ( conf, *test, testData, yTest, ext );
  167. NICE::Matrix confusionMatrixBatch ( numberOfClasses, numberOfClasses );
  168. NICE::Matrix confusionMatrixIL ( numberOfClasses, numberOfClasses );
  169. confusionMatrixBatch.set ( 0.0 );
  170. confusionMatrixIL.set ( 0.0 );
  171. time_t start_time = clock();
  172. std::vector<int> chosen_examples_per_class ( numberOfClasses );
  173. if ( do_classification )
  174. {
  175. for ( uint i = 0 ; i < testData.size(); i++ )
  176. {
  177. Example example;
  178. const Vector & xstar = testData[i];
  179. SparseVector xstar_sparse ( xstar );
  180. OBJREC::ClassificationResult result;
  181. example.svec = &xstar_sparse;
  182. result = classifierIL->classify( example );
  183. cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;
  184. result.classno_groundtruth = yTest[i];
  185. confusionMatrixIL ( result.classno_groundtruth , result.classno ) ++;
  186. }
  187. float time_classification = ( float ) ( clock() - start_time ) ;
  188. if ( verbose >= LOW )
  189. cerr << "Time for Classification with " << classesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
  190. confusionMatrixIL.normalizeRowsL1();
  191. double avg_recognition_rate = 0.0;
  192. for ( int i = 0 ; i < ( int ) confusionMatrixIL.rows(); i++ )
  193. {
  194. if ( verbose >= MEDIUM )
  195. {
  196. cerr << "Class no: " << i << " : " << confusionMatrixIL ( i, i ) << endl;
  197. }
  198. avg_recognition_rate += confusionMatrixIL ( i, i );
  199. }
  200. avg_recognition_rate /= confusionMatrixIL.rows();
  201. std::cerr << confusionMatrixIL << std::endl;
  202. std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " %" << std::endl;
  203. recognitionsRatesBatch[0].push_back ( avg_recognition_rate*100 );
  204. recognitionsRatesIL[0].push_back ( avg_recognition_rate*100 );
  205. }
  206. //Now start the Incremental-Learning-Part
  207. for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
  208. {
  209. uint oldSize = examples.size();
  210. //chose examples for every class used for training
  211. int cnt(0);
  212. Examples newExamples;
  213. for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
  214. {
  215. std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
  216. for (int i = 0; i < incrementalAddSize; i++)
  217. {
  218. std::cerr << "i: " << cnt << std::endl;
  219. Example example;
  220. int exampleIndex ( rand() % ( exIt->second.size() ) );
  221. NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex] ];
  222. example.svec = new SparseVector(xTrain);
  223. examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
  224. newExamples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
  225. exIt->second.erase(exIt->second.begin()+exampleIndex);
  226. cnt++;
  227. }
  228. }
  229. std::cerr << "Incremental, but not batch" << std::endl;
  230. time_t IL_add_start_time = clock();
  231. // for ( uint i = oldSize ; i < examples.size() ; i++ )
  232. // {
  233. // Example & example = examples[i].second;
  234. // int classno = examples[i].first;
  235. //
  236. // //skip the optimization for the first k examples
  237. // classifierIL->addExample( example, (double) classno, true );
  238. // }
  239. // for ( uint i = examples.size()-1 ; i < examples.size() ; i++ )
  240. // {
  241. // Example & example = examples[i].second;
  242. // int classno = examples[i].first;
  243. // //perform the optimization
  244. // classifierIL->addExample( example, (double) classno, true );
  245. // }
  246. classifierIL->addMultipleExamples( newExamples );
  247. float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
  248. std::cerr << "Time for IL-adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << " training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
  249. trainingTimesIL[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);
  250. std::cerr << "start batch retraining" << std::endl;
  251. time_t batch_add_start_time = clock();
  252. //
  253. if (classifierBatch != NULL)
  254. delete classifierBatch;
  255. classifierBatch = new GPHIKClassifierNICE( &conf );
  256. classifierBatch->train( fp, examples );
  257. //
  258. float time_batch_add = ( float ) ( clock() - batch_add_start_time ) ;
  259. std::cerr << "Time for batch relearning after adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << " training-examples: " << time_batch_add / CLOCKS_PER_SEC << " [s]" << std::endl;
  260. trainingTimesBatch[incrementationStep].push_back(time_batch_add / CLOCKS_PER_SEC);
  261. //do the classification for evaluating the benefit of new examples
  262. if ( do_classification )
  263. {
  264. std::cerr << "do classification" << std::endl;
  265. for ( uint i = 0 ; i < testData.size(); i++ )
  266. {
  267. Example example;
  268. const Vector & xstar = testData[i];
  269. SparseVector xstar_sparse ( xstar );
  270. example.svec = &xstar_sparse;
  271. OBJREC::ClassificationResult resultBatch;
  272. OBJREC::ClassificationResult resultIL;
  273. resultBatch = classifierBatch->classify( example );
  274. resultIL = classifierIL->classify( example );
  275. std::cerr << "Batch: [" << i << " / " << testData.size() << "] " << resultBatch.classno << " " << yTest[i] << std::endl;
  276. std::cerr << "IL: [" << i << " / " << testData.size() << "] " << resultIL.classno << " " << yTest[i] << std::endl;
  277. resultBatch.classno_groundtruth = yTest[i];
  278. resultIL.classno_groundtruth = yTest[i];
  279. confusionMatrixBatch ( resultBatch.classno_groundtruth , resultBatch.classno ) ++;
  280. confusionMatrixIL ( resultIL.classno_groundtruth , resultIL.classno ) ++;
  281. }
  282. float time_classification = ( float ) ( clock() - start_time ) ;
  283. if ( verbose >= LOW )
  284. std::cerr << "Time for Classification with " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
  285. confusionMatrixBatch.normalizeRowsL1();
  286. confusionMatrixIL.normalizeRowsL1();
  287. double ARRBatch = 0.0;
  288. double ARRIL = 0.0;
  289. for ( int i = 0 ; i < ( int ) confusionMatrixBatch.rows(); i++ )
  290. {
  291. if ( verbose >= MEDIUM )
  292. {
  293. std::cerr << "Batch Class no: " << i << " : " << confusionMatrixBatch ( i, i ) << std::endl;
  294. std::cerr << "IL Class no: " << i << " : " << confusionMatrixIL ( i, i ) << std::endl;
  295. }
  296. ARRBatch += confusionMatrixBatch ( i, i );
  297. ARRIL += confusionMatrixIL ( i, i );
  298. }
  299. ARRBatch /= confusionMatrixBatch.rows();
  300. ARRIL /= confusionMatrixIL.rows();
  301. std::cerr << "Batch matrix and results: " << std::endl;
  302. std::cerr << confusionMatrixBatch << std::endl;
  303. std::cerr << "ARRBatch " << ARRBatch*100 << " %" << std::endl;
  304. std::cerr << "IL matrix and results: " << std::endl;
  305. std::cerr << confusionMatrixIL << std::endl;
  306. std::cerr << "ARRIL " << ARRIL*100 << " %" << std::endl;
  307. recognitionsRatesBatch[incrementationStep+1].push_back ( ARRBatch*100 );
  308. recognitionsRatesIL[incrementationStep+1].push_back ( ARRIL*100 );
  309. } //classification after IL adding
  310. } //IL adding of different classes
  311. }//runs
  312. int classesUsed(classesForTraining.size());
  313. std::cerr << "classes used: " << classesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;
  314. std::cerr << "========================" << std::endl;
  315. std::cerr << "content of trainingTimesIL: " << std::endl;
  316. for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesIL.begin(); it != trainingTimesIL.end(); it++ )
  317. {
  318. for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
  319. {
  320. std::cerr << *jt << " ";
  321. }
  322. std::cerr << std::endl;
  323. }
  324. std::vector<float> trainingTimesILMean;
  325. std::vector<float> trainingTimesILStdDev;
  326. for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesIL.begin(); it != trainingTimesIL.end(); it++ )
  327. {
  328. float trainingTimeILMean ( 0.0 );
  329. for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  330. {
  331. trainingTimeILMean += *itRun;
  332. }
  333. trainingTimeILMean /= it->size();
  334. trainingTimesILMean.push_back ( trainingTimeILMean );
  335. double trainingTimeILStdDev ( 0.0 );
  336. for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  337. {
  338. trainingTimeILStdDev += pow ( *itRun - trainingTimeILMean, 2 );
  339. }
  340. trainingTimeILStdDev /= it->size();
  341. trainingTimeILStdDev = sqrt ( trainingTimeILStdDev );
  342. trainingTimesILStdDev.push_back ( trainingTimeILStdDev );
  343. }
  344. int datasize ( classesUsed*trainExPerClass );
  345. for ( uint i = 0; i < trainingTimesILMean.size(); i++)
  346. {
  347. cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " trainingTimesILMean: " << trainingTimesILMean[i] << " trainingTimesILStdDev: " << trainingTimesILStdDev[i] << endl;
  348. datasize += classesUsed*incrementalAddSize ;
  349. }
  350. std::cerr << "========================" << std::endl;
  351. std::cerr << "content of trainingTimesBatch: " << std::endl;
  352. for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesBatch.begin(); it != trainingTimesBatch.end(); it++ )
  353. {
  354. for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
  355. {
  356. std::cerr << *jt << " ";
  357. }
  358. std::cerr << std::endl;
  359. }
  360. std::vector<float> trainingTimesBatchMean;
  361. std::vector<float> trainingTimesBatchStdDev;
  362. for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesBatch.begin(); it != trainingTimesBatch.end(); it++ )
  363. {
  364. float trainingTimeBatchMean ( 0.0 );
  365. for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  366. {
  367. trainingTimeBatchMean += *itRun;
  368. }
  369. trainingTimeBatchMean /= it->size();
  370. trainingTimesBatchMean.push_back ( trainingTimeBatchMean );
  371. double trainingTimeBatchStdDev ( 0.0 );
  372. for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  373. {
  374. trainingTimeBatchStdDev += pow ( *itRun - trainingTimeBatchMean, 2 );
  375. }
  376. trainingTimeBatchStdDev /= it->size();
  377. trainingTimeBatchStdDev = sqrt ( trainingTimeBatchStdDev );
  378. trainingTimesBatchStdDev.push_back ( trainingTimeBatchStdDev );
  379. }
  380. datasize = classesUsed*trainExPerClass;
  381. for ( uint i = 0; i < trainingTimesBatchMean.size(); i++)
  382. {
  383. cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " trainingTimesBatchMean: " << trainingTimesBatchMean[i] << " trainingTimesBatchStdDev: " << trainingTimesBatchStdDev[i] << endl;
  384. datasize += classesUsed*incrementalAddSize ;
  385. }
  386. if ( do_classification )
  387. {
  388. std::cerr << "========================" << std::endl;
  389. std::cerr << "content of recognitionsRatesIL: " << std::endl;
  390. for ( std::vector<std::vector<double> >::const_iterator it = recognitionsRatesIL.begin(); it != recognitionsRatesIL.end(); it++ )
  391. {
  392. for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
  393. {
  394. std::cerr << *jt << " ";
  395. }
  396. std::cerr << std::endl;
  397. }
  398. std::cerr << "calculating final IL results " << std::endl;
  399. std::vector<double> recRatesILMean;
  400. std::vector<double> recRatesILStdDev;
  401. for (std::vector<std::vector<double> >::const_iterator it = recognitionsRatesIL.begin(); it != recognitionsRatesIL.end(); it++ )
  402. {
  403. double recRateILMean ( 0.0 );
  404. for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  405. {
  406. recRateILMean += *itRun;
  407. }
  408. recRateILMean /= it->size();
  409. recRatesILMean.push_back ( recRateILMean );
  410. double recRateILStdDev ( 0.0 );
  411. for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  412. {
  413. recRateILStdDev += pow ( *itRun - recRateILMean, 2 );
  414. }
  415. recRateILStdDev /= it->size();
  416. recRateILStdDev = sqrt ( recRateILStdDev );
  417. recRatesILStdDev.push_back ( recRateILStdDev );
  418. }
  419. int datasize ( classesUsed*trainExPerClass);
  420. for ( uint i = 0; i < recRatesILMean.size(); i++)
  421. {
  422. std::cerr << "size: " << datasize << " recRatesILMean: " << recRatesILMean[i] << " recRatesILStdDev: " << recRatesILStdDev[i] << std::endl;
  423. datasize += classesUsed*incrementalAddSize ;
  424. }
  425. std::cerr << "========================" << std::endl;
  426. std::cerr << "content of recognitionsRatesBatch: " << std::endl;
  427. for ( std::vector<std::vector<double> >::const_iterator it = recognitionsRatesBatch.begin(); it != recognitionsRatesBatch.end(); it++ )
  428. {
  429. for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
  430. {
  431. std::cerr << *jt << " ";
  432. }
  433. std::cerr << std::endl;
  434. }
  435. std::cerr << "calculating final batch results " << std::endl;
  436. std::vector<double> recRatesBatchMean;
  437. std::vector<double> recRatesBatchStdDev;
  438. for (std::vector<std::vector<double> >::const_iterator it = recognitionsRatesBatch.begin(); it != recognitionsRatesBatch.end(); it++ )
  439. {
  440. double recRateBatchMean ( 0.0 );
  441. for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  442. {
  443. recRateBatchMean += *itRun;
  444. }
  445. recRateBatchMean /= it->size();
  446. recRatesBatchMean.push_back ( recRateBatchMean );
  447. double recRateBatchStdDev ( 0.0 );
  448. for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
  449. {
  450. recRateBatchStdDev += pow ( *itRun - recRateBatchMean, 2 );
  451. }
  452. recRateBatchStdDev /= it->size();
  453. recRateBatchStdDev = sqrt ( recRateBatchStdDev );
  454. recRatesBatchStdDev.push_back ( recRateBatchStdDev );
  455. }
  456. datasize = classesUsed*trainExPerClass;
  457. for ( uint i = 0; i < recRatesBatchMean.size(); i++)
  458. {
  459. std::cerr << "size: " << datasize << " recRatesBatchMean: " << recRatesBatchMean[i] << " recRatesBatchStdDev: " << recRatesBatchStdDev[i] << std::endl;
  460. datasize += classesUsed*incrementalAddSize ;
  461. }
  462. }
  463. else
  464. {
  465. std::cerr << "========================" << std::endl;
  466. std::cerr << "No classification done therefor no classification times available." << std::endl;
  467. }
  468. return 0;
  469. }