IL_AL.cpp

/**
 * @file IL_AL.cpp
 * @brief Incrementally train the GP HIK classifier using the predictive variance and its approximations to select new samples
 * @author Alexander Freytag
 * @date 09-05-2012
 */
#include <cmath>
#include <iostream>
#include <limits>
#include <map>
#include <set>
#include <sstream>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <vector>

#include <core/basics/Config.h>
#include <core/basics/StringTools.h>
#include <core/vector/SparseVectorT.h>
#include <core/vector/VectorT.h>
//----------
#include "vislearning/baselib/ProgressBar.h"
#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
#include "vislearning/cbaselib/MultiDataset.h"
#include <vislearning/cbaselib/LabeledSet.h>
#include "vislearning/cbaselib/ClassificationResults.h"
#include <vislearning/baselib/Globals.h>
#include <vislearning/math/kernels/KernelData.h>
//----------
#include "gp-hik-exp/progs/datatools.h"
#include "gp-hik-exp/GPHIKClassifierNICE.h"
//----------
// #include <incrementallearning/IL_Framework_Generic.h>

using namespace std;
using namespace NICE;
using namespace OBJREC;

enum verbose_level {NONE = 0, LOW = 1, MEDIUM = 2, EVERYTHING = 3};
enum QueryStrategy
{
  RANDOM = 0,
  GPMEAN,
  GPPREDVAR,
  GPHEURISTIC
};
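// RANDOM      : query unlabeled examples uniformly at random (baseline)
// GPMEAN      : query examples with the smallest absolute GP mean over all one-vs-all
//               scores, i.e., the ones closest to a decision boundary
// GPPREDVAR   : query examples with the largest GP predictive variance
// GPHEURISTIC : query examples minimizing |mean| / sqrt(noise^2 + variance),
//               combining both quantities (see the score computation below)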
std::string convertInt ( int number )
{
  stringstream ss;  //create a stringstream
  ss << number;     //add the number to the stream
  return ss.str();  //return a string with the contents of the stream
}
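// Note: with C++11 or later, std::to_string ( number ) would make this helper obsolete.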
/**
 * Computes kernel matrices from randomly or deterministically chosen training images
 * and evaluates their performance, using ROI optimization.
 */
int main ( int argc, char **argv )
{
  std::cout.precision ( 10 );
  std::cerr.precision ( 10 );

  NICE::Config conf ( argc, argv );
  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
  int incrementalAddSize = conf.gI ( "GP_IL", "incrementalAddSize", 1 );
  int nrOfIncrements = conf.gI ( "GP_IL", "nrOfIncrements", 9 );
  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );
  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );

  double squaredNoise = pow ( conf.gD ( "GPHIKClassifierNICE", "noise", 0.01 ), 2 );

  string queryStrategyString = conf.gS ( "main", "queryStrategy", "random" );
  QueryStrategy queryStrategy;
  if ( queryStrategyString.compare ( "gpMean" ) == 0 )
  {
    queryStrategy = GPMEAN;
  }
  else if ( queryStrategyString.compare ( "gpPredVar" ) == 0 )
  {
    queryStrategy = GPPREDVAR;
  }
  else if ( queryStrategyString.compare ( "gpHeuristic" ) == 0 )
  {
    queryStrategy = GPHEURISTIC;
  }
  else
  {
    queryStrategy = RANDOM;
  }
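  /* A minimal sketch of a config file covering the options read above. The sections and
   * keys match the conf.* calls in this file; the surrounding file syntax is determined
   * by NICE::Config, so treat this as an illustration only:
   *
   *   [GP_IL]
   *   trainExPerClass    = 10
   *   incrementalAddSize = 1
   *   nrOfIncrements     = 9
   *   num_runs           = 10
   *   do_classification  = true
   *   verbose            = 1
   *
   *   [main]
   *   queryStrategy = gpHeuristic
   *   ext           = .txt
   *
   *   [GPHIKClassifierNICE]
   *   noise = 0.01
   *
   * queryStrategy accepts gpMean, gpPredVar, and gpHeuristic, and falls back to random.
   */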
  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
  verbose_level verbose ( NONE );
  switch ( verbose_int )
  {
    case 0:
      verbose = NONE;
      break;
    case 1:
      verbose = LOW;
      break;
    case 2:
      verbose = MEDIUM;
      break;
    case 3:
      verbose = EVERYTHING;
      break;
  }
  /* initialize the random seed */
  srand ( time ( NULL ) );
  // srand ( 0 ); //seed with 0 for reproducible results
  // =========================== INIT ===========================
  std::vector<std::vector<double> > recognitions_rates ( nrOfIncrements+1 );
  std::vector<std::vector<float> > classification_times ( nrOfIncrements+1 );
  std::vector<std::vector<float> > IL_training_times ( nrOfIncrements );

  int nrOfClassesUsed ( 0 );
  for ( int run = 0; run < num_runs; run++ )
  {
    std::cerr << "run: " << run << std::endl;

    //15-scenes settings
    std::string ext = conf.gS ( "main", "ext", ".txt" );
    std::cerr << "Using cache extension: " << ext << std::endl;

    OBJREC::MultiDataset md ( &conf );
    std::cerr << "now read the dataset" << std::endl;

    // read the training set
    vector< NICE::Vector > trainDataOrig;
    Vector y;
    string trainRun ( "train" + convertInt ( run ) );
    std::cerr << "look for " << trainRun << std::endl;
    const LabeledSet *train = md[ trainRun ]; //previously, we only selected "train"; now we select the permutation for this run

    LabeledSet::Permutation orderTrain;
    train->getPermutation ( orderTrain );

    std::vector<string> filenamesTraining;
    for ( LabeledSet::Permutation::const_iterator i = orderTrain.begin(); i != orderTrain.end(); i++ )
    {
      string filename ( ( i->second )->img() );
      filenamesTraining.push_back ( filename );
    }

    readData< std::vector< NICE::Vector >, NICE::Vector > ( conf, *train, trainDataOrig, y, ext );
    std::set<int> classesAvailable;
    for ( uint i = 0; i < y.size(); i++ )
    {
      //the set automatically checks for duplicates
      classesAvailable.insert ( y[i] );
    }

    int numberOfClasses = classesAvailable.size();

    std::map<int,int> nrExamplesPerClassInDataset;
    std::map<int,std::vector<int> > examplesPerClassInDataset;

    for ( std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++ )
    {
      nrExamplesPerClassInDataset.insert ( std::pair<int,int> ( *it, 0 ) );
      examplesPerClassInDataset.insert ( std::pair<int,std::vector<int> > ( *it, std::vector<int>(0) ) );
    }

    for ( uint i = 0; i < y.size(); i++ )
    {
      ( examplesPerClassInDataset.find ( y[i] )->second ).push_back ( i );
    }

    for ( std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++ )
    {
      nrExamplesPerClassInDataset.find ( it->first )->second = it->second.size();
    }

    for ( std::map<int,int>::const_iterator it = nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++ )
    {
      cerr << it->first << ": " << it->second << endl;
    }
    Examples examples;

    //count how many examples of every class we have while actively selecting new examples
    //NOTE: this works only if the class numbers are consecutive
    NICE::Vector pickedExamplesPerClass ( classesAvailable.size(), trainExPerClass );

    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp ( examplesPerClassInDataset );

    //choose the examples of every class used for the initial training
    //we always take the first examples of each class, since the dataset already comes in a random order
    for ( std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
    {
      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find ( *clIt );
      std::cerr << "pick training examples for class " << *clIt << std::endl;

      for ( int i = 0; i < trainExPerClass; i++ )
      {
        std::cerr << "i: " << i << std::endl;
        int exampleIndex ( 0 ); //old: rand() % ( exIt->second.size() )
        std::cerr << "pick example " << exIt->second[exampleIndex] << " - " << y[ exIt->second[exampleIndex] ] << " -- " << filenamesTraining[ exIt->second[exampleIndex] ] << std::endl;

        Example example;
        NICE::Vector & xTrain = trainDataOrig[ exIt->second[exampleIndex] ];
        example.svec = new SparseVector ( xTrain );
        //let's take this example and its corresponding label (which should be *clIt)
        examples.push_back ( pair<int, Example> ( y[ exIt->second[exampleIndex] ], example ) );

        exIt->second.erase ( exIt->second.begin() + exampleIndex );
      }
    }
    std::vector<string> filenamesUnlabeled;
    filenamesUnlabeled.clear();

    //which examples are left to be actively chosen later on?
    std::vector<int> unlabeledExamples ( y.size() - trainExPerClass*classesAvailable.size() );
    int exCnt ( 0 );
    for ( std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
    {
      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find ( *clIt );
      //list all remaining examples of this specific class
      for ( std::vector<int>::const_iterator it = exIt->second.begin(); it != exIt->second.end(); it++ )
      {
        unlabeledExamples[exCnt] = *it;
        exCnt++;
        filenamesUnlabeled.push_back ( filenamesTraining[*it] );
      }
    }
    time_t prep_start_time = clock();
    GPHIKClassifierNICE * classifier = new GPHIKClassifierNICE ( &conf );
    FeaturePool fp; // will be ignored
    classifier->train ( fp, examples );
    float time_preparation = ( float ) ( clock() - prep_start_time );
    std::cerr << "Time for initial training: " << time_preparation / CLOCKS_PER_SEC << std::endl;
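    //note: clock() measures consumed CPU time, so all timings reported by this program
    //are CPU seconds rather than wall-clock time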

    nrOfClassesUsed = classesAvailable.size();

    // ------------------ TESTING ------------------
    string testRun ( "test" + convertInt ( run ) );
    const LabeledSet *test = md[ testRun ]; //previously, we only selected "test"; now we select the permutation for this run
    VVector testData;
    Vector yTest;
    readData< VVector, Vector > ( conf, *test, testData, yTest, ext );

    NICE::Matrix confusionMatrix ( numberOfClasses, numberOfClasses );
    confusionMatrix.set ( 0.0 );

    time_t start_time = clock();
    std::vector<int> chosen_examples_per_class ( numberOfClasses );

    std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;
    if ( do_classification )
    {
      for ( uint i = 0 ; i < testData.size(); i++ )
      {
        Example example;
        const Vector & xstar = testData[i];
        SparseVector xstar_sparse ( xstar );
        OBJREC::ClassificationResult result;
        example.svec = &xstar_sparse;

        result = classifier->classify ( example );
        // cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;
        result.classno_groundtruth = yTest[i];
        confusionMatrix ( result.classno_groundtruth, result.classno ) ++;
      }

      float time_classification = ( float ) ( clock() - start_time );
      if ( verbose >= LOW )
        cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
      ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );

      confusionMatrix.normalizeRowsL1();
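      //after L1-normalizing each row, the diagonal entry (i,i) is the recall of class i,
      //so the value accumulated below is the mean class-wise recall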
      double avg_recognition_rate = 0.0;
      for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
      {
        if ( verbose >= MEDIUM )
        {
          std::cerr << "Class no: " << i << " : " << confusionMatrix ( i, i ) << std::endl;
        }
        avg_recognition_rate += confusionMatrix ( i, i );
      }
      avg_recognition_rate /= confusionMatrix.rows();

      std::cerr << confusionMatrix;
      std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " % -- " << examples.size() << " training examples used" << std::endl << std::endl;

      recognitions_rates[0].push_back ( avg_recognition_rate*100 );
    }
    //now start the incremental learning part
    for ( int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++ )
    {
      //choose new examples to be added to the training set
      Examples newExamples;

      //simply count how many candidate examples we have
      int nrOfPossibleExamples ( unlabeledExamples.size() );
      if ( queryStrategy == RANDOM )
      {
        std::cerr << "print chosen examples: " << std::endl;
        for ( int i = 0; i < incrementalAddSize; i++ )
        {
          int exampleIndex ( rand() % ( unlabeledExamples.size() ) );

          Example newExample;
          NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exampleIndex] ];
          newExample.svec = new SparseVector ( xTrain );
          int label ( y[ unlabeledExamples[exampleIndex] ] );
          newExamples.push_back ( pair<int, Example> ( label, newExample ) );
          unlabeledExamples.erase ( unlabeledExamples.begin() + exampleIndex );

          std::cerr << i+1 << " / " << incrementalAddSize << " : " << filenamesUnlabeled[ exampleIndex ] << std::endl;
          filenamesUnlabeled.erase ( filenamesUnlabeled.begin() + exampleIndex );

          pickedExamplesPerClass[label]++;
        }
      } // end of example selection for RANDOM
      else if ( ( queryStrategy == GPMEAN ) || ( queryStrategy == GPPREDVAR ) || ( queryStrategy == GPHEURISTIC ) )
      {
        //compute an uncertainty score for every unlabeled example according to the query strategy
        std::vector<std::pair<int,double> > scores;
        scores.clear();

        time_t unc_pred_start_time = clock();

        // std::cerr << "possible examples to query: " << unlabeledExamples.size() << std::endl;
        for ( uint exIndex = 0; exIndex < unlabeledExamples.size(); exIndex++ )
        {
          Example example;
          NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exIndex] ];
          SparseVector xTrainSparse ( xTrain );
          example.svec = &xTrainSparse;

          if ( queryStrategy == GPMEAN )
          {
            ClassificationResult r = classifier->classify ( example );
            double bestScore ( numeric_limits<double>::max() );
            for ( int clCnt = 0; clCnt < nrOfClassesUsed; clCnt++ )
            {
              if ( fabs ( r.scores[clCnt] ) < bestScore )
                bestScore = fabs ( r.scores[clCnt] );
            }
            scores.push_back ( std::pair<int,double> ( exIndex, bestScore ) );
          }
          else if ( queryStrategy == GPPREDVAR )
          {
            NICE::Vector singleUncertainties;
            //use the predictive variance computation specified in the config file
            classifier->predictUncertainty ( example, singleUncertainties );
            //take the maximum of the scores for the predictive variance
            scores.push_back ( std::pair<int,double> ( exIndex, singleUncertainties.Max() ) );
          }
          else if ( queryStrategy == GPHEURISTIC )
          {
            NICE::Vector singleUncertainties;
            //use the predictive variance computation specified in the config file
            classifier->predictUncertainty ( example, singleUncertainties );
            //compute the mean values for every class
            ClassificationResult r = classifier->classify ( example );
            for ( uint tmp = 0; tmp < singleUncertainties.size(); tmp++ )
            {
              singleUncertainties[tmp] = fabs ( r.scores[tmp] ) / sqrt ( squaredNoise + singleUncertainties[tmp] );
            }
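            //this realizes the uncertainty heuristic
            //  score_c(x) = |mu_c(x)| / sqrt( sigma_n^2 + sigma_c^2(x) ),
            //with squaredNoise = sigma_n^2 taken from the config, i.e., the one-vs-all
            //mean rescaled by the standard deviation of the predictive distribution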
            //take the minimum of the scores for the heuristic measure
            scores.push_back ( std::pair<int,double> ( exIndex, singleUncertainties.Min() ) );
          }
        }
        float time_score_computation = ( float ) ( clock() - unc_pred_start_time );

        //pick the examples with the best scores
        //we could speed this up using a more sophisticated search method, e.g., the one sketched below
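        //a possible alternative (a sketch, not used here): move the incrementalAddSize best
        //entries to the front in a single pass instead of rescanning the whole score vector
        //incrementalAddSize times, e.g.
        //  std::partial_sort ( scores.begin(), scores.begin() + incrementalAddSize, scores.end(),
        //                      comp ); //comp compares .second: > for GPPREDVAR, < otherwise
        //and then query the first incrementalAddSize entries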
        if ( queryStrategy == GPPREDVAR ) //take the maximum of the scores for the predictive variance
        {
          std::set<int> chosenExamplesForThisRun;
          chosenExamplesForThisRun.clear();
          for ( int i = 0; i < incrementalAddSize; i++ )
          {
            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
            for ( std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt != scores.end(); jIt++ )
            {
              if ( jIt->second > bestExample->second )
                bestExample = jIt;
            }

            Example newExample;
            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[ bestExample->first ] ];
            newExample.svec = new SparseVector ( xTrain );
            //this is the actual ACTIVE LEARNING step (query a label)
            int label ( y[ unlabeledExamples[ bestExample->first ] ] );
            newExamples.push_back ( pair<int, Example> ( label, newExample ) );
            //remember the index, to safely remove this example from unlabeledExamples afterwards
            chosenExamplesForThisRun.insert ( bestExample->first );
            scores.erase ( bestExample );
            pickedExamplesPerClass[label]++;
          }

          std::cerr << "print chosen examples: " << std::endl;
          int tmpCnt ( 0 );
          for ( std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++ )
          {
            std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " << filenamesUnlabeled[ *it ] << std::endl;
          }

          //delete the queried examples from the set of unlabeled ones
          //do this in decreasing order of the indices to ensure valid access
          for ( std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++ )
          {
            unlabeledExamples.erase ( unlabeledExamples.begin() + ( *it ) );
            filenamesUnlabeled.erase ( filenamesUnlabeled.begin() + ( *it ) ); //keep the filename list in sync
          }
        }
        else //take the minimum of the scores for the heuristic and the gp mean (minimum margin)
        {
          std::set<int> chosenExamplesForThisRun;
          chosenExamplesForThisRun.clear();
          for ( int i = 0; i < incrementalAddSize; i++ )
          {
            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();
            for ( std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt != scores.end(); jIt++ )
            {
              if ( jIt->second < bestExample->second )
                bestExample = jIt;
            }

            Example newExample;
            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[ bestExample->first ] ];
            newExample.svec = new SparseVector ( xTrain );
            //this is the actual ACTIVE LEARNING step (query a label)
            int label ( y[ unlabeledExamples[ bestExample->first ] ] );
            newExamples.push_back ( pair<int, Example> ( label, newExample ) );
            //remember the index, to safely remove this example from unlabeledExamples afterwards
            chosenExamplesForThisRun.insert ( bestExample->first );
            scores.erase ( bestExample );
            pickedExamplesPerClass[label]++;
          }

          std::cerr << "print chosen examples: " << std::endl;
          int tmpCnt ( 0 );
          for ( std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++ )
          {
            std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " << filenamesUnlabeled[ *it ] << std::endl;
          }

          //delete the queried examples from the set of unlabeled ones
          //do this in decreasing order of the indices to ensure valid access
          for ( std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++ )
          {
            unlabeledExamples.erase ( unlabeledExamples.begin() + ( *it ) );
            filenamesUnlabeled.erase ( filenamesUnlabeled.begin() + ( *it ) );
          }
        }
        std::cerr << "Time used to compute query-scores for " << nrOfPossibleExamples << " examples: " << time_score_computation / CLOCKS_PER_SEC << " [s]" << std::endl;
      } // end of example selection for GPMEAN, GPPREDVAR, or GPHEURISTIC
      std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;

      time_t IL_add_start_time = clock();
      classifier->addMultipleExamples ( newExamples );

      //release the memory used by newExamples
      for ( uint tmp = 0; tmp < newExamples.size(); tmp++ )
      {
        delete newExamples[tmp].second.svec;
        newExamples[tmp].second.svec = NULL;
      }

      float time_IL_add = ( float ) ( clock() - IL_add_start_time );
      std::cerr << "Time for IL-adding of " << incrementalAddSize << " examples to already " << nrOfClassesUsed*trainExPerClass + incrementalAddSize*incrementationStep << " training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
      IL_training_times[incrementationStep].push_back ( time_IL_add / CLOCKS_PER_SEC );
      //do the classification to evaluate the benefit of the new examples
      if ( do_classification )
      {
        confusionMatrix.set ( 0.0 );
        start_time = clock(); //restart the timer so that we only measure the classification of this increment
        for ( uint i = 0 ; i < testData.size(); i++ )
        {
          Example example;
          const Vector & xstar = testData[i];
          SparseVector xstar_sparse ( xstar );
          example.svec = &xstar_sparse;
          OBJREC::ClassificationResult result;

          result = classifier->classify ( example );

          result.classno_groundtruth = yTest[i];
          confusionMatrix ( result.classno_groundtruth, result.classno ) ++;
        }

        float time_classification = ( float ) ( clock() - start_time );
        if ( verbose >= LOW )
          std::cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass + incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
        ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );

        confusionMatrix.normalizeRowsL1();
        double avg_recognition_rate = 0.0;
        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
        {
          if ( verbose >= MEDIUM )
          {
            std::cerr << "Class no: " << i << " : " << confusionMatrix ( i, i ) << std::endl;
          }
          avg_recognition_rate += confusionMatrix ( i, i );
        }
        avg_recognition_rate /= confusionMatrix.rows();

        std::cerr << confusionMatrix;
        std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " % -- " << nrOfClassesUsed*trainExPerClass + incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl << std::endl;

        recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );
      } //classification after IL adding
    } //loop over the incrementation steps
    std::cerr << "Final statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;

    //don't waste memory!
    delete classifier;
    for ( uint tmp = 0; tmp < examples.size(); tmp++ )
    {
      delete examples[tmp].second.svec;
      examples[tmp].second.svec = NULL;
    }
  } //runs
  std::cerr << "no of classes used: " << nrOfClassesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;

  // ================= EVALUATION ========================
  if ( do_classification )
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "content of classification_times: " << std::endl;
    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
    {
      for ( std::vector<float>::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
      {
        std::cerr << *jt << " ";
      }
      std::cerr << std::endl;
    }

    std::vector<float> mean_classification_times;
    std::vector<float> std_dev_classification_times;
    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
    {
      float mean_classification_time ( 0.0 );
      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        mean_classification_time += *itRun;
      }
      mean_classification_time /= it->size();
      mean_classification_times.push_back ( mean_classification_time );

      double std_dev_classification_time ( 0.0 );
      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        std_dev_classification_time += pow ( *itRun - mean_classification_time, 2 );
      }
      std_dev_classification_time /= it->size();
      std_dev_classification_time = sqrt ( std_dev_classification_time );
      std_dev_classification_times.push_back ( std_dev_classification_time );
    }
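    //note: dividing the summed squared deviations by it->size() yields the population
    //standard deviation over the runs; the same convention is used for the IL training
    //times and the recognition rates below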
    int datasize ( nrOfClassesUsed*trainExPerClass );
    for ( uint i = 0; i < mean_classification_times.size(); i++ )
    {
      std::cerr << "size: " << datasize << " mean classification time: " << mean_classification_times[i] << " std_dev classification time: " << std_dev_classification_times[i] << std::endl;
      datasize += incrementalAddSize;
    }
  }
  else
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "No classification done, therefore no classification times available." << std::endl;
  }
  std::cerr << "========================" << std::endl;
  std::cerr << "content of IL_training_times: " << std::endl;
  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
  {
    for ( std::vector<float>::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
    {
      std::cerr << *jt << " ";
    }
    std::cerr << std::endl;
  }

  std::vector<float> mean_IL_training_times;
  std::vector<float> std_dev_IL_training_times;
  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
  {
    float mean_IL_training_time ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      mean_IL_training_time += *itRun;
    }
    mean_IL_training_time /= it->size();
    mean_IL_training_times.push_back ( mean_IL_training_time );

    double std_dev_IL_training_time ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      std_dev_IL_training_time += pow ( *itRun - mean_IL_training_time, 2 );
    }
    std_dev_IL_training_time /= it->size();
    std_dev_IL_training_time = sqrt ( std_dev_IL_training_time );
    std_dev_IL_training_times.push_back ( std_dev_IL_training_time );
  }

  int datasize ( nrOfClassesUsed*trainExPerClass );
  for ( uint i = 0; i < mean_IL_training_times.size(); i++ )
  {
    cerr << "size: " << datasize << " and adding " << incrementalAddSize << " mean IL_training time: " << mean_IL_training_times[i] << " std_dev IL_training time: " << std_dev_IL_training_times[i] << endl;
    datasize += incrementalAddSize;
  }
  if ( do_classification )
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "content of recognition_rates: " << std::endl;
    for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
    {
      for ( std::vector<double>::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
      {
        std::cerr << *jt << " ";
      }
      std::cerr << std::endl;
    }

    std::cerr << "calculating final results " << std::endl;
    std::vector<double> mean_recs;
    std::vector<double> std_dev_recs;
    for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
    {
      double mean_rec ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        mean_rec += *itRun;
      }
      mean_rec /= it->size();
      mean_recs.push_back ( mean_rec );

      double std_dev_rec ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        std_dev_rec += pow ( *itRun - mean_rec, 2 );
      }
      std_dev_rec /= it->size();
      std_dev_rec = sqrt ( std_dev_rec );
      std_dev_recs.push_back ( std_dev_rec );
    }

    int datasize ( nrOfClassesUsed*trainExPerClass );
    for ( uint i = 0; i < recognitions_rates.size(); i++ )
    {
      std::cerr << "size: " << datasize << " mean_IL: " << mean_recs[i] << " std_dev_IL: " << std_dev_recs[i] << std::endl;
      datasize += incrementalAddSize;
    }
  }
  else
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "No classification done, therefore no recognition rates available." << std::endl;
  }
  return 0;
}