/**
* @file eccv2012-AwA.cpp
* @brief ECCV 2012 Experiment with Animals with Attributes
* @author Alexander Freytag
* @date 06-02-2012 (dd-mm-yyyy)
*/
#include <exception>
#include <iostream>
#include <string>
#include <vector>
//----------
#include <core/basics/vectorio.h>
#include <core/basics/Timer.h>
#include <core/basics/Config.h>
//----------
#include <vislearning/cbaselib/MultiDataset.h>
#include <vislearning/baselib/Globals.h>
//----------
#include <gp-hik-core/FastMinKernel.h>
#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
#include <gp-hik-core/parameterizedFunctions/PFExp.h>
#include <gp-hik-core/tools.h>
//----------
#include "datatools.h"

using namespace std;
using namespace NICE;
using namespace OBJREC;
  26. /**
  27. ECCV 2012 Experiment with Animals with Attributes
  28. */
  29. int main (int argc, char **argv)
  30. {
  31. std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
  32. NICE::Config conf ( argc, argv );
  33. string pf_s = conf.gS("main", "transform", "absexp");
  34. int nrRuns = conf.gI("main", "nrRuns", 1);
  35. int dim = conf.gI("main", "dim", 2000);
  36. conf.sD( "FMKGPHyperparameterOptimization", "parameter_upper_bound", 5.0 );
  37. conf.sD( "FMKGPHyperparameterOptimization", "parameter_lower_bound", 1.0 );
  38. if ( pf_s == "absexp" )
  39. conf.sS( "FMKGPHyperparameterOptimization", "transform", "absexp" );
  40. else if ( pf_s == "exp" )
  41. conf.sS( "FMKGPHyperparameterOptimization", "transform", "exp" );
  42. else
  43. fthrow(Exception, "Parameterized function type " << pf_s << " not yet implemented");
  44. std::cerr << "Transformation type: " << pf_s << std::endl;
  45. std::string ext = conf.gS("main", "ext", ".txt");
  46. std::cerr << "Using cache extension: " << ext << std::endl;
  47. // read training set
  48. std::vector< std::vector<double> > trainData;
  49. NICE::Vector y;
  50. double AARR(0.0); // averaged average recognition rate :)
  51. for (int run = 0; run < nrRuns; run++)
  52. {
  53. MultiDataset md ( &conf );
  54. const ClassNames & classNamesTrain = md.getClassNames("train");
  55. const LabeledSet *train = md["train"];
  56. readData< vector< vector<double> >, vector<double> > ( conf, *train, trainData, y, ext ); //works correctly wit AwA
  57. transposeVectorOfVectors ( trainData );
  58. // DEBUG
  59. #if 0
  60. Quantization q ( conf.gI("HIKGP", "num_bins") );
  61. for ( uint i = 0 ; i < trainData.size() ; i++ )
  62. for ( uint j = 0 ; j < trainData[i].size(); j++ )
  63. trainData[i][j] = q.getPrototype ( q.quantize( trainData[i][j] ) );
  64. #endif
  65. // END DEBUG
  66. double noise = 0.1;
  67. FastMinKernel *fmk = new FastMinKernel ( trainData, noise, dim );
  68. FMKGPHyperparameterOptimization hyper ( &conf, fmk );
  69. hyper.optimize ( y );
  70. // ------------------ TESTING
  71. // q'n'd memory extensive solution
  72. const LabeledSet *test = md["test"];
  73. VVector testData;
  74. Vector yTest;
  75. readDataAwA ( conf, *test, testData, yTest, ext ); //ok, reading the data works also correctly with the AwA-dataformat
  76. // DEBUG
  77. #if 0
  78. for ( uint i = 0 ; i < testData.size() ; i++ )
  79. for ( uint j = 0 ; j < testData[i].size(); j++ )
  80. testData[i][j] = q.getPrototype ( q.quantize( testData[i][j] ) );
  81. #endif
  82. //DEBUG END
  83. Timer t;
  84. Matrix confusion ( y.Max()+1, yTest.Max() + 1, 0.0 );
  85. for ( uint i = 0 ; i < testData.size(); i++ )
  86. {
  87. const Vector & xstar = testData[i];
  88. // the following is just to be sure that we
  89. // do not count the time necessary for conversion
  90. SparseVector xstar_sparse ( xstar ); //default tolerance is 10e-10
  91. uint classno_groundtruth = yTest[i];
  92. SparseVector scores;
  93. t.start();
  94. uint classno_estimated = hyper.classify ( xstar_sparse, scores );
  95. t.stop();
  96. scores.store(cerr);
  97. cerr << "[" << i << " / " << testData.size() << "] " << classno_estimated << " " << classno_groundtruth << " time: " << t.getLast() << endl;
  98. confusion( classno_groundtruth, classno_estimated ) += 1;
  99. //confusion( classno_estimated, classno_groundtruth ) += 1;
  100. }
  101. confusion.normalizeRowsL1();
  102. cerr << confusion << endl;
  103. cerr << "average recognition rate: " << confusion.trace()/confusion.rows() << endl;
  104. AARR += confusion.trace()/confusion.rows();
  105. // //don't waste memory;
  106. // delete train;
  107. // delete test;
  108. // delete fmk;
  109. }
  110. AARR /= (nrRuns);
  111. std::cerr << "final averaged recognition rate: " << AARR << std::endl;
  112. return 0;
  113. }