// eccv2012-AwA.cpp
/**
* @file eccv2012-AwA.cpp
* @brief ECCV 2012 Experiment with Animals with Attributes
* @author Alexander Freytag
* @date 06-02-2012 (dd-mm-yyyy)
*/
#include <vector>
//----------
#include <core/basics/vectorio.h>
#include <core/basics/Timer.h>
#include <core/basics/Config.h>
//----------
#include <vislearning/cbaselib/MultiDataset.h>
#include <vislearning/baselib/Globals.h>
//----------
#include <gp-hik-core/FastMinKernel.h>
#include <gp-hik-core/FMKGPHyperparameterOptimization.h>
#include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
#include <gp-hik-core/parameterizedFunctions/PFExp.h>
#include <gp-hik-core/tools.h>
//----------
#include "datatools.h"

using namespace std;
using namespace NICE;
using namespace OBJREC;
/**
* ECCV 2012 Experiment with Animals with Attributes:
* trains a GP-HIK classifier on the AwA features and reports the
* (averaged) average recognition rate over nrRuns random splits.
*/
  29. int main (int argc, char **argv)
  30. {
  31. std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
  32. Config conf ( argc, argv );
  33. string pf_s = conf.gS("main", "transform", "absexp");
  34. int nrRuns = conf.gI("main", "nrRuns", 1);
  35. int dim = conf.gI("main", "dim", 2000);
  36. ParameterizedFunction *pf;
  37. double parameterLowerBound = conf.gI("HIKGP", "parameter_lower_bound", 1.0 );
  38. double parameterUpperBound = conf.gI("HIKGP", "parameter_upper_bound", 5.0 );
  39. if ( pf_s == "absexp" )
  40. pf = new PFAbsExp( 1.0, parameterLowerBound, parameterUpperBound );
  41. else if ( pf_s == "exp" )
  42. pf = new PFExp ( 1.0, parameterLowerBound, parameterUpperBound );
  43. else
  44. fthrow(Exception, "Parameterized function type " << pf_s << " not yet implemented");
  45. cerr << "Transformation type: " << pf_s << endl;
  46. string ext = conf.gS("main", "ext", ".txt");
  47. cerr << "Using cache extension: " << ext << endl;
  48. // read training set
  49. vector< vector<double> > trainData;
  50. Vector y;
  51. double AARR(0.0); // averaged average recognition rate :)
  52. for (int run = 0; run < nrRuns; run++)
  53. {
  54. MultiDataset md ( &conf );
  55. const ClassNames & classNamesTrain = md.getClassNames("train");
  56. const LabeledSet *train = md["train"];
  57. readData< vector< vector<double> >, vector<double> > ( conf, *train, trainData, y, ext ); //works correctly wit AwA
  58. transposeVectorOfVectors ( trainData );
  59. // DEBUG
  60. #if 0
  61. Quantization q ( conf.gI("HIKGP", "num_bins") );
  62. for ( uint i = 0 ; i < trainData.size() ; i++ )
  63. for ( uint j = 0 ; j < trainData[i].size(); j++ )
  64. trainData[i][j] = q.getPrototype ( q.quantize( trainData[i][j] ) );
  65. #endif
  66. // END DEBUG
  67. double noise = 0.1;
  68. FastMinKernel *fmk = new FastMinKernel ( trainData, noise, dim );
  69. FMKGPHyperparameterOptimization hyper ( &conf, pf, fmk );
  70. hyper.optimize ( y );
  71. // ------------------ TESTING
  72. // q'n'd memory extensive solution
  73. const LabeledSet *test = md["test"];
  74. VVector testData;
  75. Vector yTest;
  76. readDataAwA ( conf, *test, testData, yTest, ext ); //ok, reading the data works also correctly with the AwA-dataformat
  77. // DEBUG
  78. #if 0
  79. for ( uint i = 0 ; i < testData.size() ; i++ )
  80. for ( uint j = 0 ; j < testData[i].size(); j++ )
  81. testData[i][j] = q.getPrototype ( q.quantize( testData[i][j] ) );
  82. #endif
  83. //DEBUG END
  84. Timer t;
  85. Matrix confusion ( y.Max()+1, yTest.Max() + 1, 0.0 );
  86. for ( uint i = 0 ; i < testData.size(); i++ )
  87. {
  88. const Vector & xstar = testData[i];
  89. // the following is just to be sure that we
  90. // do not count the time necessary for conversion
  91. SparseVector xstar_sparse ( xstar ); //default tolerance is 10e-10
  92. uint classno_groundtruth = yTest[i];
  93. SparseVector scores;
  94. t.start();
  95. uint classno_estimated = hyper.classify ( xstar_sparse, scores );
  96. t.stop();
  97. scores.store(cerr);
  98. cerr << "[" << i << " / " << testData.size() << "] " << classno_estimated << " " << classno_groundtruth << " time: " << t.getLast() << endl;
  99. confusion( classno_groundtruth, classno_estimated ) += 1;
  100. //confusion( classno_estimated, classno_groundtruth ) += 1;
  101. }
  102. confusion.normalizeRowsL1();
  103. cerr << confusion << endl;
  104. cerr << "average recognition rate: " << confusion.trace()/confusion.rows() << endl;
  105. AARR += confusion.trace()/confusion.rows();
  106. // //don't waste memory;
  107. // delete train;
  108. // delete test;
  109. // delete fmk;
  110. }
  111. AARR /= (nrRuns);
  112. std::cerr << "final averaged recognition rate: " << AARR << std::endl;
  113. delete pf;
  114. return 0;
  115. }