GMHIKernelRaw.cpp

/**
* @file GMHIKernelRaw.cpp
* @brief Fast multiplication with histogram intersection kernel matrices (Implementation)
* @author Erik Rodner, Alexander Freytag
* @date 01/02/2012
*/

#include <iostream>

#include <core/vector/VVector.h>
#include <core/basics/Timer.h>

#include "GMHIKernelRaw.h"

using namespace NICE;
using namespace std;
GMHIKernelRaw::GMHIKernelRaw( const std::vector< const NICE::SparseVector *> &_examples,
                              const double _d_noise,
                              const NICE::Quantization * _q
                            )
{
    this->examples_raw = NULL;
    this->nnz_per_dimension = NULL;
    this->table_A = NULL;
    this->table_B = NULL;
    this->table_T = NULL;
    this->d_noise = _d_noise;
    this->q = _q;

    initData(_examples);
}
GMHIKernelRaw::~GMHIKernelRaw()
{
    cleanupData();
}
void GMHIKernelRaw::cleanupData()
{
    if ( this->examples_raw != NULL ) {
        for ( uint d = 0; d < this->num_dimension; d++ )
            if (examples_raw[d] != NULL)
                delete [] examples_raw[d];
        delete [] this->examples_raw;
        this->examples_raw = NULL;
    }
    if ( this->nnz_per_dimension != NULL ) {
        delete [] this->nnz_per_dimension;
        this->nnz_per_dimension = NULL;
    }
    if ( this->table_A != NULL ) {
        for ( uint d = 0; d < this->num_dimension; d++ )
            if (table_A[d] != NULL)
                delete [] table_A[d];
        delete [] this->table_A;
        this->table_A = NULL;
    }
    if ( this->table_B != NULL ) {
        for ( uint d = 0; d < this->num_dimension; d++ )
            if (table_B[d] != NULL)
                delete [] table_B[d];
        delete [] this->table_B;
        this->table_B = NULL;
    }
    if ( this->table_T != NULL ) {
        for ( uint d = 0; d < this->num_dimension; d++ )
            if (table_T[d] != NULL)
                delete [] table_T[d];
        delete [] this->table_T;
        this->table_T = NULL;
    }
}
void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_examples )
{
    if (_examples.size() == 0 )
        fthrow(Exception, "No examples given for learning");

    cleanupData();

    this->num_dimension = _examples[0]->getDim();
    this->examples_raw = new sparseVectorElement *[num_dimension];
    this->nnz_per_dimension = new uint [num_dimension];
    this->num_examples = _examples.size();

    // waste memory and allocate a non-sparse data block
    sparseVectorElement **examples_raw_increment = new sparseVectorElement *[num_dimension];
    for (uint d = 0; d < this->num_dimension; d++)
    {
        this->examples_raw[d] = new sparseVectorElement [ this->num_examples ];
        examples_raw_increment[d] = this->examples_raw[d];
        this->nnz_per_dimension[d] = 0;
    }

    // additionally allocate a Vector with as many entries as examples
    // this vector will contain the L1 norm values of all examples + noise
    // thereby, it represents the diagonal entries of our kernel matrix for
    // the special case of the minimum kernel
    this->diagonalElements.resize ( this->num_examples );
    this->diagonalElements.set ( this->d_noise );

    uint example_index = 0;
    NICE::Vector::iterator itDiagEl = this->diagonalElements.begin();

    // minor pre-allocation
    uint index;
    double value;
    double l1norm;

    for ( std::vector< const NICE::SparseVector * >::const_iterator i = _examples.begin();
          i != _examples.end();
          i++, example_index++, itDiagEl++
        )
    {
        l1norm = 0.0;
        const NICE::SparseVector *x = *i;
        for ( NICE::SparseVector::const_iterator j = x->begin(); j != x->end(); j++ )
        {
            index = j->first;
            value = j->second;
            examples_raw_increment[index]->value = value;
            examples_raw_increment[index]->example_index = example_index;

            // move to the next element
            examples_raw_increment[index]++;
            this->nnz_per_dimension[index]++;

            l1norm = l1norm + value;
        }
        *itDiagEl = *itDiagEl + l1norm;
    }

    delete [] examples_raw_increment;

    // sort along each dimension
    for (uint d = 0; d < this->num_dimension; d++)
    {
        uint nnz = this->nnz_per_dimension[d];
        if ( nnz > 1 )
            std::sort( this->examples_raw[d], this->examples_raw[d] + nnz );
    }

    // pre-allocate the A and B matrices
    this->table_A = allocateTableAorB();
    this->table_B = allocateTableAorB();

    // Quantization for classification?
    if ( this->q != NULL )
    {
        // (1) if yes, set up the parameters of the quantization object
        this->q->computeParametersFromData ( this );
        this->table_T = allocateTableT();
    }
}
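
/*
 * Small worked illustration of the storage built above (hypothetical values,
 * not data from the library): given three sparse examples
 *   x0 = (0.2, 0),  x1 = (0.5, 0.1),  x2 = (0, 0.3),
 * initData produces one sorted array of (value, example_index) pairs per dimension:
 *   examples_raw[0] = { (0.2, ex 0), (0.5, ex 1) }   nnz_per_dimension[0] = 2
 *   examples_raw[1] = { (0.1, ex 1), (0.3, ex 2) }   nnz_per_dimension[1] = 2
 * and diagonalElements = ( 0.2, 0.6, 0.3 ) + d_noise, i.e. the L1 norms plus noise,
 * which equal the diagonal of the minimum kernel for non-negative features.
 */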
double **GMHIKernelRaw::allocateTableAorB() const
{
    double **table;
    table = new double *[this->num_dimension];
    for (uint i = 0; i < this->num_dimension; i++)
    {
        uint nnz = this->nnz_per_dimension[i];
        if (nnz > 0) {
            table[i] = new double [ nnz ];
        } else {
            table[i] = NULL;
        }
    }
    return table;
}
double **GMHIKernelRaw::allocateTableT() const
{
    // one row per dimension, one column per quantization bin
    double **table;
    table = new double *[this->num_dimension];
    for (uint i = 0; i < this->num_dimension; i++)
    {
        table[i] = new double [ this->q->getNumberOfBins() ];
    }
    return table;
}
void GMHIKernelRaw::copyTableAorB(double **src, double **dst) const
{
    for (uint i = 0; i < this->num_dimension; i++)
    {
        uint nnz = this->nnz_per_dimension[i];
        if (nnz > 0) {
            for (uint j = 0; j < nnz; j++)
                dst[i][j] = src[i][j];
        } else {
            dst[i] = NULL;
        }
    }
}
void GMHIKernelRaw::copyTableT(double **src, double **dst) const
{
    for (uint i = 0; i < this->num_dimension; i++)
    {
        for (uint j = 0; j < this->q->getNumberOfBins(); j++)
        {
            //FIXME can we speed this up using pointer increments?
            dst[i][j] = src[i][j];
        }
    }
}
void GMHIKernelRaw::updateTables ( const NICE::Vector _x ) const
{
    // pre-computations if quantization is activated
    double * prototypes = NULL;

    // store prototypes
    if ( this->q != NULL)
    {
        // number of quantization bins
        uint hmax = this->q->getNumberOfBins();

        prototypes = new double [ hmax * this->num_dimension ];
        double * p_prototypes = prototypes;

        for (uint dim = 0; dim < this->num_dimension; dim++)
        {
            for ( uint i = 0 ; i < hmax ; i++ )
            {
                // the raw kernel works on the original, untransformed features,
                // so no parameterized function is applied to the prototypes here
                *p_prototypes = this->q->getPrototype( i, dim );
                p_prototypes++;
            }
        }
    }
    // start the actual computations of A, B, and optionally T
    for (uint dim = 0; dim < this->num_dimension; dim++)
    {
        double alpha_sum = 0.0;
        double alpha_times_x_sum = 0.0;
        uint nnz = nnz_per_dimension[dim];

        // loop through all elements in sorted order
        sparseVectorElement *training_values_in_dim = examples_raw[dim];
        for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
        {
            // index of the feature
            int index = training_values_in_dim->example_index;
            // element of the feature
            double elem = training_values_in_dim->value;

            alpha_times_x_sum += _x[index] * elem;
            this->table_A[dim][cntNonzeroFeat] = alpha_times_x_sum;

            alpha_sum += _x[index];
            this->table_B[dim][cntNonzeroFeat] = alpha_sum;

            if ( this->q != NULL)
            {
//                // index of the element, which is always bigger than the current value fval
//                uint index = 0;
//                // we use the quantization of the original features! the transformed features were
//                // already used to calculate A and B, this of course assumes monotonic functions!!!
//                uint qBin = _q->quantize ( i->first, dim );
//
//                // the next loop is linear in max(hmax, n)
//                // REMARK: this could be changed to hmax*log(n), when
//                // we use binary search
//                for (uint j = 0; j < hmax; j++)
//                {
//                    double fval = prototypes[ dim*hmax + j ];
//                    double t;
//
//                    if ( (index == 0) && (j < qBin) ) {
//                        // current element is smaller than everything else
//                        // resulting value = fval * sum_l=1^n alpha_l
//                        t = fval*( _B[dim][this->ui_n-1 - nrZeroIndices] );
//                    } else {
//                        // move to next example, if necessary
//                        while ( (j >= qBin) && ( index < (this->ui_n-1-nrZeroIndices)) )
//                        {
//                            index++;
//                            iPredecessor = i;
//                            i++;
//
//                            if ( i->first != iPredecessor->first )
//                                qBin = _q->quantize ( i->first, dim );
//                        }
//                        // compute current element in the lookup table and keep in mind that
//                        // index is the next element and not the previous one
//                        //NOTE pay attention: this is only valid if all entries are positive! -
//                        // If not, ask whether the current feature is greater than zero. If so, subtract the nrZeroIndices, if not do not
//                        if ( (j >= qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
//                            // the current element (fval) is equal or bigger to the element indexed by index
//                            // in fact, the term B[dim][this->n-1-nrZeroIndices] - B[dim][index] is equal to zero and vanishes, which is logical, since all elements are smaller than j!
//                            t = _A[dim][index];// + fval*( _B[dim][this->ui_n-1-nrZeroIndices] - _B[dim][index] );
//                        } else {
//                            // standard case
//                            t = _A[dim][index-1] + fval*( _B[dim][this->ui_n-1-nrZeroIndices] - _B[dim][index-1] );
//                        }
//                    }
//                    Tlookup[ dim*hmax + j ] = t;
//                }
            }
        }
    }
    // the prototype values are only needed within this function
    delete [] prototypes;
}
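
/*
 * What the tables filled above contain: with pi_d denoting the permutation that
 * sorts the non-zero values of dimension d in ascending order, and with the
 * coefficient vector alpha = _x passed to updateTables,
 *
 *   table_A[d][k] = sum_{r=0..k} alpha_{pi_d(r)} * x_{pi_d(r), d}
 *   table_B[d][k] = sum_{r=0..k} alpha_{pi_d(r)}
 *
 * i.e. cumulative sums of the weighted feature values and of the weights
 * themselves, accumulated in the sorted order of dimension d.
 */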
/** multiply with a vector: A*x = y */
void GMHIKernelRaw::multiply (NICE::Vector & _y, const NICE::Vector & _x) const
{
    // STEP 1: initialize tables A and B
    updateTables(_x);

    _y.resize( this->num_examples );
    _y.set(0.0);

    for (uint dim = 0; dim < this->num_dimension; dim++)
    {
        uint nnz = this->nnz_per_dimension[dim];
        uint nz = this->num_examples - nnz;

        if ( nnz == 0 ) {
            // all values are zero in this dimension :) and we can simply ignore the feature
            continue;
        }

        sparseVectorElement *training_values_in_dim = examples_raw[dim];
        for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
        {
            uint feat = training_values_in_dim->example_index;
            uint inversePosition = cntNonzeroFeat;
            double fval = training_values_in_dim->value;

            double firstPart = this->table_A[dim][inversePosition];
            double secondPart = this->table_B[dim][nnz-1] - this->table_B[dim][inversePosition];

            _y[feat] += firstPart + fval * secondPart;
        }
    }

    for (uint feat = 0; feat < this->num_examples; feat++)
        _y[feat] += this->d_noise * _x[feat];
}
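
/*
 * Why the loop above yields (K_min + d_noise * I) * alpha, assuming non-negative
 * feature values: for example i with value v = x_{i,d} at sorted position p in
 * dimension d,
 *
 *   sum_j alpha_j * min(v, x_{j,d})
 *     = sum_{j: x_{j,d} <= v} alpha_j * x_{j,d}  +  v * sum_{j: x_{j,d} > v} alpha_j
 *     = table_A[d][p]                            +  v * ( table_B[d][nnz-1] - table_B[d][p] )
 *
 * which is exactly firstPart + fval * secondPart. Examples with a zero entry in
 * dimension d contribute nothing and are correctly absent from the tables.
 * Summing over all dimensions and adding d_noise * alpha_i gives _y[i].
 */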
/** get the number of rows in A */
uint GMHIKernelRaw::rows () const
{
    // return the number of examples
    return num_examples;
}

/** get the number of columns in A */
uint GMHIKernelRaw::cols () const
{
    // return the number of examples
    return num_examples;
}

double **GMHIKernelRaw::getTableA() const
{
    double **t = allocateTableAorB();
    copyTableAorB(this->table_A, t);
    return t;
}

double **GMHIKernelRaw::getTableB() const
{
    double **t = allocateTableAorB();
    copyTableAorB(this->table_B, t);
    return t;
}

double **GMHIKernelRaw::getTableT() const
{
    double **t = allocateTableT();
    copyTableT(this->table_T, t);
    return t;
}

uint *GMHIKernelRaw::getNNZPerDimension() const
{
    uint *v = new uint[this->num_dimension];
    for (uint i = 0; i < this->num_dimension; i++)
        v[i] = this->nnz_per_dimension[i];
    return v;
}

uint NICE::GMHIKernelRaw::getNumberOfDimensions() const
{
    return this->num_dimension;
}

void NICE::GMHIKernelRaw::getDiagonalElements( NICE::Vector & _diagonalElements) const
{
    _diagonalElements = this->diagonalElements;
}
double NICE::GMHIKernelRaw::getLargestValue ( ) const
{
    double vmax (0.0);
    double vtmp (0.0);

    // compare the largest elements of all dimensions;
    // since every dimension is sorted in ascending order,
    // the largest value is the last non-zero entry
    for (uint d = 0; d < this->num_dimension; d++)
    {
        uint nnz = this->nnz_per_dimension[d];
        if ( nnz > 0 )
        {
            vtmp = this->examples_raw[d][nnz-1].value;
            if ( vtmp > vmax )
            {
                vmax = vtmp;
            }
        }
    }
    return vmax;
}
NICE::Vector NICE::GMHIKernelRaw::getLargestValuePerDimension ( ) const
{
    NICE::Vector vmax ( this->num_dimension );
    NICE::Vector::iterator vmaxIt = vmax.begin();

    for (uint d = 0; d < this->num_dimension; d++, vmaxIt++)
    {
        uint nnz = this->nnz_per_dimension[d];
        if ( nnz > 0 )
        {
            // dimensions are sorted in ascending order,
            // hence the largest value is the last non-zero entry
            *vmaxIt = this->examples_raw[d][nnz-1].value;
        }
        else
        {
            *vmaxIt = 0.0;
        }
    }
    return vmax;
}
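
/*
 * Minimal usage sketch (hypothetical calling code, assuming the constructor
 * signature from this file and that passing NULL as the quantization pointer
 * disables the quantized lookup table):
 *
 *     std::vector< const NICE::SparseVector * > examples;   // training data, filled elsewhere
 *     double noise = 0.1;                                    // regularization added to the diagonal
 *
 *     NICE::GMHIKernelRaw gm ( examples, noise, NULL );      // builds the per-dimension sorted arrays
 *
 *     NICE::Vector alpha ( gm.rows(), 1.0 );                 // some coefficient vector
 *     NICE::Vector y;
 *     gm.multiply ( y, alpha );                              // y = (K_min + noise * I) * alpha
 */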