/**
* @file GMHIKernelRaw.cpp
* @brief Fast multiplication with histogram intersection kernel matrices (Implementation)
* @author Erik Rodner, Alexander Freytag
* @date 01/02/2012
*/
#include <iostream>

#include <core/vector/VVector.h>
#include <core/basics/Timer.h>

#include "GMHIKernelRaw.h"

using namespace NICE;
using namespace std;
  13. GMHIKernelRaw::GMHIKernelRaw( const std::vector< const NICE::SparseVector *> &_examples,
  14. const double _d_noise,
  15. NICE::Quantization * _q
  16. )
  17. {
  18. this->examples_raw = NULL;
  19. this->nnz_per_dimension = NULL;
  20. this->table_A = NULL;
  21. this->table_B = NULL;
  22. this->table_T = NULL;
  23. this->d_noise = _d_noise;
  24. this->q = _q;
  25. this->initData(_examples);
  26. }
  27. GMHIKernelRaw::~GMHIKernelRaw()
  28. {
  29. this->cleanupData();
  30. }
  31. void GMHIKernelRaw::cleanupData()
  32. {
  33. // data structure of examples
  34. if ( this->examples_raw != NULL )
  35. {
  36. for ( uint d = 0; d < this->num_dimension; d++ )
  37. if (examples_raw[d] != NULL)
  38. delete [] examples_raw[d];
  39. delete [] this->examples_raw;
  40. this->examples_raw = NULL;
  41. }
  42. // counter of non-zero examples in each dimension
  43. if ( this->nnz_per_dimension != NULL )
  44. {
  45. delete [] this->nnz_per_dimension;
  46. this->nnz_per_dimension = NULL;
  47. }
  48. // LUT A for classification without quantization
  49. if ( this->table_A != NULL )
  50. {
  51. for ( uint d = 0; d < this->num_dimension; d++ )
  52. if (table_A[d] != NULL)
  53. delete [] table_A[d];
  54. delete [] this->table_A;
  55. this->table_A = NULL;
  56. }
  57. // LUT B for classification without quantization
  58. if ( this->table_B != NULL )
  59. {
  60. for ( uint d = 0; d < this->num_dimension; d++ )
  61. if (table_B[d] != NULL)
  62. delete [] table_B[d];
  63. delete [] this->table_B;
  64. this->table_B = NULL;
  65. }
  66. // LUT T for classification with quantization
  67. if ( this->table_T != NULL )
  68. {
  69. delete [] this->table_T;
  70. this->table_T = NULL;
  71. }
  72. }
  73. void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_examples )
  74. {
  75. if (_examples.size() == 0 )
  76. fthrow(Exception, "No examples given for learning");
  77. cleanupData();
  78. this->num_dimension = _examples[0]->getDim();
  79. this->examples_raw = new sparseVectorElement *[num_dimension];
  80. this->nnz_per_dimension = new uint [num_dimension];
  81. this->num_examples = _examples.size();
  82. // waste memory and allocate a non-sparse data block
  83. sparseVectorElement **examples_raw_increment = new sparseVectorElement *[num_dimension];
  84. for (uint d = 0; d < this->num_dimension; d++)
  85. {
  86. this->examples_raw[d] = new sparseVectorElement [ this->num_examples ];
  87. examples_raw_increment[d] = this->examples_raw[d];
  88. this->nnz_per_dimension[d] = 0;
  89. }
  90. // additionally allocate a Vector with as many entries as examples
  91. // this vector will contain the L1 norm values of all examples + noise
  92. // thereby, it represents the diagonal entries of our kernel matrix for
  93. // the special case of minimum kernel
  94. this->diagonalElements.resize ( this->num_examples );
  95. this->diagonalElements.set ( this->d_noise );
  96. uint example_index = 0;
  97. NICE::Vector::iterator itDiagEl = this->diagonalElements.begin();
  98. // minor pre-allocation
  99. uint i_dimNonZero;
  100. double value;
  101. double l1norm;
  102. // iterate over all provided training examples to process their data
  103. for ( std::vector< const NICE::SparseVector * >::const_iterator i = _examples.begin();
  104. i != _examples.end();
  105. i++, example_index++, itDiagEl++
  106. )
  107. {
  108. l1norm = 0.0;
  109. const NICE::SparseVector *x = *i;
  110. // loop over all non-zero dimensions, copy dimension and value into our data structure, and compute the L1 norm
  111. for ( NICE::SparseVector::const_iterator j = x->begin(); j != x->end(); j++ )
  112. {
  113. i_dimNonZero = j->first;
  114. value = j->second;
  115. examples_raw_increment[i_dimNonZero]->value = value;
  116. examples_raw_increment[i_dimNonZero]->example_index = example_index;
  117. // move data pointer to the next element in the current dimension
  118. examples_raw_increment[i_dimNonZero]++;
  119. this->nnz_per_dimension[i_dimNonZero]++;
  120. l1norm = l1norm + value;
  121. }
  122. *itDiagEl = *itDiagEl + l1norm;
  123. }
  124. delete [] examples_raw_increment;
  125. // sort along each dimension
  126. for (uint d = 0; d < this->num_dimension; d++)
  127. {
  128. uint nnz = this->nnz_per_dimension[d];
  129. if ( nnz > 1 )
  130. std::sort( this->examples_raw[d], this->examples_raw[d] + nnz );
  131. }
  132. // pre-allocate the A and B matrices
  133. this->table_A = allocateTableAorB();
  134. this->table_B = allocateTableAorB();
  135. // Quantization for classification?
  136. if ( this->q != NULL )
  137. {
  138. // (1) if yes, setup the parameters of the quantization object
  139. NICE::Vector _maxValuesPerDimension = this->getLargestValuePerDimension();
  140. this->q->computeParametersFromData ( _maxValuesPerDimension );
  141. this->table_T = this->allocateTableT();
  142. }
  143. }
  144. double **GMHIKernelRaw::allocateTableAorB() const
  145. {
  146. double **table;
  147. table = new double *[this->num_dimension];
  148. for (uint i = 0; i < this->num_dimension; i++)
  149. {
  150. uint nnz = this->nnz_per_dimension[i];
  151. if (nnz>0) {
  152. table[i] = new double [ nnz ];
  153. } else {
  154. table[i] = NULL;
  155. }
  156. }
  157. return table;
  158. }
  159. double *GMHIKernelRaw::allocateTableT() const
  160. {
  161. double *table;
  162. table = new double [this->num_dimension * this->q->getNumberOfBins()];
  163. return table;
  164. }
  165. void GMHIKernelRaw::copyTableAorB(double **src, double **dst) const
  166. {
  167. for (uint i = 0; i < this->num_dimension; i++)
  168. {
  169. uint nnz = this->nnz_per_dimension[i];
  170. if (nnz>0) {
  171. for (uint j = 0; j < nnz; j++)
  172. dst[i][j] = src[i][j];
  173. } else {
  174. dst[i] = NULL;
  175. }
  176. }
  177. }
  178. void GMHIKernelRaw::copyTableT(double *_src, double *_dst) const
  179. {
  180. double * p_src = _src;
  181. double * p_dst = _dst;
  182. for ( int i = 0; i < this->num_dimension * this->q->getNumberOfBins(); i++, p_src++, p_dst++ )
  183. {
  184. *p_dst = *p_src;
  185. }
  186. }
  187. void GMHIKernelRaw::updateTablesAandB ( const NICE::Vector _x ) const
  188. {
  189. // start the actual computations of A, B, and optionally T
  190. for (uint dim = 0; dim < this->num_dimension; dim++)
  191. {
  192. double alpha_sum = 0.0;
  193. double alpha_times_x_sum = 0.0;
  194. uint nnz = nnz_per_dimension[dim];
  195. //////////
  196. // loop through all elements in sorted order
  197. sparseVectorElement *training_values_in_dim = examples_raw[dim];
  198. for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
  199. {
  200. // index of the feature
  201. int index = training_values_in_dim->example_index;
  202. // element of the feature
  203. double elem = training_values_in_dim->value;
  204. alpha_times_x_sum += _x[index] * elem;
  205. this->table_A[dim][cntNonzeroFeat] = alpha_times_x_sum;
  206. alpha_sum += _x[index];
  207. this->table_B[dim][cntNonzeroFeat] = alpha_sum;
  208. }
  209. }
  210. }
  211. void GMHIKernelRaw::updateTableT ( const NICE::Vector _x ) const
  212. {
  213. // sanity check
  214. if ( this->q == NULL)
  215. {
  216. return;
  217. }
  218. // number of quantization bins
  219. uint hmax = this->q->getNumberOfBins();
  220. double * prototypes;
  221. prototypes = new double [ hmax * this->num_dimension ];
  222. double * p_prototypes;
  223. p_prototypes = prototypes;
  224. // compute all prototypes to compare against lateron
  225. for (uint dim = 0; dim < this->num_dimension; dim++)
  226. {
  227. for ( uint i = 0 ; i < hmax ; i++ )
  228. {
  229. *p_prototypes = this->q->getPrototype( i, dim );
  230. p_prototypes++;
  231. }
  232. }
  233. // start the actual computation of T
  234. for (uint dim = 0; dim < this->num_dimension; dim++)
  235. {
  236. uint nnz = nnz_per_dimension[dim];
  237. uint idxProtoElem; // denotes the bin number in dim i of a quantized example, previously termed qBin
  238. sparseVectorElement * i = examples_raw[dim];
  239. sparseVectorElement * iPredecessor = examples_raw[dim];
  240. // index of the element, which is always bigger than the current value fval
  241. int indexElem = 0;
  242. // element of the feature
  243. double elem = i->value;
  244. idxProtoElem = this->q->quantize ( elem, dim );
  245. uint idxProto;
  246. double * itProtoVal = prototypes + dim*hmax;
  247. double * itT = this->table_T + dim*hmax;
  248. // special case 1:
  249. // loop over all prototypes smaller then the smallest quantized example in this dimension
  250. for ( idxProto = 0; idxProto < idxProtoElem; idxProto++, itProtoVal++, itT++) // idxProto previously j
  251. {
  252. // current prototype is smaller than all known examples
  253. // -> resulting value = fval * sum_l=1^n alpha_l
  254. (*itT) = (*itProtoVal) * ( this->table_B[ dim ][ nnz-1 ] );
  255. }//for-loop over prototypes -- special case 1
  256. // standard case: prototypes larger then the smallest element, but smaller then the largest one in the corrent dimension
  257. for ( ; idxProto < hmax; idxProto++, itProtoVal++, itT++)
  258. {
  259. //move to next example, which is smaller then the current prototype after quantization
  260. // pay attentation to not loop over the number of non-zero elements
  261. while ( (idxProto >= idxProtoElem) && ( indexElem < ( nnz - 1 ) ) ) //(this->ui_n-1-nrZeroIndices)) )
  262. {
  263. indexElem++;
  264. iPredecessor = i;
  265. i++;
  266. // only quantize if value changed
  267. if ( i->value != iPredecessor->value )
  268. {
  269. idxProtoElem = this->q->quantize ( i->value, dim );
  270. }
  271. }
  272. // did we looped over the largest element in this dimension?
  273. if ( indexElem==( nnz-1 ) )
  274. {
  275. break;
  276. }
  277. (*itT) = table_A[ dim ][ indexElem-1 ] + (*itProtoVal)*( table_B[ dim ][ nnz-1 ] - table_B[ dim ][ indexElem-1 ] );
  278. }//for-loop over prototypes -- standard case
  279. // special case 2:
  280. // the current prototype is equal to or larger than the largest training example in this dimension
  281. // -> the term B[ dim ][ nnz-1 ] - B[ dim ][ indexElem ] is equal to zero and vanishes, which is logical, since all elements are smaller than the remaining prototypes!
  282. for ( ; idxProto < hmax; idxProto++, itProtoVal++, itT++)
  283. {
  284. (*itT) = table_A[ dim ][ indexElem ];
  285. }//for-loop over prototypes -- special case 2
  286. }//for-loop over dimensions
  287. // clean-up prototypes
  288. if ( this->q != NULL)
  289. {
  290. delete [] prototypes;
  291. }
  292. }
  293. /** multiply with a vector: A*x = y */
  294. void GMHIKernelRaw::multiply (NICE::Vector & _y, const NICE::Vector & _x) const
  295. {
  296. // STEP 1: initialize tables A and B
  297. this->updateTablesAandB(_x);
  298. _y.resize( this->num_examples );
  299. _y.set(0.0);
  300. for (uint dim = 0; dim < this->num_dimension; dim++)
  301. {
  302. uint nnz = this->nnz_per_dimension[dim];
  303. uint nz = this->num_examples - nnz;
  304. if ( nnz == 0 ) {
  305. // all values are zero in this dimension :) and we can simply ignore the feature
  306. continue;
  307. }
  308. sparseVectorElement *training_values_in_dim = examples_raw[dim];
  309. for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
  310. {
  311. uint feat = training_values_in_dim->example_index;
  312. uint inversePosition = cntNonzeroFeat;
  313. double fval = training_values_in_dim->value;
  314. double firstPart = this->table_A[dim][inversePosition];
  315. double secondPart = this->table_B[dim][nnz-1] - this->table_B[dim][inversePosition];
  316. _y[feat] += firstPart + fval * secondPart;
  317. }
  318. }
  319. for (uint feat = 0; feat < this->num_examples; feat++)
  320. _y[feat] += this->d_noise * _x[feat];
  321. }
  322. /** get the number of rows in A */
  323. uint GMHIKernelRaw::rows () const
  324. {
  325. // return the number of examples
  326. return num_examples;
  327. }
  328. /** get the number of columns in A */
  329. uint GMHIKernelRaw::cols () const
  330. {
  331. // return the number of examples
  332. return num_examples;
  333. }
  334. double **GMHIKernelRaw::getTableA() const
  335. {
  336. double **t = allocateTableAorB();
  337. copyTableAorB(this->table_A, t);
  338. return t;
  339. }
  340. double **GMHIKernelRaw::getTableB() const
  341. {
  342. double **t = allocateTableAorB();
  343. copyTableAorB(this->table_B, t);
  344. return t;
  345. }
  346. double * GMHIKernelRaw::getTableT() const
  347. {
  348. double * T = this->allocateTableT();
  349. copyTableT(this->table_T, T);
  350. return T;
  351. }
  352. uint *GMHIKernelRaw::getNNZPerDimension() const
  353. {
  354. uint *v = new uint[this->num_dimension];
  355. for (uint i = 0; i < this->num_dimension; i++)
  356. v[i] = this->nnz_per_dimension[i];
  357. return v;
  358. }
  359. uint NICE::GMHIKernelRaw::getNumberOfDimensions() const
  360. {
  361. return this->num_dimension;
  362. }
  363. void NICE::GMHIKernelRaw::getDiagonalElements( NICE::Vector & _diagonalElements) const
  364. {
  365. _diagonalElements = this->diagonalElements;
  366. }
  367. NICE::Vector NICE::GMHIKernelRaw::getLargestValuePerDimension ( ) const
  368. {
  369. NICE::Vector vmax ( this->num_dimension );
  370. NICE::Vector::iterator vmaxIt = vmax.begin();
  371. for (uint d = 0; d < this->num_dimension; d++, vmaxIt++)
  372. {
  373. uint nnz = this->nnz_per_dimension[d];
  374. if ( nnz > 0 )
  375. {
  376. *vmaxIt = this->examples_raw[ d ][ nnz-1 ].value;
  377. }
  378. else
  379. {
  380. *vmaxIt = 0.0;
  381. }
  382. }
  383. return vmax;
  384. }