// TestFastHIK.cpp -- unit tests for the fast histogram intersection kernel (HIK) machinery
  1. #ifdef NICE_USELIB_CPPUNIT
  2. #include <string>
  3. #include <exception>
  4. #include <core/algebra/ILSConjugateGradients.h>
  5. #include <core/algebra/GMStandard.h>
  6. #include <core/basics/Timer.h>
  7. #include <gp-hik-core/tools.h>
  8. #include <gp-hik-core/kernels/IntersectionKernelFunction.h>
  9. #include <gp-hik-core/kernels/GeneralizedIntersectionKernelFunction.h>
  10. #include <gp-hik-core/parameterizedFunctions/ParameterizedFunction.h>
  11. #include <gp-hik-core/parameterizedFunctions/PFAbsExp.h>
  12. #include "TestFastHIK.h"
  13. const bool verbose = false;
  14. const bool verboseStartEnd = true;
  15. const bool solveLinWithoutRand = false;
  16. const uint n = 20;//1500;//1500;//10;
  17. const uint d = 5;//200;//2;
  18. const uint numBins = 11;//1001;//1001;
  19. const uint solveLinMaxIterations = 1000;
  20. const double sparse_prob = 0.6;
  21. const bool smallTest = false;
  22. using namespace NICE;
  23. using namespace std;
  24. CPPUNIT_TEST_SUITE_REGISTRATION( TestFastHIK );
// Fixture setup hook required by CppUnit; intentionally empty --
// every test case below builds its own data and kernel objects.
void TestFastHIK::setUp() {
}
// Fixture teardown hook required by CppUnit; intentionally empty --
// all per-test objects are stack-allocated or freed inside the tests.
void TestFastHIK::tearDown() {
}
  29. bool compareVVector(const NICE::VVector & A, const NICE::VVector & B, const double & tolerance = 10e-8)
  30. {
  31. bool result(true);
  32. // std::cerr << "A.size(): " << A.size() << " B.size(): " << B.size() << std::endl;
  33. NICE::VVector::const_iterator itA = A.begin();
  34. NICE::VVector::const_iterator itB = B.begin();
  35. while ( (itA != A.end()) && ( itB != B.end()) )
  36. {
  37. if (itA->size() != itB->size())
  38. {
  39. result = false;
  40. break;
  41. }
  42. // std::cerr << "itA->size(): " << itA->size() << "itB->size(): " << itB->size() << std::endl;
  43. for(uint i = 0; (i < itA->size()) && (i < itB->size()); i++)
  44. {
  45. if (fabs((*itA)[i] - (*itB)[i]) > tolerance)
  46. {
  47. result = false;
  48. break;
  49. }
  50. }
  51. if (result == false)
  52. break;
  53. itA++;
  54. itB++;
  55. // std::cerr << "foo" << std::endl;
  56. }
  57. return result;
  58. }
  59. bool compareLUTs(const double* LUT1, const double* LUT2, const int & size, const double & tolerance = 10e-8)
  60. {
  61. bool result = true;
  62. for (int i = 0; i < size; i++)
  63. {
  64. if ( fabs(LUT1[i] - LUT2[i]) > tolerance)
  65. {
  66. result = false;
  67. std::cerr << "problem in : " << i << " / " << size << " LUT1: " << LUT1[i] << " LUT2: " << LUT2[i] << std::endl;
  68. break;
  69. }
  70. }
  71. return result;
  72. }
  73. void TestFastHIK::testKernelMultiplication()
  74. {
  75. if (verboseStartEnd)
  76. std::cerr << "================== TestFastHIK::testKernelMultiplication ===================== " << std::endl;
  77. vector< vector<double> > dataMatrix;
  78. generateRandomFeatures ( d, n, dataMatrix );
  79. int nrZeros(0);
  80. for ( uint i = 0 ; i < d; i++ )
  81. {
  82. for ( uint k = 0; k < n; k++ )
  83. if ( drand48() < sparse_prob )
  84. {
  85. dataMatrix[i][k] = 0.0;
  86. nrZeros++;
  87. }
  88. }
  89. if ( verbose ) {
  90. cerr << "data matrix: " << endl;
  91. printMatrix ( dataMatrix );
  92. cerr << endl;
  93. }
  94. double noise = 1.0;
  95. FastMinKernel fmk ( dataMatrix, noise );
  96. if ( (n*d)>0)
  97. {
  98. CPPUNIT_ASSERT_DOUBLES_EQUAL(fmk.getSparsityRatio(), (double)nrZeros/(double)(n*d), 1e-8);
  99. if (verbose)
  100. std::cerr << "fmk.getSparsityRatio(): " << fmk.getSparsityRatio() << " (double)nrZeros/(double)(n*d): " << (double)nrZeros/(double)(n*d) << std::endl;
  101. }
  102. GMHIKernel gmk ( &fmk );
  103. if (verbose)
  104. gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution
  105. else
  106. gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution
  107. Vector y ( n );
  108. for ( uint i = 0; i < y.size(); i++ )
  109. y[i] = sin(i);
  110. Vector alpha;
  111. gmk.multiply ( alpha, y );
  112. NICE::IntersectionKernelFunction<double> hikSlow;
  113. // tic
  114. time_t slow_start = clock();
  115. std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
  116. transposeVectorOfVectors(dataMatrix_transposed);
  117. NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
  118. //toc
  119. float time_slowComputation = (float) (clock() - slow_start);
  120. std::cerr << "Time for computing the kernel matrix without using sparsity: " << time_slowComputation/CLOCKS_PER_SEC << " s" << std::endl;
  121. // tic
  122. time_t slow_sparse_start = clock();
  123. NICE::Matrix KSparseCalculated (hikSlow.computeKernelMatrix(fmk.featureMatrix(), noise));
  124. //toc
  125. float time_slowComputation_usingSparsity = (float) (clock() - slow_sparse_start);
  126. std::cerr << "Time for computing the kernel matrix using sparsity: " << time_slowComputation_usingSparsity/CLOCKS_PER_SEC << " s" << std::endl;
  127. if ( verbose )
  128. cerr << "K = " << K << endl;
  129. // check the trace calculation
  130. //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
  131. CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
  132. // let us compute the kernel multiplication with the slow version
  133. Vector alpha_slow = K*y;
  134. if (verbose)
  135. std::cerr << "Sparse multiplication [alpha, alpha_slow]: " << std::endl << alpha << std::endl << alpha_slow << std::endl << std::endl;
  136. CPPUNIT_ASSERT_DOUBLES_EQUAL((alpha-alpha_slow).normL1(), 0.0, 1e-8);
  137. // test the case, where we first transform and then use the multiply stuff
  138. NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
  139. NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
  140. ParameterizedFunction *pf = new PFAbsExp( 1.2 );
  141. fmk.applyFunctionToFeatureMatrix( pf );
  142. // pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
  143. Vector galpha;
  144. gmk.multiply ( galpha, y );
  145. Vector galpha_slow = gK * y;
  146. CPPUNIT_ASSERT_DOUBLES_EQUAL((galpha-galpha_slow).normL1(), 0.0, 1e-8);
  147. if (verboseStartEnd)
  148. std::cerr << "================== TestFastHIK::testKernelMultiplication done ===================== " << std::endl;
  149. }
  150. void TestFastHIK::testKernelMultiplicationFast()
  151. {
  152. if (verboseStartEnd)
  153. std::cerr << "================== TestFastHIK::testKernelMultiplicationFast ===================== " << std::endl;
  154. Quantization q_gen ( numBins );
  155. Quantization q ( 2*numBins -1);
  156. // data is generated, such that there is no approximation error
  157. vector< vector<double> > dataMatrix;
  158. for ( uint i = 0; i < d ; i++ )
  159. {
  160. vector<double> v;
  161. v.resize(n);
  162. for ( uint k = 0; k < n; k++ ) {
  163. if ( drand48() < sparse_prob ) {
  164. v[k] = 0;
  165. } else {
  166. v[k] = q_gen.getPrototype( (rand() % numBins) );
  167. }
  168. }
  169. dataMatrix.push_back(v);
  170. }
  171. if ( verbose ) {
  172. cerr << "data matrix: " << endl;
  173. printMatrix ( dataMatrix );
  174. cerr << endl;
  175. }
  176. double noise = 1.0;
  177. FastMinKernel fmk ( dataMatrix, noise );
  178. GMHIKernel gmk ( &fmk );
  179. if (verbose)
  180. gmk.setVerbose(true); //we want to see the size of size(A)+size(B) for non-sparse vs sparse solution
  181. else
  182. gmk.setVerbose(false); //we don't want to see the size of size(A)+size(B) for non-sparse vs sparse solution
  183. Vector y ( n );
  184. for ( uint i = 0; i < y.size(); i++ )
  185. y[i] = sin(i);
  186. ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
  187. GMHIKernel gmkFast ( &fmk, pf, &q );
  188. // pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
  189. Vector alpha;
  190. gmk.multiply ( alpha, y );
  191. Vector alphaFast;
  192. gmkFast.multiply ( alphaFast, y );
  193. NICE::IntersectionKernelFunction<double> hikSlow;
  194. std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
  195. transposeVectorOfVectors(dataMatrix_transposed);
  196. NICE::Matrix K (hikSlow.computeKernelMatrix(dataMatrix_transposed, noise));
  197. if ( verbose )
  198. cerr << "K = " << K << endl;
  199. // check the trace calculation
  200. //CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-12 );
  201. CPPUNIT_ASSERT_DOUBLES_EQUAL( K.trace(), fmk.featureMatrix().hikTrace() + noise*n, 1e-8 );
  202. // let us compute the kernel multiplication with the slow version
  203. Vector alpha_slow = K*y;
  204. if ( verbose )
  205. std::cerr << "Sparse multiplication [alpha, alphaFast, alpha_slow]: " << std::endl << alpha << std::endl << alphaFast << std::endl << alpha_slow << std::endl << std::endl;
  206. CPPUNIT_ASSERT_DOUBLES_EQUAL(0.0, (alphaFast-alpha_slow).normL1(), 1e-8);
  207. // test the case, where we first transform and then use the multiply stuff
  208. NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.2 );
  209. NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
  210. pf->parameters()[0] = 1.2;
  211. fmk.applyFunctionToFeatureMatrix( pf );
  212. // pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
  213. Vector galphaFast;
  214. gmkFast.multiply ( galphaFast, y );
  215. Vector galpha;
  216. gmk.multiply ( galpha, y );
  217. Vector galpha_slow = gK * y;
  218. if (verbose)
  219. std::cerr << "Sparse multiplication [galpha, galphaFast, galpha_slow]: " << std::endl << galpha << std::endl << galphaFast << std::endl << galpha_slow << std::endl << std::endl;
  220. CPPUNIT_ASSERT_DOUBLES_EQUAL((galphaFast-galpha_slow).normL1(), 0.0, 1e-8);
  221. if (verboseStartEnd)
  222. std::cerr << "================== TestFastHIK::testKernelMultiplicationFast done ===================== " << std::endl;
  223. }
  224. void TestFastHIK::testKernelSum()
  225. {
  226. if (verboseStartEnd)
  227. std::cerr << "================== TestFastHIK::testKernelSum ===================== " << std::endl;
  228. vector< vector<double> > dataMatrix;
  229. generateRandomFeatures ( d, n, dataMatrix );
  230. int nrZeros(0);
  231. for ( uint i = 0 ; i < d; i++ )
  232. {
  233. for ( uint k = 0; k < n; k++ )
  234. if ( drand48() < sparse_prob )
  235. {
  236. dataMatrix[i][k] = 0.0;
  237. nrZeros++;
  238. }
  239. }
  240. if ( verbose ) {
  241. cerr << "data matrix: " << endl;
  242. printMatrix ( dataMatrix );
  243. cerr << endl;
  244. }
  245. double noise = 1.0;
  246. FastMinKernel fmk ( dataMatrix, noise );
  247. Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
  248. NICE::VVector ASparse;
  249. NICE::VVector BSparse;
  250. fmk.hik_prepare_alpha_multiplications ( alpha, ASparse, BSparse );
  251. Vector xstar (d);
  252. for ( uint i = 0 ; i < d ; i++ )
  253. if ( drand48() < sparse_prob ) {
  254. xstar[i] = 0.0;
  255. } else {
  256. xstar[i] = rand();
  257. }
  258. SparseVector xstarSparse ( xstar );
  259. double betaSparse;
  260. fmk.hik_kernel_sum ( ASparse, BSparse, xstarSparse, betaSparse );
  261. if (verbose)
  262. std::cerr << "kernelSumSparse done, now do the thing without exploiting sparsity" << std::endl;
  263. // checking the result
  264. std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
  265. transposeVectorOfVectors(dataMatrix_transposed);
  266. NICE::IntersectionKernelFunction<double> hikSlow;
  267. std::vector<double> xstar_stl;
  268. xstar_stl.resize(d);
  269. for ( uint i = 0 ; i < d; i++ )
  270. xstar_stl[i] = xstar[i];
  271. std::vector<double> kstar_stl = hikSlow.computeKernelVector ( dataMatrix_transposed, xstar_stl );
  272. double beta_slow = 0.0;
  273. for ( uint i = 0 ; i < n; i++ )
  274. beta_slow += kstar_stl[i] * alpha[i];
  275. if (verbose)
  276. std::cerr << "difference of beta_slow and betaSparse: " << fabs(beta_slow - betaSparse) << std::endl;
  277. CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, betaSparse, 1e-8);
  278. if (verboseStartEnd)
  279. std::cerr << "================== TestFastHIK::testKernelSum done ===================== " << std::endl;
  280. }
/**
 * @brief Tests the LUT-based fast kernel sum (hik_kernel_sum_fast) for a
 *        family of generalized HIK parameters gamma. The data is drawn
 *        from the quantization prototypes so the LUT is exact, and both
 *        LUT construction paths (via A/B precomputations and the direct
 *        alpha-based preparation) are exercised. The final assertion pins
 *        the sparse fast variant against a naive dense reference.
 */
void TestFastHIK::testKernelSumFast()
{
  if (verboseStartEnd)
    std::cerr << "================== TestFastHIK::testKernelSumFast ===================== " << std::endl;

  Quantization q ( numBins );

  // data is generated, such that there is no approximation error
  vector< vector<double> > dataMatrix;
  for ( uint i = 0; i < d ; i++ )
  {
    vector<double> v;
    v.resize(n);
    for ( uint k = 0; k < n; k++ ) {
      if ( drand48() < sparse_prob ) {
        v[k] = 0;
      } else {
        // draw feature values exactly on quantization prototypes
        v[k] = q.getPrototype( (rand() % numBins) );
      }
    }
    dataMatrix.push_back(v);
  }

  if ( verbose ) {
    cerr << "data matrix: " << endl;
    printMatrix ( dataMatrix );
    cerr << endl;
  }

  double noise = 1.0;
  FastMinKernel fmk ( dataMatrix, noise );

  Vector alpha = Vector::UniformRandom( n, 0.0, 1.0, 0 );
  if ( verbose )
    std::cerr << "alpha = " << alpha << endl;

  // generate xstar
  Vector xstar (d);
  for ( uint i = 0 ; i < d ; i++ )
    if ( drand48() < sparse_prob ) {
      xstar[i] = 0;
    } else {
      xstar[i] = q.getPrototype( (rand() % numBins) );
    }

  // convert to STL vector
  vector<double> xstar_stl;
  xstar_stl.resize(d);
  for ( uint i = 0 ; i < d; i++ )
    xstar_stl[i] = xstar[i];

  if ( verbose )
    cerr << "xstar = " << xstar << endl;

  // sweep over the generalized-HIK exponent gamma (1.0 and 1.5)
  for ( double gamma = 1.0 ; gamma < 2.0; gamma += 0.5 )
  {
    if (verbose)
      std::cerr << "testing hik_kernel_sum_fast with ghik parameter: " << gamma << endl;

    PFAbsExp pf ( gamma );

    // pf.applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
    // NOTE(review): this transforms the stored features in-place on every
    // gamma iteration -- the second pass operates on already-transformed data
    fmk.applyFunctionToFeatureMatrix( &pf );

    NICE::VVector A;
    NICE::VVector B;
    if (verbose)
      std::cerr << "fmk.hik_prepare_alpha_multiplications ( alpha, A, B ) " << std::endl;
    fmk.hik_prepare_alpha_multiplications ( alpha, A, B );

    if (verbose)
      //std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast( A, B, q )" << std::endl;
      std::cerr << "double *Tlookup = fmk.hik_prepare_alpha_multiplications_fast_alltogether( alpha, q, &pf )" << std::endl;

    // two independent LUT construction paths; both are printed below in
    // verbose mode so deviations can be inspected visually
    double *TlookupOld = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, &pf );
    double *TlookupNew = fmk.hikPrepareLookupTable( alpha, q, &pf );

    int maxAcces(numBins*d);

    if (verbose)
    {
      std::cerr << "TlookupOld: " << std::endl;
      for (int i = 0; i < maxAcces; i++)
      {
        std::cerr << TlookupOld[i] << " ";
        if ( (i%numBins) == (numBins-1))
          std::cerr << std::endl;
      }
      std::cerr << "TlookupNew: " << std::endl;
      for (int i = 0; i < maxAcces; i++)
      {
        std::cerr << TlookupNew[i] << " ";
        if ( (i%numBins) == (numBins-1))
          std::cerr << std::endl;
      }
    }

    if (verbose)
      std::cerr << "fmk.hik_kernel_sum_fast ( Tlookup, q, xstar, beta_fast )" << std::endl;

    // LUT-based kernel sum with a dense test point
    double beta_fast;
    fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar, beta_fast );

    // LUT-based kernel sum with a sparse test point
    NICE::SparseVector xstar_sparse(xstar);
    double beta_fast_sparse;
    fmk.hik_kernel_sum_fast ( TlookupNew, q, xstar_sparse, beta_fast_sparse );

    // exact (non-LUT) sparse kernel sum for comparison
    double betaSparse;
    fmk.hik_kernel_sum ( A, B, xstar_sparse, betaSparse, &pf );

    // checking the result
    std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
    transposeVectorOfVectors(dataMatrix_transposed);
    NICE::GeneralizedIntersectionKernelFunction<double> hikSlow (gamma);

    vector<double> kstar_stl = hikSlow.computeKernelVector ( dataMatrix_transposed, xstar_stl );
    double beta_slow = 0.0;
    for ( uint i = 0 ; i < n; i++ )
      beta_slow += kstar_stl[i] * alpha[i];

    if (verbose)
      std::cerr << "beta_slow: " << beta_slow << std::endl << "beta_fast: " << beta_fast << std::endl << "beta_fast_sparse: " << beta_fast_sparse << std::endl << "betaSparse: " << betaSparse<< std::endl;

    CPPUNIT_ASSERT_DOUBLES_EQUAL(beta_slow, beta_fast_sparse, 1e-8);

    delete [] TlookupNew;
    delete [] TlookupOld;
  }

  if (verboseStartEnd)
    std::cerr << "================== TestFastHIK::testKernelSumFast done ===================== " << std::endl;
}
  387. void TestFastHIK::testLUTUpdate()
  388. {
  389. if (verboseStartEnd)
  390. std::cerr << "================== TestFastHIK::testLUTUpdate ===================== " << std::endl;
  391. Quantization q ( numBins );
  392. // data is generated, such that there is no approximation error
  393. vector< vector<double> > dataMatrix;
  394. for ( uint i = 0; i < d ; i++ )
  395. {
  396. vector<double> v;
  397. v.resize(n);
  398. for ( uint k = 0; k < n; k++ ) {
  399. if ( drand48() < sparse_prob ) {
  400. v[k] = 0;
  401. } else {
  402. v[k] = q.getPrototype( (rand() % numBins) );
  403. }
  404. }
  405. dataMatrix.push_back(v);
  406. }
  407. if ( verbose ) {
  408. cerr << "data matrix: " << endl;
  409. printMatrix ( dataMatrix );
  410. cerr << endl;
  411. }
  412. double noise = 1.0;
  413. FastMinKernel fmk ( dataMatrix, noise );
  414. ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
  415. Vector alpha ( n );
  416. for ( uint i = 0; i < alpha.size(); i++ )
  417. alpha[i] = sin(i);
  418. if (verbose)
  419. std::cerr << "prepare LUT" << std::endl;
  420. double * T = fmk.hikPrepareLookupTable(alpha, q, pf);
  421. if (verbose)
  422. std::cerr << "preparation done -- printing T" << std::endl;
  423. int maxAcces(numBins*d);
  424. if (verbose)
  425. {
  426. for (int i = 0; i < maxAcces; i++)
  427. {
  428. std::cerr << T[i] << " ";
  429. if ( (i%numBins) == (numBins-1))
  430. std::cerr << std::endl;
  431. }
  432. }
  433. //lets change index 2
  434. int idx(2);
  435. double valAlphaOld(alpha[idx]);
  436. double valAlphaNew(1.2); //this value is definitely different from the previous one
  437. Vector alphaNew(alpha);
  438. alphaNew[idx] = valAlphaNew;
  439. double * TNew = fmk.hikPrepareLookupTable(alphaNew, q, pf);
  440. if (verbose)
  441. std::cerr << "calculated the new LUT, no print it: " << std::endl;
  442. if (verbose)
  443. {
  444. for (int i = 0; i < maxAcces; i++)
  445. {
  446. std::cerr << TNew[i] << " ";
  447. if ( (i%numBins) == (numBins-1))
  448. std::cerr << std::endl;
  449. }
  450. }
  451. if (verbose)
  452. std::cerr << "change the old LUT by a new value for alpha_i" << std::endl;
  453. fmk.hikUpdateLookupTable(T, valAlphaNew, valAlphaOld, idx, q, pf );
  454. if (verbose)
  455. std::cerr << "update is done, now print the updated version: " << std::endl;
  456. if (verbose)
  457. {
  458. for (int i = 0; i < maxAcces; i++)
  459. {
  460. std::cerr << T[i] << " ";
  461. if ( (i%numBins) == (numBins-1))
  462. std::cerr << std::endl;
  463. }
  464. }
  465. bool equal = compareLUTs(T, TNew, q.size()*d, 10e-8);
  466. if (verbose)
  467. {
  468. if (equal)
  469. std::cerr << "LUTs are equal :) " << std::endl;
  470. else
  471. {
  472. std::cerr << "T are not equal :( " << std::endl;
  473. for (uint i = 0; i < q.size()*d; i++)
  474. {
  475. if ( (i % q.size()) == 0)
  476. std::cerr << std::endl;
  477. std::cerr << T[i] << " ";
  478. }
  479. std::cerr << "TNew: "<< std::endl;
  480. for (uint i = 0; i < q.size()*d; i++)
  481. {
  482. if ( (i % q.size()) == 0)
  483. std::cerr << std::endl;
  484. std::cerr << TNew[i] << " ";
  485. }
  486. }
  487. }
  488. CPPUNIT_ASSERT(equal == true);
  489. if (verboseStartEnd)
  490. std::cerr << "================== TestFastHIK::testLUTUpdate done ===================== " << std::endl;
  491. delete [] T;
  492. delete [] TNew;
  493. }
  494. void TestFastHIK::testLinSolve()
  495. {
  496. if (verboseStartEnd)
  497. std::cerr << "================== TestFastHIK::testLinSolve ===================== " << std::endl;
  498. Quantization q ( numBins );
  499. // data is generated, such that there is no approximation error
  500. vector< vector<double> > dataMatrix;
  501. for ( uint i = 0; i < d ; i++ )
  502. {
  503. vector<double> v;
  504. v.resize(n);
  505. for ( uint k = 0; k < n; k++ ) {
  506. if ( drand48() < sparse_prob ) {
  507. v[k] = 0;
  508. } else {
  509. v[k] = q.getPrototype( (rand() % numBins) );
  510. }
  511. }
  512. dataMatrix.push_back(v);
  513. }
  514. if ( verbose ) {
  515. cerr << "data matrix: " << endl;
  516. printMatrix ( dataMatrix );
  517. cerr << endl;
  518. }
  519. double noise = 1.0;
  520. FastMinKernel fmk ( dataMatrix, noise );
  521. ParameterizedFunction *pf = new PFAbsExp ( 1.0 );
  522. fmk.applyFunctionToFeatureMatrix( pf );
  523. // pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
  524. Vector y ( n );
  525. for ( uint i = 0; i < y.size(); i++ )
  526. y[i] = sin(i);
  527. Vector alpha;
  528. Vector alphaRandomized;
  529. std::cerr << "solveLin with randomization" << std::endl;
  530. // tic
  531. Timer t;
  532. t.start();
  533. //let's try to do 10.000 iterations and sample in each iteration 30 examples randomly
  534. fmk.solveLin(y,alphaRandomized,q,pf,true,solveLinMaxIterations,30);
  535. //toc
  536. t.stop();
  537. float time_randomizedSolving = t.getLast();
  538. std::cerr << "Time for solving with random subsets: " << time_randomizedSolving << " s" << std::endl;
  539. // test the case, where we first transform and then use the multiply stuff
  540. std::vector<std::vector<double> > dataMatrix_transposed (dataMatrix);
  541. transposeVectorOfVectors(dataMatrix_transposed);
  542. NICE::GeneralizedIntersectionKernelFunction<double> ghikSlow ( 1.0 );
  543. NICE::Matrix gK ( ghikSlow.computeKernelMatrix(dataMatrix_transposed, noise) );
  544. Vector K_alphaRandomized;
  545. K_alphaRandomized.multiply(gK, alphaRandomized);
  546. if (solveLinWithoutRand)
  547. {
  548. std::cerr << "solveLin without randomization" << std::endl;
  549. fmk.solveLin(y,alpha,q,pf,false,1000);
  550. Vector K_alpha;
  551. K_alpha.multiply(gK, alpha);
  552. std::cerr << "now assert that K_alpha == y" << std::endl;
  553. std::cerr << "(K_alpha-y).normL1(): " << (K_alpha-y).normL1() << std::endl;
  554. }
  555. // std::cerr << "alpha: " << alpha << std::endl;
  556. // std::cerr << "K_times_alpha: " << K_alpha << std::endl;
  557. // std::cerr << "y: " << y << std::endl;
  558. //
  559. // Vector test_alpha;
  560. // ILSConjugateGradients cgm;
  561. // cgm.solveLin( GMStandard(gK),y,test_alpha);
  562. //
  563. // K_alpha.multiply( gK, test_alpha);
  564. //
  565. // std::cerr << "test_alpha (CGM): " << test_alpha << std::endl;
  566. // std::cerr << "K_times_alpha (CGM): " << K_alpha << std::endl;
  567. std::cerr << "now assert that K_alphaRandomized == y" << std::endl;
  568. std::cerr << "(K_alphaRandomized-y).normL1(): " << (K_alphaRandomized-y).normL1() << std::endl;
  569. // CPPUNIT_ASSERT_DOUBLES_EQUAL((K_alphaRandomized-y).normL1(), 0.0, 1e-6);
  570. if (verboseStartEnd)
  571. std::cerr << "================== TestFastHIK::testLinSolve done ===================== " << std::endl;
  572. }
  573. void TestFastHIK::testKernelVector()
  574. {
  575. if (verboseStartEnd)
  576. std::cerr << "================== TestFastHIK::testKernelVector ===================== " << std::endl;
  577. std::vector< std::vector<double> > dataMatrix;
  578. std::vector<double> dim1; dim1.push_back(0.2);dim1.push_back(0.1);dim1.push_back(0.0);dim1.push_back(0.0);dim1.push_back(0.4); dataMatrix.push_back(dim1);
  579. std::vector<double> dim2; dim2.push_back(0.3);dim2.push_back(0.6);dim2.push_back(1.0);dim2.push_back(0.4);dim2.push_back(0.3); dataMatrix.push_back(dim2);
  580. std::vector<double> dim3; dim3.push_back(0.5);dim3.push_back(0.3);dim3.push_back(0.0);dim3.push_back(0.6);dim3.push_back(0.3); dataMatrix.push_back(dim3);
  581. if ( verbose ) {
  582. std::cerr << "data matrix: " << std::endl;
  583. printMatrix ( dataMatrix );
  584. std::cerr << endl;
  585. }
  586. double noise = 1.0;
  587. FastMinKernel fmk ( dataMatrix, noise );
  588. std::vector<double> xStar; xStar.push_back(0.2);xStar.push_back(0.7);xStar.push_back(0.1);
  589. NICE::Vector xStarVec (xStar);
  590. std::vector<double> x2; x2.push_back(0.7);x2.push_back(0.3);xStar.push_back(0.0);
  591. NICE::Vector x2Vec (x2);
  592. NICE::SparseVector xStarsparse( xStarVec );
  593. NICE::SparseVector x2sparse( x2Vec );
  594. NICE::Vector k1;
  595. fmk.hikComputeKernelVector( xStarsparse, k1 );
  596. NICE::Vector k2;
  597. fmk.hikComputeKernelVector( x2sparse, k2 );
  598. NICE::Vector k1GT(5); k1GT[0] = 0.6; k1GT[1] = 0.8; k1GT[2] = 0.7; k1GT[3] = 0.5; k1GT[4] = 0.6;
  599. NICE::Vector k2GT(5); k2GT[0] = 0.5; k2GT[1] = 0.4; k2GT[2] = 0.3; k2GT[3] = 0.3; k2GT[4] = 0.7;
  600. if (verbose)
  601. {
  602. std::cerr << "k1: " << k1 << std::endl;
  603. std::cerr << "GT: " << k1GT << std::endl;
  604. std::cerr << "k2: " << k2 << std::endl;
  605. std::cerr << "GT: " << k2GT << std::endl;
  606. }
  607. for (int i = 0; i < 5; i++)
  608. {
  609. CPPUNIT_ASSERT_DOUBLES_EQUAL(k1[i]-k1GT[i], 0.0, 1e-6);
  610. CPPUNIT_ASSERT_DOUBLES_EQUAL(k2[i]-k2GT[i], 0.0, 1e-6);
  611. }
  612. if (verboseStartEnd)
  613. std::cerr << "================== TestFastHIK::testKernelVector done ===================== " << std::endl;
  614. }
  615. void TestFastHIK::testAddExample()
  616. {
  617. if (verboseStartEnd)
  618. std::cerr << "================== TestFastHIK::testAddExample ===================== " << std::endl;
  619. std::vector< std::vector<double> > dataMatrix;
  620. int dim = 3;
  621. int number = 5;
  622. if (!smallTest)
  623. {
  624. dim = d;
  625. number = n;
  626. }
  627. if (smallTest)
  628. {
  629. dataMatrix.resize(3);
  630. //we explicitely give some values which can easily be verified
  631. dataMatrix[0].push_back(0.2);dataMatrix[0].push_back(0.1);dataMatrix[0].push_back(0.0);dataMatrix[0].push_back(0.0);dataMatrix[0].push_back(0.4);
  632. dataMatrix[1].push_back(0.3);dataMatrix[1].push_back(0.6);dataMatrix[1].push_back(1.0);dataMatrix[1].push_back(0.4);dataMatrix[1].push_back(0.3);
  633. dataMatrix[2].push_back(0.5);dataMatrix[2].push_back(0.3);dataMatrix[2].push_back(0.0);dataMatrix[2].push_back(0.6);dataMatrix[2].push_back(0.3);
  634. }
  635. else
  636. {
  637. // randomly generate features
  638. generateRandomFeatures ( dim, number, dataMatrix );
  639. // and make them sparse
  640. int nrZeros(0);
  641. for ( int i = 0 ; i < dim; i++ )
  642. {
  643. for ( int k = 0; k < number; k++ )
  644. if ( drand48() < sparse_prob )
  645. {
  646. dataMatrix[i][k] = 0.0;
  647. nrZeros++;
  648. }
  649. }
  650. }
  651. if ( verbose ) {
  652. std::cerr << "data matrix: " << std::endl;
  653. printMatrix ( dataMatrix );
  654. std::cerr << endl;
  655. }
  656. double noise = 1.0;
  657. //check the features stored in the fmk
  658. FastMinKernel fmk ( dataMatrix, noise );
  659. NICE::Vector alpha;
  660. ParameterizedFunction *pf = new PFAbsExp( 1.2 ); //1.0 is okay
  661. fmk.applyFunctionToFeatureMatrix( pf );
  662. // pf->applyFunctionToFeatureMatrix ( fmk.featureMatrix() );
  663. std::cerr << "generate alpha" << std::endl;
  664. if (smallTest)
  665. {
  666. //we explicitely give some values which can easily be verified
  667. alpha = Vector(5,1.0);
  668. alpha[0] = 0.1;alpha[1] = 0.2;alpha[2] = 0.4;alpha[3] = 0.8;alpha[4] = 1.6;
  669. }
  670. else
  671. { // randomly generate features
  672. alpha = Vector::UniformRandom( number, 0.0, 1.0, 0 );
  673. }
  674. std::cerr << "generate xStar" << std::endl;
  675. std::vector<double> xStar;
  676. if (smallTest)
  677. {
  678. // we check the following cases: largest elem in dim, smallest elem in dim, zero element
  679. // remember to adapt the feature in some lines apart as well
  680. xStar.push_back(0.9);xStar.push_back(0.0);xStar.push_back(0.1);
  681. }
  682. else
  683. {
  684. // again: random sampling
  685. for ( int i = 0 ; i < dim; i++ )
  686. {
  687. if ( drand48() < sparse_prob )
  688. xStar.push_back(0.0);
  689. else
  690. xStar.push_back(drand48());
  691. }
  692. }
  693. NICE::Vector xStarVec (xStar);
  694. NICE::SparseVector xStarSV (xStarVec);
  695. // check the alpha-preparations
  696. NICE::VVector A;
  697. NICE::VVector B;
  698. fmk.hik_prepare_alpha_multiplications( alpha, A, B );
  699. //check the quantization and LUT construction
  700. Quantization q ( numBins );
  701. //direct
  702. // double * LUT = fmk.hikPrepareLookupTable(alpha, q);
  703. //indirect
  704. double * LUT = fmk.hik_prepare_alpha_multiplications_fast( A, B, q, pf );
  705. //check for kernel vector norm approximation
  706. NICE::VVector AForKVN;
  707. fmk.hikPrepareKVNApproximation(AForKVN);
  708. //check the LUTs for fast kernel vector norm approximation
  709. //direct
  710. double* LUT_kernelVectorNormDirect = fmk.hikPrepareLookupTableForKVNApproximation(q, pf );
  711. //indirect
  712. double* LUT_kernelVectorNorm = fmk.hikPrepareKVNApproximationFast( AForKVN, q, pf );
  713. bool LUTKVN_equal( compareLUTs( LUT_kernelVectorNorm, LUT_kernelVectorNormDirect, q.size()*dim ) );
  714. if (verbose)
  715. {
  716. if (LUTKVN_equal == false)
  717. {
  718. std::cerr << "LUTKVN is not equal :( " << std::endl;
  719. std::cerr << "LUT_kernelVectorNorm: " << std::endl;
  720. for ( uint i = 0; i < q.size()*dim; i++ )
  721. {
  722. if ( (i % q.size()) == 0)
  723. std::cerr << std::endl;
  724. std::cerr << LUT_kernelVectorNorm[i] << " ";
  725. }
  726. std::cerr << "LUT_kernelVectorNormDirect: "<< std::endl;
  727. for ( uint i = 0; i < q.size()*dim; i++ )
  728. {
  729. if ( (i % q.size()) == 0)
  730. std::cerr << std::endl;
  731. std::cerr << LUT_kernelVectorNormDirect[i] << " ";
  732. }
  733. }
  734. }
  735. CPPUNIT_ASSERT( LUTKVN_equal == true );
  736. if (verbose)
  737. std::cerr << "start the incremental learning part" << std::endl;
  738. // ------ Incremental Learning -----
  739. double newAlpha;
  740. if (smallTest)
  741. newAlpha = 3.2;
  742. else
  743. newAlpha = drand48();
  744. alpha.append(newAlpha);
  745. // add an example
  746. if (verbose)
  747. std::cerr << "addExample" << std::endl;
  748. fmk.addExample( xStarSV, pf );
  749. // update the alpha preparation
  750. if (verbose)
  751. std::cerr << "update Alpha Preparation" << std::endl;
  752. fmk.updatePreparationForAlphaMultiplications( xStarSV, newAlpha, A, B, pf );
  753. // update the LUT for fast multiplications
  754. if (verbose)
  755. std::cerr << "update LUT" << std::endl;
  756. fmk.updateLookupTableForAlphaMultiplications( xStarSV, newAlpha, LUT, q, pf );
  757. //update VVector for Kernel vector norm
  758. if (verbose)
  759. std::cerr << "update VVector for Kernel vector norm" << std::endl;
  760. fmk.updatePreparationForKVNApproximation( xStarSV, AForKVN, pf );
  761. // update LUT for kernel vector norm
  762. if (verbose)
  763. std::cerr << "update LUT for kernel vector norm" << std::endl;
  764. fmk.updateLookupTableForKVNApproximation( xStarSV, LUT_kernelVectorNorm, q, pf );
  765. //and batch retraining
  766. if (verbose)
  767. std::cerr << "perform batch retraining " << std::endl;
  768. for ( int i = 0 ; i < dim; i++ )
  769. dataMatrix[i].push_back(xStar[i]);
  770. FastMinKernel fmk2 ( dataMatrix, noise );
  771. fmk2.applyFunctionToFeatureMatrix( pf );
  772. NICE::VVector A2;
  773. NICE::VVector B2;
  774. if (verbose)
  775. std::cerr << "prepare alpha multiplications" << std::endl;
  776. fmk2.hik_prepare_alpha_multiplications( alpha, A2, B2 );
  777. // compare the content of the data matrix
  778. if (verbose)
  779. std::cerr << "do the comparison of the resulting feature matrices" << std::endl;
  780. if (verbose)
  781. {
  782. std::cerr << "fmk.featureMatrix().print()" << std::endl;
  783. fmk.featureMatrix().print(std::cerr);
  784. std::cerr << "fmk2.featureMatrix().print()" << std::endl;
  785. fmk2.featureMatrix().print(std::cerr);
  786. }
  787. CPPUNIT_ASSERT(fmk.featureMatrix() == fmk2.featureMatrix());
  788. //compare the preparation for alpha multiplications
  789. if (verbose)
  790. std::cerr << "do the comparison of the resulting matrices A and B" << std::endl;
  791. CPPUNIT_ASSERT(compareVVector(A, A2));
  792. CPPUNIT_ASSERT(compareVVector(B, B2));
  793. if (verbose)
  794. {
  795. std::cerr << "compare the preparation for alpha multiplications" << std::endl;
  796. std::cerr << "A: " << std::endl;
  797. A.store(std::cerr);
  798. std::cerr << "A2: " << std::endl;
  799. A2.store(std::cerr);
  800. std::cerr << "B: " << std::endl;
  801. B.store(std::cerr);
  802. std::cerr << "B2: " << std::endl;
  803. B2.store(std::cerr);
  804. }
  805. // compare the resulting LUTs
  806. if (verbose)
  807. std::cerr << "prepare LUT" << std::endl;
  808. double * LUT2 = fmk2.hikPrepareLookupTable( alpha, q, pf );
  809. if (verbose)
  810. std::cerr << "do the comparison of the resulting LUTs" << std::endl;
  811. bool LUTequal( compareLUTs( LUT, LUT2, q.size()*dim) );
  812. if (verbose)
  813. {
  814. if ( LUTequal )
  815. std::cerr << "LUTs are equal :) " << std::endl;
  816. else
  817. {
  818. std::cerr << "LUTs are not equal :( " << std::endl;
  819. std::cerr << "new feature vector: " << xStarVec << std::endl;
  820. std::cerr << "newAlpha: " << newAlpha << " alpha " << alpha << std::endl;
  821. std::cerr << "LUT: " << std::endl;
  822. for ( uint i = 0; i < q.size()*dim; i++ )
  823. {
  824. if ( (i % q.size()) == 0)
  825. std::cerr << std::endl;
  826. std::cerr << LUT[i] << " ";
  827. }
  828. std::cerr << "LUT2: "<< std::endl;
  829. for ( uint i = 0; i < q.size()*dim; i++ )
  830. {
  831. if ( (i % q.size()) == 0)
  832. std::cerr << std::endl;
  833. std::cerr << LUT2[i] << " ";
  834. }
  835. }
  836. }
  837. CPPUNIT_ASSERT( LUTequal );
  838. //check for kernel vector norm approximation
  839. NICE::VVector A2ForKVN;
  840. fmk2.hikPrepareKVNApproximation( A2ForKVN );
  841. bool KVN_equal ( compareVVector(AForKVN, A2ForKVN) );
  842. if (verbose)
  843. {
  844. if ( KVN_equal )
  845. std::cerr << "VVectors for kernel vector norm are equal :) " << std::endl;
  846. else
  847. {
  848. std::cerr << "VVectors for vector norm are not equal :( " << std::endl;
  849. std::cerr << "new feature vector: " << xStarVec << std::endl;
  850. std::cerr << "AForKVN: " << std::endl;
  851. AForKVN.store(std::cerr);
  852. std::cerr << "A2ForKVN: "<< std::endl;
  853. A2ForKVN.store(std::cerr);
  854. }
  855. }
  856. CPPUNIT_ASSERT( KVN_equal );
  857. //check for kernel vector norm approximation with LUTs
  858. if (verbose)
  859. std::cerr << "prepare LUT for kernel vector norm" << std::endl;
  860. double* LUT2_kernelVectorNorm = fmk2.hikPrepareLookupTableForKVNApproximation( q, pf );
  861. if (verbose)
  862. std::cerr << "do the comparison of the resulting LUTs for kernel vector norm computation" << std::endl;
  863. bool LUT_KVN_equal( compareLUTs ( LUT_kernelVectorNorm, LUT2_kernelVectorNorm, q.size()*dim ) );
  864. if (verbose)
  865. {
  866. if ( LUT_KVN_equal )
  867. std::cerr << "LUTs for kernel vector norm are equal :) " << std::endl;
  868. else
  869. {
  870. std::cerr << "LUTs kernel vector norm are not equal :( " << std::endl;
  871. std::cerr << "new feature vector: " << xStarVec << std::endl;
  872. std::cerr << "LUT_kernelVectorNorm: " << std::endl;
  873. for ( int i = 0; i < q.size()*dim; i++ )
  874. {
  875. if ( (i % q.size()) == 0)
  876. std::cerr << std::endl;
  877. std::cerr << LUT_kernelVectorNorm[i] << " ";
  878. }
  879. std::cerr << std::endl << "LUT2_kernelVectorNorm: "<< std::endl;
  880. for ( uint i = 0; i < q.size()*dim; i++ )
  881. {
  882. if ( (i % q.size()) == 0)
  883. std::cerr << std::endl;
  884. std::cerr << LUT2_kernelVectorNorm[i] << " ";
  885. }
  886. }
  887. }
  888. CPPUNIT_ASSERT( LUT_KVN_equal );
  889. delete [] LUT;
  890. delete [] LUT2;
  891. delete [] LUT_kernelVectorNorm;
  892. delete [] LUT2_kernelVectorNorm;
  893. if (verboseStartEnd)
  894. std::cerr << "================== TestFastHIK::testAddExample done ===================== " << std::endl;
  895. }
  896. void TestFastHIK::testAddMultipleExamples()
  897. {
  898. if (verboseStartEnd)
  899. std::cerr << "================== TestFastHIK::testAddMultipleExamples ===================== " << std::endl;
  900. std::vector< std::vector<double> > dataMatrix;
  901. int dim = d;
  902. int number = n;
  903. // randomly generate features
  904. generateRandomFeatures ( dim, number, dataMatrix );
  905. // and make them sparse
  906. int nrZeros(0);
  907. for ( int i = 0 ; i < dim; i++ )
  908. {
  909. for ( int k = 0; k < number; k++ )
  910. if ( drand48() < sparse_prob )
  911. {
  912. dataMatrix[i][k] = 0.0;
  913. nrZeros++;
  914. }
  915. }
  916. if ( verbose ) {
  917. std::cerr << "data matrix: " << std::endl;
  918. printMatrix ( dataMatrix );
  919. std::cerr << endl;
  920. }
  921. double noise = 1.0;
  922. //check the features stored in the fmk
  923. FastMinKernel fmk ( dataMatrix, noise );
  924. NICE::Vector alpha;
  925. ParameterizedFunction *pf = new PFAbsExp( 1.0 ); //1.0 is okay
  926. fmk.applyFunctionToFeatureMatrix( pf );
  927. std::cerr << "generate alpha" << std::endl;
  928. // randomly generate features
  929. alpha = Vector::UniformRandom( number, 0.0, 1.0, 0 );
  930. /* // check the alpha-preparations
  931. NICE::VVector A;
  932. NICE::VVector B;
  933. fmk.hik_prepare_alpha_multiplications( alpha, A, B );*/
  934. if (verbose)
  935. std::cerr << "start the incremental learning part" << std::endl;
  936. // ------ Incremental Learning -----
  937. std::cerr << "generate xStar" << std::endl;
  938. std::vector<NICE::SparseVector > newExamples;
  939. int nrOfNewExamples(5);
  940. // again: random sampling
  941. for (int i = 0; i < nrOfNewExamples; i++)
  942. {
  943. NICE::Vector xStar(dim);
  944. for ( int j = 0 ; j < dim; j++ )
  945. {
  946. if ( drand48() < sparse_prob )
  947. {
  948. xStar[j] = 0.0;
  949. dataMatrix[j].push_back(0.0);
  950. }
  951. else
  952. {
  953. double tmp(drand48());
  954. xStar[j] = tmp;
  955. dataMatrix[j].push_back(tmp);
  956. }
  957. }
  958. NICE::SparseVector xStarSV (xStar);
  959. newExamples.push_back(xStarSV);
  960. }
  961. // add an example
  962. if (verbose)
  963. std::cerr << "addExample" << std::endl;
  964. for (int i = 0; i < nrOfNewExamples; i++)
  965. {
  966. fmk.addExample( newExamples[i], pf );
  967. }
  968. int oldSize(alpha.size());
  969. alpha.resize( oldSize + nrOfNewExamples);
  970. for (int i = 0; i < nrOfNewExamples; i++)
  971. {
  972. alpha[oldSize + i] = drand48();
  973. }
  974. // update the alpha preparation
  975. if (verbose)
  976. std::cerr << "update Alpha Preparation" << std::endl;
  977. // check the alpha-preparations
  978. NICE::VVector A;
  979. NICE::VVector B;
  980. fmk.hik_prepare_alpha_multiplications( alpha, A, B );
  981. FastMinKernel fmk2 ( dataMatrix, noise );
  982. fmk2.applyFunctionToFeatureMatrix( pf );
  983. NICE::VVector A2;
  984. NICE::VVector B2;
  985. fmk2.hik_prepare_alpha_multiplications( alpha, A2, B2 );
  986. bool equalA = compareVVector( A, A2 );
  987. bool equalB = compareVVector( B, B2 );
  988. CPPUNIT_ASSERT(equalA == true);
  989. CPPUNIT_ASSERT(equalB == true);
  990. if (verboseStartEnd)
  991. std::cerr << "================== TestFastHIK::testAddMultipleExamples done ===================== " << std::endl;
  992. }
  993. #endif