
/**
* @file KernelData.cpp
* @brief Caching of kernel matrix data and its Cholesky factorization
* @author Erik Rodner
* @date 01/19/2010
*/
#include <iostream>
#include <vector>

#include "KernelData.h"
#include "core/algebra/CholeskyRobustAuto.h"
#include "core/algebra/CholeskyRobust.h"
#include "core/vector/Algorithms.h"

#ifdef NICE_USELIB_OPENMP
#include <omp.h> // for parallel processing
#endif

using namespace std;
using namespace NICE;
using namespace OBJREC;
KernelData::KernelData()
{
  // initialize from an empty config
  Config conf;
  initFromConfig ( &conf, "DONTCARE" );
}

KernelData::KernelData ( const KernelData & src )
{
  kernelMatrix = src.kernelMatrix;
  inverseKernelMatrix = src.inverseKernelMatrix;
  choleskyMatrix = src.choleskyMatrix;

  // deep copy of all cached matrices
  for ( map<int, NICE::Matrix *>::const_iterator i = src.cachedMatrices.begin();
        i != src.cachedMatrices.end(); i++ )
  {
    Matrix *M = new Matrix ( * ( i->second ) );
    cachedMatrices.insert ( pair<int, NICE::Matrix *> ( i->first, M ) );
  }

  logdet = src.logdet;
  verbose = src.verbose;
  cr = src.cr->clone();
}

KernelData::KernelData ( const Config *conf, const Matrix & kernelMatrix, const string & section )
{
  initFromConfig ( conf, section );
  this->kernelMatrix = kernelMatrix;
  updateCholeskyFactorization();
}

KernelData::KernelData ( const Config *conf, const string & section )
{
  initFromConfig ( conf, section );
}
void KernelData::initFromConfig ( const Config *conf, const string & section )
{
  verbose = conf->gB ( section, "verbose", false );
  string inv_method = conf->gS ( section, "robust_cholesky", "auto" );
  double noiseStep = conf->gD ( section, "rchol_noise_variance", 1e-7 );
  double minimumLogDet = conf->gD ( section, "rchol_minimum_logdet", - std::numeric_limits<double>::max() );
  bool useCuda = conf->gB ( section, "rchol_cuda", true );

  if ( verbose && useCuda )
    std::cerr << "KernelData: using the CUDA implementation of the Cholesky decomposition (might be inaccurate)" << std::endl;

  if ( inv_method == "auto" )
  {
    if ( verbose )
      std::cerr << "KernelData: using the Cholesky method with automatic regularization" << std::endl;
    cr = new CholeskyRobustAuto ( verbose, noiseStep, minimumLogDet, useCuda );
  } else {
    if ( verbose )
      std::cerr << "KernelData: using the Cholesky method with static regularization" << std::endl;
    cr = new CholeskyRobust ( verbose, noiseStep, useCuda );
  }
}
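
/* A minimal configuration sketch for the keys read above. This is hedged:
 * the setter names sB/sS/sD are assumed counterparts of the gB/gS/gD getters
 * used in initFromConfig() and are not verified here.
 *
 *   NICE::Config conf;
 *   conf.sB ( "Kernel", "verbose", true );                // enable debug output
 *   conf.sS ( "Kernel", "robust_cholesky", "auto" );      // automatic regularization
 *   conf.sD ( "Kernel", "rchol_noise_variance", 1e-7 );   // regularization step
 *   OBJREC::KernelData kernelData ( &conf, someKernelMatrix, "Kernel" );
 */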
KernelData::~KernelData()
{
  delete cr;
}

void KernelData::updateCholeskyFactorization ()
{
  if ( verbose )
    std::cerr << "KernelData: kernel: " << kernelMatrix.rows() << " " << kernelMatrix.cols() << std::endl;

  if ( ( kernelMatrix.rows() <= 0 ) || ( kernelMatrix.cols() <= 0 ) )
    fthrow ( Exception, "KernelData: no kernel matrix available!" );

  if ( kernelMatrix.containsNaN() )
  {
    if ( verbose )
      std::cerr << "KernelData: kernel matrix contains NaNs (setting inverse to identity)" << std::endl;
    logdet = numeric_limits<double>::max();
    choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
    choleskyMatrix.setIdentity();
  } else {
    if ( verbose )
      std::cerr << "KernelData: calculating Cholesky decomposition" << std::endl;
    cr->robustChol ( kernelMatrix, choleskyMatrix );
    logdet = cr->getLastLogDet();
    // fall back to the identity if the factorization failed numerically
    if ( !finite ( logdet ) )
    {
      choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
      choleskyMatrix.setIdentity();
      logdet = numeric_limits<double>::max();
    }
  }
}
void KernelData::updateInverseKernelMatrix ()
{
  if ( ! hasCholeskyFactorization() )
    updateCholeskyFactorization();
  inverseKernelMatrix.resize ( choleskyMatrix.rows(), choleskyMatrix.cols() );
  choleskyInvertLargeScale ( choleskyMatrix, inverseKernelMatrix );
}

void KernelData::computeInverseKernelMultiply ( const Vector & x, Vector & result ) const
{
  if ( choleskyMatrix.rows() == 0 )
    fthrow ( Exception, "Cholesky factorization was not initialized, use updateCholeskyFactorization() in advance" );
  choleskySolveLargeScale ( choleskyMatrix, x, result );
}
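
/* Usage sketch: computeInverseKernelMultiply() solves K * result = x through
 * the cached Cholesky factor, so the explicit inverse is never needed for
 * products of the form K^{-1} x (variable names are illustrative):
 *
 *   kernelData.updateCholeskyFactorization();
 *   NICE::Vector alpha;
 *   kernelData.computeInverseKernelMultiply ( y, alpha ); // alpha = K^{-1} y
 */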
const NICE::Matrix & KernelData::getKernelMatrix() const
{
  return kernelMatrix;
}

NICE::Matrix & KernelData::getKernelMatrix()
{
  return kernelMatrix;
}

const NICE::Matrix & KernelData::getInverseKernelMatrix() const
{
  return inverseKernelMatrix;
}

NICE::Matrix & KernelData::getInverseKernelMatrix()
{
  return inverseKernelMatrix;
}

const NICE::Matrix & KernelData::getCholeskyMatrix() const
{
  return choleskyMatrix;
}

const Matrix & KernelData::getCachedMatrix ( int i ) const
{
  map<int, NICE::Matrix *>::const_iterator it = cachedMatrices.find ( i );
  if ( it != cachedMatrices.end() )
    return * ( it->second );
  else
    fthrow ( Exception, "Cached matrix with index " << i << " is not available." );
}

void KernelData::setCachedMatrix ( int i, Matrix *m )
{
  cachedMatrices[i] = m;
}

uint KernelData::getKernelMatrixSize () const
{
  // fall back to the size of the Cholesky factor if the kernel matrix was not set
  return ( kernelMatrix.rows() == 0 ) ? choleskyMatrix.rows() : kernelMatrix.rows();
}
void KernelData::getLooEstimates ( const Vector & y, Vector & muLoo, Vector & sigmaLoo ) const
{
  if ( inverseKernelMatrix.rows() != getKernelMatrixSize() )
    fthrow ( Exception, "updateInverseKernelMatrix() has to be called in advance to use this function\n" );
  if ( y.size() != inverseKernelMatrix.rows() )
    fthrow ( Exception, "The size of the inverse kernel matrix does not match the size of the vector of function values y\n" );

  Vector alpha;
  computeInverseKernelMultiply ( y, alpha );

  muLoo.resize ( y.size() );
  sigmaLoo.resize ( y.size() );
  for ( uint l = 0 ; l < y.size(); l++ )
  {
    sigmaLoo[l] = 1.0 / inverseKernelMatrix ( l, l );
    muLoo[l] = y[l] - alpha[l] * sigmaLoo[l];
  }
}
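
/* The loop above implements the standard closed-form GP leave-one-out
 * identities (cf. Rasmussen & Williams, "Gaussian Processes for Machine
 * Learning", Sec. 5.4.2), with alpha = K^{-1} y:
 *
 *   sigmaLoo_l = 1 / (K^{-1})_{ll}
 *   muLoo_l    = y_l - alpha_l / (K^{-1})_{ll}
 *
 * so all n leave-one-out estimates are obtained from a single inverse.
 */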
KernelData *KernelData::clone ( void ) const
{
  return new KernelData ( *this );
}

/**
* @brief Updates the GP likelihood if only the i-th row and column have changed. Time is O(n^2) instead of O(n^3).
* @author Alexander Lütz
* @date 01/12/2010 (dd/mm/yyyy)
*/
void KernelData::getGPLikelihoodWithOneNewRow ( const NICE::Vector & y, const double & oldLogdetK, const int & rowIndex, const NICE::Vector & newRow, const Vector & oldAlpha , Vector & newAlpha, double & loglike )
{
  // oldAlpha = K^{-1} y
  // K' is the new kernel matrix: exchange the row and column at position
  // rowIndex with newRow.
  // Find U and V such that K' = K + U*V with U, V having rank 2.

  // rowIndex-th unit vector
  Vector ei ( y.size(), 0.0 );
  ei[rowIndex] = 1.0;

  // difference between the new and the old row; the diagonal entry is halved,
  // since the rank-2 update adds it twice
  Vector a = newRow - kernelMatrix.getRow ( rowIndex );
  a[rowIndex] = a[rowIndex] / 2;

  NICE::Matrix U ( y.size(), 2 );
  NICE::Matrix V ( 2, y.size() );
  for ( uint i = 0; i < y.size(); i++ )
  {
    U ( i, 0 ) = a[i];
    U ( i, 1 ) = ei[i];
    V ( 0, i ) = ei[i];
    V ( 1, i ) = a[i];
  }

  // Sherman-Morrison-Woodbury formula:
  // alpha_new = (K + UV)^{-1} y
  //           = K^{-1} y - K^{-1} U ( I + V K^{-1} U )^{-1} V K^{-1} y
  //           = oldAlpha - B ( I + V B )^{-1} V oldAlpha
  //           = oldAlpha - B F^{-1} V oldAlpha
  // with B = K^{-1} U and F = I + V B.
  // Time complexity: 2 choleskySolve calls, i.e. O(n^2)
  NICE::Vector B_1;
  computeInverseKernelMultiply ( U.getColumn ( 0 ), B_1 );
  NICE::Vector B_2;
  computeInverseKernelMultiply ( U.getColumn ( 1 ), B_2 );
  NICE::Matrix B ( y.size(), 2 );
  for ( uint i = 0; i < y.size(); i++ )
  {
    B ( i, 0 ) = B_1[i];
    B ( i, 1 ) = B_2[i];
  }

  NICE::Matrix F ( 2, 2 );
  F.setIdentity();
  Matrix V_B ( 2, 2 );
  V_B.multiply ( V, B );
  F += V_B;

  // F is only a 2x2 linear system, so it can be inverted in O(1) in closed form
  NICE::Matrix F_inv ( 2, 2 );
  double denominator = F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 );
  F_inv ( 0, 0 ) = F ( 1, 1 ) / denominator;
  F_inv ( 0, 1 ) = -F ( 0, 1 ) / denominator;
  F_inv ( 1, 0 ) = -F ( 1, 0 ) / denominator;
  F_inv ( 1, 1 ) = F ( 0, 0 ) / denominator;

  Matrix M_oldAlpha ( y.size(), 1 );
  for ( uint i = 0; i < y.size(); i++ )
  {
    M_oldAlpha ( i, 0 ) = oldAlpha[i];
  }
  NICE::Matrix V_oldAlpha;
  V_oldAlpha.multiply ( V, M_oldAlpha );
  NICE::Matrix F_inv_V_old_Alpha;
  F_inv_V_old_Alpha.multiply ( F_inv, V_oldAlpha );
  NICE::Matrix M_newAlpha;
  M_newAlpha.multiply ( B, F_inv_V_old_Alpha );
  M_newAlpha *= -1;
  M_newAlpha += M_oldAlpha;

  newAlpha = NICE::Vector ( y.size() );
  for ( uint i = 0; i < y.size(); i++ )
  {
    newAlpha[i] = M_newAlpha ( i, 0 );
  }

  // Matrix determinant lemma
  // (http://en.wikipedia.org/wiki/Matrix_determinant_lemma):
  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
  // => logdet(K + U*V) = logdet(F) + logdet(K)
  double logdetF = log ( F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 ) );
  double newLogdetK = logdetF + oldLogdetK;
  logdet = newLogdetK;
  loglike = newLogdetK + newAlpha.scalarProduct ( y );
}
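
/* Why the diagonal entry of a is halved: with U = [a, e_i] and V = [e_i^T; a^T]
 * as constructed above, the rank-2 term expands to
 *
 *   U*V = a * e_i^T + e_i * a^T ,
 *
 * which adds a to the rowIndex-th column and to the rowIndex-th row
 * simultaneously. The diagonal element (rowIndex, rowIndex) therefore
 * receives the difference twice, hence the factor 1/2.
 */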
/**
* @brief First, label-independent part of the O(n^2) GP likelihood update for a changed row and column (see getGPLikelihoodWithOneNewRow). Useful for multiclass problems.
* @author Alexander Lütz
* @date 01/12/2010 (dd/mm/yyyy)
*/
void KernelData::getGPLikelihoodWithOneNewRow_FirstPart ( const int & rowIndex, const NICE::Vector & newRow )
{
  // oldAlpha = K^{-1} y
  // K' is the new kernel matrix: exchange the row and column at position
  // rowIndex with newRow.
  // Find U and V such that K' = K + U*V with U, V having rank 2.

  // rowIndex-th unit vector
  Vector ei ( newRow.size(), 0.0 );
  ei[rowIndex] = 1.0;

  // difference between the new and the old row; the diagonal entry is halved,
  // since the rank-2 update adds it twice
  Vector a = newRow - kernelMatrix.getRow ( rowIndex );
  a[rowIndex] = a[rowIndex] / 2;

  U.resize ( newRow.size(), 2 );
  V.resize ( 2, newRow.size() );
  // #pragma omp parallel for
  for ( uint i = 0; i < newRow.size(); i++ )
  {
    U ( i, 0 ) = a[i];
    U ( i, 1 ) = ei[i];
    V ( 0, i ) = ei[i];
    V ( 1, i ) = a[i];
  }

  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::getGPLikelihoodWithOneNewRow_FirstPart -- U:" << std::endl;
    for ( uint ik = 0; ik < U.rows(); ik++ )
    {
      for ( uint jk = 0; jk < U.cols(); jk++ )
      {
        std::cerr << U ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
    std::cerr << std::endl << "KernelData::getGPLikelihoodWithOneNewRow_FirstPart -- V:" << std::endl;
    for ( uint ik = 0; ik < V.rows(); ik++ )
    {
      for ( uint jk = 0; jk < V.cols(); jk++ )
      {
        std::cerr << V ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }

  // Sherman-Morrison-Woodbury formula:
  // alpha_new = (K + UV)^{-1} y
  //           = K^{-1} y - K^{-1} U ( I + V K^{-1} U )^{-1} V K^{-1} y
  //           = oldAlpha - B ( I + V B )^{-1} V oldAlpha
  //           = oldAlpha - B F^{-1} V oldAlpha
  // with B = K^{-1} U and F = I + V B.
  // Time complexity: 2 choleskySolve calls, i.e. O(n^2)
  NICE::Vector B_1;
  computeInverseKernelMultiply ( U.getColumn ( 0 ), B_1 );
  NICE::Vector B_2;
  computeInverseKernelMultiply ( U.getColumn ( 1 ), B_2 );
  B.resize ( newRow.size(), 2 );
  for ( uint i = 0; i < newRow.size(); i++ )
  {
    B ( i, 0 ) = B_1[i];
    B ( i, 1 ) = B_2[i];
  }

  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::getGPLikelihoodWithOneNewRow_FirstPart -- B:" << std::endl;
    for ( uint ik = 0; ik < B.rows(); ik++ )
    {
      for ( uint jk = 0; jk < B.cols(); jk++ )
      {
        std::cerr << B ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }

  F.resize ( 2, 2 );
  F.setIdentity();
  Matrix V_B ( 2, 2 );
  V_B.multiply ( V, B );
  F += V_B;

  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::getGPLikelihoodWithOneNewRow_FirstPart -- F:" << std::endl;
    for ( uint ik = 0; ik < F.rows(); ik++ )
    {
      for ( uint jk = 0; jk < F.cols(); jk++ )
      {
        std::cerr << F ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }

  // F is only a 2x2 linear system, so it can be inverted in O(1) in closed form
  F_inv.resize ( 2, 2 );
  double denominator = F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 );
  F_inv ( 0, 0 ) = F ( 1, 1 ) / denominator;
  F_inv ( 0, 1 ) = -F ( 0, 1 ) / denominator;
  F_inv ( 1, 0 ) = -F ( 1, 0 ) / denominator;
  F_inv ( 1, 1 ) = F ( 0, 0 ) / denominator;

  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::getGPLikelihoodWithOneNewRow_FirstPart -- F_inv:" << std::endl;
    for ( uint ik = 0; ik < F_inv.rows(); ik++ )
    {
      for ( uint jk = 0; jk < F_inv.cols(); jk++ )
      {
        std::cerr << F_inv ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
    // sanity check: F * F_inv should be the identity
    NICE::Matrix MultiplicationResult ( F_inv.cols(), F_inv.cols(), 0.0 );
    MultiplicationResult.multiply ( F, F_inv );
    std::cerr << std::endl << "KernelData::getGPLikelihoodWithOneNewRow_FirstPart -- F-inversion MultiplicationResult:" << std::endl;
    for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
    {
      for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++ )
      {
        std::cerr << MultiplicationResult ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
}
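
/* Note: U, V, B, F, and F_inv are class members, so the label-independent
 * work above is done once per changed row; the label-dependent
 * getGPLikelihoodWithOneNewRow_SecondPart() below can then be called cheaply
 * for each class, e.g. for each binary label vector y in a one-vs-all
 * multiclass setup.
 */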
/**
* @brief Second, label-dependent part of the O(n^2) GP likelihood update (see getGPLikelihoodWithOneNewRow). Useful for multiclass problems.
* @author Alexander Lütz
* @date 01/12/2010 (dd/mm/yyyy)
*/
void KernelData::getGPLikelihoodWithOneNewRow_SecondPart ( const NICE::Vector & y, const double & oldLogdetK, const Vector & oldAlpha , Vector & newAlpha, double & loglike )
{
  Matrix M_oldAlpha ( y.size(), 1 );
  for ( uint i = 0; i < y.size(); i++ )
  {
    M_oldAlpha ( i, 0 ) = oldAlpha[i];
  }
  NICE::Matrix V_oldAlpha;
  V_oldAlpha.multiply ( V, M_oldAlpha );
  NICE::Matrix F_inv_V_old_Alpha;
  F_inv_V_old_Alpha.multiply ( F_inv, V_oldAlpha );
  NICE::Matrix M_newAlpha;
  M_newAlpha.multiply ( B, F_inv_V_old_Alpha );
  M_newAlpha *= -1;
  M_newAlpha += M_oldAlpha;

  newAlpha = NICE::Vector ( y.size() );
  for ( uint i = 0; i < y.size(); i++ )
  {
    newAlpha[i] = M_newAlpha ( i, 0 );
  }

  // Matrix determinant lemma
  // (http://en.wikipedia.org/wiki/Matrix_determinant_lemma):
  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
  // => logdet(K + U*V) = logdet(F) + logdet(K)
  double logdetF = log ( F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 ) );
  double newLogdetK = logdetF + oldLogdetK;
  logdet = newLogdetK;
  loglike = newLogdetK + newAlpha.scalarProduct ( y );
}
/**
* @brief Updates the stored kernel matrix and its inverse if only the i-th row and column have changed. Time is O(n^2) instead of O(n^3).
* @author Alexander Lütz
* @date 01/09/2011 (dd/mm/yyyy)
*/
void KernelData::perform_Rank_2_Update ( const int & rowIndex, const NICE::Vector & newRow )
{
  getGPLikelihoodWithOneNewRow_FirstPart ( rowIndex, newRow );

  // Woodbury update of the explicit inverse:
  // (K + UV)^{-1} = K^{-1} - B F^{-1} V K^{-1}
  Matrix prod_1;
  prod_1.multiply ( V, inverseKernelMatrix );
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_1:" << std::endl;
    for ( uint ik = 0; ik < prod_1.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_1.cols(); jk++ )
      {
        std::cerr << prod_1 ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  Matrix prod_2;
  prod_2.multiply ( F_inv, prod_1 );
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_2:" << std::endl;
    for ( uint ik = 0; ik < prod_2.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_2.cols(); jk++ )
      {
        std::cerr << prod_2 ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  Matrix prod_3;
  prod_3.multiply ( B, prod_2 );
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_3:" << std::endl;
    for ( uint ik = 0; ik < prod_3.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_3.cols(); jk++ )
      {
        std::cerr << prod_3 ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  inverseKernelMatrix = inverseKernelMatrix - prod_3;
  // std::cerr << "perform rank 2 update: inverseKernelMatrix: " << inverseKernelMatrix << std::endl;

  // keep the stored kernel matrix consistent with the update
  for ( uint i = 0; i < newRow.size(); i++ )
  {
    kernelMatrix ( i, rowIndex ) = newRow[i];
    kernelMatrix ( rowIndex, i ) = newRow[i];
  }
  // std::cerr << "kernelMatrix: " << kernelMatrix << std::endl << " newRow: " << newRow << std::endl;
}
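
/* Usage sketch (illustrative): the method reads inverseKernelMatrix, so
 * updateCholeskyFactorization() and updateInverseKernelMatrix() must have
 * been called before the update:
 *
 *   NICE::Vector newRow = ...; // recomputed kernel values for sample i
 *   kernelData.perform_Rank_2_Update ( i, newRow );
 *   // getInverseKernelMatrix() now holds the inverse of the modified
 *   // kernel matrix, obtained in O(n^2) instead of O(n^3)
 */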
/**
* @brief Updates the stored kernel matrix and its inverse if k rows and columns have changed. Time is O(k^3 + n^2) instead of O(n^3). Alternatively, one could iteratively change one row at a time, which leads to O(k*n^2).
* @author Alexander Lütz
* @date 01/09/2011 (dd/mm/yyyy)
*/
void KernelData::perform_Rank_2k_Update ( const std::vector<int> & rowIndices, const std::vector<NICE::Vector> & newRows )
{
  if ( ( rowIndices.size() != 0 ) && ( rowIndices.size() == newRows.size() ) )
  {
    std::vector<NICE::Vector> unity_vectors;
    std::vector<NICE::Vector> diff_vectors;
    for ( uint j = 0; j < rowIndices.size(); j++ )
    {
      NICE::Vector unity_vector ( newRows[0].size(), 0.0 );
      unity_vector[ rowIndices[j] ] = 1.0;
      unity_vectors.push_back ( unity_vector );

      NICE::Vector a = newRows[j] - kernelMatrix.getRow ( rowIndices[j] );
      // entries at the intersections of changed rows and columns would be
      // added twice by the rank-2k update, therefore they are halved
      for ( uint x = 0; x < rowIndices.size(); x++ )
      {
        a[ rowIndices[x] ] /= 2.0;
      }
      diff_vectors.push_back ( a );
    }

    U.resize ( newRows[0].size(), 2 * rowIndices.size() );
    V.resize ( 2 * rowIndices.size(), newRows[0].size() );
    for ( uint i = 0; i < newRows[0].size(); i++ )
    {
      for ( uint j = 0; j < rowIndices.size(); j++ )
      {
        U ( i, rowIndices.size() + j ) = ( unity_vectors[j] ) [i];
        U ( i, j ) = ( diff_vectors[j] ) [i];
        V ( rowIndices.size() + j, i ) = ( diff_vectors[j] ) [i];
        V ( j, i ) = ( unity_vectors[j] ) [i];
      }
    }
    if ( verbose )
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- U: " << U << std::endl;
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- V: " << V << std::endl;
    }

    // UV is only used for the diagnostic output below
    NICE::Matrix UV ( newRows[0].size(), newRows[0].size() );
    UV.multiply ( U, V );
    if ( verbose )
    {
      // correct the entries that are counted twice (see above) for display
      for ( int x = 0; x < ( int ) rowIndices.size(); x++ )
      {
        for ( int y = x; y < ( int ) rowIndices.size(); y++ )
        {
          UV ( rowIndices[x], rowIndices[y] ) /= 2.0;
          if ( x != y )
            UV ( rowIndices[y], rowIndices[x] ) /= 2.0;
        }
      }
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- UV:" << UV << std::endl;
    }

    // B = K^{-1} U, computed column by column via the Cholesky factor
    B.resize ( newRows[0].size(), 2 * rowIndices.size() );
    for ( uint j = 0; j < 2 * rowIndices.size(); j++ )
    {
      NICE::Vector B_row;
      computeInverseKernelMultiply ( U.getColumn ( j ), B_row );
      for ( uint i = 0; i < newRows[0].size(); i++ )
      {
        B ( i, j ) = B_row[i];
      }
    }
    if ( verbose )
    {
      std::cerr << "B: " << B.rows() << " x " << B.cols() << std::endl;
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- B:" << B << std::endl;
    }

    // F = I + V B
    F.resize ( 2 * rowIndices.size(), 2 * rowIndices.size() );
    F.setIdentity();
    Matrix V_B ( 2 * rowIndices.size(), 2 * rowIndices.size() );
    V_B.multiply ( V, B );
    F += V_B;
    if ( verbose )
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F:" << F << std::endl;
    }

    // invert F: we cannot rely on methods like the Cholesky decomposition,
    // since F need not be positive definite
    F_inv = invert ( F );
    if ( verbose )
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F_inv:" << F_inv << std::endl;
      // sanity check: F * F_inv should be the identity
      NICE::Matrix MultiplicationResult ( F.rows(), F_inv.cols(), 0.0 );
      MultiplicationResult.multiply ( F, F_inv );
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F-inversion MultiplicationResult:" << MultiplicationResult << std::endl;
    }

    // Woodbury update of the explicit inverse:
    // (K + UV)^{-1} = K^{-1} - B F^{-1} V K^{-1}
    Matrix prod_1;
    prod_1.multiply ( V, inverseKernelMatrix );
    if ( verbose )
    {
      std::cerr << "prod_1: " << prod_1.rows() << " x " << prod_1.cols() << std::endl;
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_1:" << prod_1 << std::endl;
    }
    Matrix prod_2;
    prod_2.multiply ( F_inv, prod_1 );
    if ( verbose )
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_2:" << prod_2 << std::endl;
    }
    Matrix prod_3;
    prod_3.multiply ( B, prod_2 );
    if ( verbose )
    {
      std::cerr << "prod_3 created: " << prod_3.rows() << " x " << prod_3.cols() << std::endl;
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_3:" << prod_3 << std::endl;
    }
    inverseKernelMatrix = inverseKernelMatrix - prod_3;

    // keep the stored kernel matrix consistent with the update
    for ( uint i = 0; i < rowIndices.size(); i++ )
    {
      for ( uint ik = 0; ik < kernelMatrix.rows(); ik++ )
      {
        kernelMatrix ( ik, rowIndices[i] ) = ( newRows[i] ) [ik];
        kernelMatrix ( rowIndices[i], ik ) = ( newRows[i] ) [ik];
      }
    }
  }
  else
  {
    std::cerr << "KernelData::perform_Rank_2k_Update: nothing done, since rowIndices is empty or does not match the size of newRows" << std::endl;
  }
}
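
/* Design note: a single rank-2k update inverts one 2k x 2k system (O(k^3))
 * plus O(n^2) matrix work, whereas applying perform_Rank_2_Update() k times
 * costs O(k*n^2). For k << n the combined update is therefore preferable.
 */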
void KernelData::delete_one_row ( const int & rowIndex )
{
  if ( ( inverseKernelMatrix.rows() != 0 ) && ( inverseKernelMatrix.cols() != 0 ) )
  {
    inverseKernelMatrix.deleteCol ( rowIndex );
    inverseKernelMatrix.deleteRow ( rowIndex );
  }
  if ( ( choleskyMatrix.rows() != 0 ) && ( choleskyMatrix.cols() != 0 ) )
  {
    choleskyMatrix.deleteCol ( rowIndex );
    choleskyMatrix.deleteRow ( rowIndex );
  }
  if ( ( kernelMatrix.rows() != 0 ) && ( kernelMatrix.cols() != 0 ) )
  {
    kernelMatrix.deleteCol ( rowIndex );
    kernelMatrix.deleteRow ( rowIndex );
  }
}

void KernelData::delete_multiple_rows ( std::vector<int> & indices )
{
  if ( ( inverseKernelMatrix.rows() >= indices.size() ) && ( inverseKernelMatrix.cols() >= indices.size() ) )
  {
    inverseKernelMatrix.deleteCols ( indices );
    inverseKernelMatrix.deleteRows ( indices );
  }
  if ( ( choleskyMatrix.rows() >= indices.size() ) && ( choleskyMatrix.cols() >= indices.size() ) )
  {
    choleskyMatrix.deleteCols ( indices );
    choleskyMatrix.deleteRows ( indices );
  }
  if ( ( kernelMatrix.rows() >= indices.size() ) && ( kernelMatrix.cols() >= indices.size() ) )
  {
    kernelMatrix.deleteCols ( indices );
    kernelMatrix.deleteRows ( indices );
  }
}
void KernelData::setKernelMatrix ( const NICE::Matrix & k_matrix )
{
  kernelMatrix = k_matrix;
}

void KernelData::increase_size_by_One()
{
  NICE::Matrix new_Kernel ( kernelMatrix.rows() + 1, kernelMatrix.cols() + 1 );
  new_Kernel.setBlock ( 0, 0, kernelMatrix );
  // initialize the new border entries with zero; the loop has to cover all
  // old indices 0 .. rows()-1 (the original bound rows()-1 missed the last one)
  for ( uint i = 0; i < kernelMatrix.rows(); i++ )
  {
    new_Kernel ( i, kernelMatrix.cols() ) = 0.0;
    new_Kernel ( kernelMatrix.rows(), i ) = 0.0;
  }
  new_Kernel ( kernelMatrix.rows(), kernelMatrix.cols() ) = 1.0;
  // NOTE It might be more efficient to work directly with pointers to the memory
  kernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  kernelMatrix = new_Kernel;

  new_Kernel.setBlock ( 0, 0, inverseKernelMatrix );
  inverseKernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  inverseKernelMatrix = new_Kernel;

  new_Kernel.setBlock ( 0, 0, choleskyMatrix );
  choleskyMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  choleskyMatrix = new_Kernel;
}
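
/* Workflow sketch (an assumption about intended use, not confirmed by this
 * file): since the grown matrices get a unit diagonal and zero borders, the
 * extended inverse stays consistent, and the actual kernel values for the
 * new sample can be filled in afterwards via a rank-2 update:
 *
 *   kernelData.increase_size_by_One();              // adds a unit row/column
 *   kernelData.perform_Rank_2_Update ( n, newRow ); // n = index of the new sample
 */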
void KernelData::increase_size_by_k ( const uint & k )
{
  NICE::Matrix new_Kernel ( kernelMatrix.rows() + k, kernelMatrix.cols() + k );
  new_Kernel.setBlock ( 0, 0, kernelMatrix );
  // initialize the new border entries with zero; the loop has to cover all
  // old indices 0 .. rows()-1 (the original bound rows()-1 missed the last one)
  for ( uint i = 0; i < kernelMatrix.rows(); i++ )
  {
    for ( uint j = 0; j < k; j++ )
    {
      new_Kernel ( i, kernelMatrix.cols() + j ) = 0.0;
      new_Kernel ( kernelMatrix.rows() + j, i ) = 0.0;
    }
  }
  // initialize the new k x k block with the identity (the original code only
  // set its diagonal and left the off-diagonal entries uninitialized)
  for ( uint j = 0; j < k; j++ )
  {
    for ( uint j2 = 0; j2 < k; j2++ )
    {
      new_Kernel ( kernelMatrix.rows() + j, kernelMatrix.cols() + j2 ) = ( j == j2 ) ? 1.0 : 0.0;
    }
  }
  // NOTE It might be more efficient to work directly with pointers to the memory
  kernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  kernelMatrix = new_Kernel;

  new_Kernel.setBlock ( 0, 0, inverseKernelMatrix );
  inverseKernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  inverseKernelMatrix = new_Kernel;

  new_Kernel.setBlock ( 0, 0, choleskyMatrix );
  choleskyMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  choleskyMatrix = new_Kernel;
}