// KernelData.cpp
/**
* @file KernelData.cpp
* @brief caching some kernel data
* @author Erik Rodner
* @date 01/19/2010
*/
#include "KernelData.h"

#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

#include "core/algebra/CholeskyRobustAuto.h"
#include "core/algebra/CholeskyRobust.h"
#include "core/vector/Algorithms.h"

#ifdef NICE_USELIB_OPENMP
#include <omp.h> // for parallel processing
#endif
using namespace std;
using namespace NICE;
using namespace OBJREC;
/** Default constructor: initialize with default settings from an empty config. */
KernelData::KernelData()
{
  // empty config: initFromConfig falls back to the documented defaults
  Config conf;
  initFromConfig ( &conf, "DONTCARE" );
}
/** Copy constructor: copies all matrices and deep-copies the cached matrices
 * and the robust Cholesky solver, so both objects can be used independently. */
KernelData::KernelData ( const KernelData & src )
{
  kernelMatrix = src.kernelMatrix;
  inverseKernelMatrix = src.inverseKernelMatrix;
  choleskyMatrix = src.choleskyMatrix;
  // deep copy of every cached matrix (we allocate our own copies)
  for ( map<int, NICE::Matrix *>::const_iterator i = src.cachedMatrices.begin();
        i != src.cachedMatrices.end(); i++ )
  {
    Matrix *M = new Matrix ( * ( i->second ) );
    cachedMatrices.insert ( pair<int, NICE::Matrix *> ( i->first, M ) );
  }
  logdet = src.logdet;
  verbose = src.verbose;
  // clone the solver object instead of sharing the pointer
  cr = src.cr->clone();
}
/** Construct from a config and an explicit kernel matrix.
 * @param conf           configuration with the settings read by initFromConfig()
 * @param kernelMatrix   kernel matrix to store
 * @param section        config section to read from
 * @param updateCholesky if true, immediately compute the Cholesky factorization */
KernelData::KernelData ( const Config *conf, const Matrix & kernelMatrix, const string & section, bool updateCholesky )
{
  initFromConfig ( conf, section );
  this->kernelMatrix = kernelMatrix;
  if ( updateCholesky )
    updateCholeskyFactorization();
}
/** Construct from a config only; the kernel matrix has to be set later
 * (e.g. via setKernelMatrix()). */
KernelData::KernelData ( const Config *conf, const string & section )
{
  initFromConfig ( conf, section );
}
/** Read all settings from the configuration and create the robust Cholesky solver.
 *
 * Config keys (in @p section):
 *  - verbose               : enable diagnostic output (default false)
 *  - robust_cholesky       : "auto" selects automatic regularization,
 *                            anything else selects static regularization
 *  - rchol_noise_variance  : regularization noise step (default 1e-7)
 *  - rchol_minimum_logdet  : lower bound on the log-determinant (auto mode only)
 *  - rchol_cuda            : use the CUDA implementation (default true)
 */
void KernelData::initFromConfig ( const Config *conf, const string & section )
{
  verbose = conf->gB ( section, "verbose", false );
  string inv_method = conf->gS ( section, "robust_cholesky", "auto" );
  double noiseStep = conf->gD ( section, "rchol_noise_variance", 1e-7 );
  double minimumLogDet = conf->gD ( section, "rchol_minimum_logdet", - std::numeric_limits<double>::max() );
  bool useCuda = conf->gB ( section, "rchol_cuda", true );
  if ( verbose && useCuda )
    std::cerr << "KernelData: using the cuda implementation of cholesky decomposition (might be inaccurate)" << std::endl;
  if ( inv_method == "auto" )
  {
    if ( verbose )
      std::cerr << "KernelData: using the cholesky method with automatic regularization" << std::endl;
    cr = new CholeskyRobustAuto ( verbose, noiseStep, minimumLogDet, useCuda );
  } else {
    if ( verbose )
      std::cerr << "KernelData: using the cholesky method with static regularization" << std::endl;
    cr = new CholeskyRobust ( verbose, noiseStep, useCuda );
  }
}
/** Destructor: frees the Cholesky solver.
 * NOTE(review): the matrices in cachedMatrices are NOT deleted here, although
 * the copy constructor allocates deep copies of them — copies of this class
 * therefore leak those matrices. Ownership of pointers passed to
 * setCachedMatrix() is unclear; confirm before changing this. */
KernelData::~KernelData()
{
  delete cr;
}
  75. void KernelData::updateCholeskyFactorization ()
  76. {
  77. if ( verbose )
  78. std::cerr << "KernelData: kernel: " << kernelMatrix.rows() << " " << kernelMatrix.cols() << std::endl;
  79. if ( ( kernelMatrix.rows() <= 0 ) || ( kernelMatrix.cols() <= 0 ) )
  80. fthrow ( Exception, "KernelData: no kernel matrix available !" );
  81. if ( kernelMatrix.containsNaN() )
  82. {
  83. if ( verbose )
  84. std::cerr << "KernelData: kernel matrix contains NaNs (setting inverse to identity)" << std::endl;
  85. logdet = numeric_limits<double>::max();
  86. choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
  87. choleskyMatrix.setIdentity();
  88. } else {
  89. if ( verbose )
  90. std::cerr << "KernelData: calculating cholesky decomposition" << std::endl;
  91. cr->robustChol ( kernelMatrix, choleskyMatrix );
  92. logdet = cr->getLastLogDet();
  93. if ( !finite ( logdet ) )
  94. {
  95. choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
  96. choleskyMatrix.setIdentity();
  97. logdet = numeric_limits<double>::max();
  98. }
  99. }
  100. }
/** Compute the explicit inverse of the kernel matrix from its Cholesky factor,
 * triggering the factorization first if it is not available yet. */
void KernelData::updateInverseKernelMatrix ()
{
  if ( ! hasCholeskyFactorization() )
    updateCholeskyFactorization();
  inverseKernelMatrix.resize ( choleskyMatrix.rows(), choleskyMatrix.cols() );
  choleskyInvertLargeScale ( choleskyMatrix, inverseKernelMatrix );
}
/** Compute result = K^{-1} * x by solving with the cached Cholesky factor
 * (no explicit inverse required).
 * @throws Exception if the Cholesky factorization has not been computed yet */
void KernelData::computeInverseKernelMultiply ( const Vector & x, Vector & result ) const
{
  if ( choleskyMatrix.rows() == 0 )
    fthrow ( Exception, "Cholesky factorization was not initialized, use updateCholeskyFactorization() in advance" );
  choleskySolveLargeScale ( choleskyMatrix, x, result );
}
  114. const NICE::Matrix & KernelData::getKernelMatrix() const
  115. {
  116. return kernelMatrix;
  117. }
  118. NICE::Matrix & KernelData::getKernelMatrix()
  119. {
  120. return kernelMatrix;
  121. }
  122. const NICE::Matrix & KernelData::getInverseKernelMatrix() const
  123. {
  124. return inverseKernelMatrix;
  125. };
  126. NICE::Matrix & KernelData::getInverseKernelMatrix()
  127. {
  128. return inverseKernelMatrix;
  129. };
  130. const NICE::Matrix & KernelData::getCholeskyMatrix() const
  131. {
  132. return choleskyMatrix;
  133. }
/** Look up a matrix previously stored with setCachedMatrix().
 * @param i cache index
 * @return reference to the cached matrix
 * @throws Exception if no matrix is cached under index i */
const Matrix & KernelData::getCachedMatrix ( int i ) const
{
  map<int, NICE::Matrix *>::const_iterator it = cachedMatrices.find ( i );
  if ( it != cachedMatrices.end() )
    return * ( it->second );
  else
    fthrow ( Exception, "Cached matrix with index " << i << " is not available." );
}
/** Store a matrix pointer under index i; a previous entry with the same index
 * is overwritten WITHOUT being freed.
 * NOTE(review): ownership is unclear — the destructor never deletes cached
 * matrices, but the copy constructor deep-copies them; confirm who frees m. */
void KernelData::setCachedMatrix ( int i, Matrix *m )
{
  cachedMatrices[i] = m;
}
  146. uint KernelData::getKernelMatrixSize () const {
  147. uint mysize = ( kernelMatrix.rows() == 0 ) ? ( choleskyMatrix.rows() ) : kernelMatrix.rows();
  148. return mysize;
  149. };
/** Compute closed-form leave-one-out (LOO) estimates from the precomputed
 * inverse kernel matrix:
 *   alpha       = K^{-1} y
 *   sigmaLoo[l] = 1 / (K^{-1})_{ll}
 *   muLoo[l]    = y[l] - alpha[l] * sigmaLoo[l]
 * @param y        vector of function values
 * @param muLoo    (output) LOO predictive means
 * @param sigmaLoo (output) LOO predictive variances
 * @throws Exception if updateInverseKernelMatrix() was not called before,
 *                   or if y does not match the kernel matrix size */
void KernelData::getLooEstimates ( const Vector & y, Vector & muLoo, Vector & sigmaLoo ) const
{
  if ( inverseKernelMatrix.rows() != getKernelMatrixSize() )
    fthrow ( Exception, "updateInverseKernelMatrix() has to be called in advance to use this function\n" );
  if ( y.size() != inverseKernelMatrix.rows() )
    fthrow ( Exception, "inverse kernel matrix does not fit to the size of the vector of function values y\n" );
  Vector alpha;
  computeInverseKernelMultiply ( y, alpha );
  muLoo.resize ( y.size() );
  sigmaLoo.resize ( y.size() );
  for ( uint l = 0 ; l < y.size(); l++ )
  {
    sigmaLoo[l] = 1.0 / inverseKernelMatrix ( l, l );
    muLoo[l] = y[l] - alpha[l] * sigmaLoo[l];
  }
}
/** @return a deep copy of this object (see the copy constructor);
 *  the caller takes ownership of the returned pointer */
KernelData *KernelData::clone ( void ) const
{
  return new KernelData ( *this );
}
/**
 * @brief Updates the GP likelihood if only the rowIndex-th row and column of the
 * kernel matrix have changed. Time is O(n^2) instead of O(n^3).
 * @author Alexander Lütz
 * @date 01/12/2010 (dd/mm/yyyy)
 * @param y          vector of function values
 * @param oldLogdetK log-determinant of the previous kernel matrix K
 * @param rowIndex   index of the exchanged row/column
 * @param newRow     new content of the exchanged row/column
 * @param oldAlpha   previous solution alpha = K^{-1} y
 * @param newAlpha   (output) solution K'^{-1} y for the modified kernel matrix
 * @param loglike    (output) logdet(K') + newAlpha^T y
 */
void KernelData::getGPLikelihoodWithOneNewRow ( const NICE::Vector & y, const double & oldLogdetK, const int & rowIndex, const NICE::Vector & newRow, const Vector & oldAlpha , Vector & newAlpha, double & loglike )
{
  // oldAlpha = K^{-1} y
  // K' new kernel matrix = exchange the row and column at position rowIndex
  // with newRow
  // try to find U and V such that K' = K + U*V with U,V having rank 2
  // rowIndex'th base vector
  Vector ei ( y.size(), 0.0 );
  ei[rowIndex] = 1.0;
  // we have to consider the diagonal entry (it appears in both the row and the
  // column update and would otherwise be added twice)
  Vector a = newRow - kernelMatrix.getRow ( rowIndex ) ;
  a[rowIndex] = a[rowIndex] / 2;
  // K' = K + a*ei^T + ei*a^T, i.e. U = [a, ei] and V = [ei; a]
  NICE::Matrix U ( y.size(), 2 );
  NICE::Matrix V ( 2, y.size() );
  for ( uint i = 0; i < y.size(); i++ )
  {
    U ( i, 0 ) = a[i];
    U ( i, 1 ) = ei[i];
    V ( 0, i ) = ei[i];
    V ( 1, i ) = a[i];
  }
  // Sherman Woodbury-Morrison Formula:
  // alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
  // K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
  // = oldAlpha - B F^{-1} V oldAlpha
  // with B = K^{-1} U and F = (I+VB)
  // Time complexity: 2 choleskySolve calls: O(n^2)
  NICE::Vector B_1;
  computeInverseKernelMultiply ( U.getColumn ( 0 ), B_1 );
  NICE::Vector B_2;
  computeInverseKernelMultiply ( U.getColumn ( 1 ), B_2 );
  NICE::Matrix B ( y.size(), 2 );
  for ( uint i = 0; i < y.size(); i++ )
  {
    B ( i, 0 ) = B_1[i];
    B ( i, 1 ) = B_2[i];
  }
  NICE::Matrix F ( 2, 2 );
  F.setIdentity();
  Matrix V_B ( 2, 2 );
  V_B.multiply ( V, B );
  F += V_B;
  // Time complexity: 1 linear equation system with 2 variables
  // can be computed in O(1) using a fixed implementation
  // (closed-form inverse of the 2x2 matrix F)
  NICE::Matrix F_inv = NICE::Matrix ( 2, 2 );
  double denominator = F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 );
  F_inv ( 0, 0 ) = F ( 1, 1 ) / denominator;
  F_inv ( 0, 1 ) = -F ( 0, 1 ) / denominator;
  F_inv ( 1, 0 ) = - F ( 1, 0 ) / denominator;
  F_inv ( 1, 1 ) = F ( 0, 0 ) / denominator;
  // newAlpha = oldAlpha - B * F^{-1} * (V * oldAlpha), carried out with
  // (n x 1)-matrices so the library's matrix multiply can be used
  Matrix M_oldAlpha ( y.size(), 1 );
  for ( uint i = 0; i < y.size(); i++ )
  {
    M_oldAlpha ( i, 0 ) = oldAlpha[i];
  }
  NICE::Matrix V_oldAlpha;
  V_oldAlpha.multiply ( V, M_oldAlpha );
  NICE::Matrix F_inv_V_old_Alpha;
  F_inv_V_old_Alpha.multiply ( F_inv, V_oldAlpha );
  NICE::Matrix M_newAlpha;
  M_newAlpha.multiply ( B, F_inv_V_old_Alpha );
  M_newAlpha *= -1;
  M_newAlpha += M_oldAlpha;
  newAlpha = NICE::Vector ( y.size() );
  for ( uint i = 0; i < y.size(); i++ )
  {
    newAlpha[i] = M_newAlpha ( i, 0 );
  }
  // Matrix Determinant Lemma
  // http://en.wikipedia.org/wiki/Matrix_determinant_lemma
  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
  // logdet(K + U*V) = logdet( F ) + logdet(K)
  double logdetF = log ( F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 ) );
  double newLogdetK = logdetF + oldLogdetK;
  logdet = newLogdetK;
  loglike = newLogdetK + newAlpha.scalarProduct ( y );
}
/**
 * @brief Updates the GP likelihood if only the rowIndex-th row and column have
 * changed. Time is O(n^2) instead of O(n^3). This is only the first part,
 * useful for multiclass problems: it computes the rank-2 update factors
 * U, V, B, F and F_inv and stores them as class members, so that
 * getGPLikelihoodWithOneNewRow_SecondPart() can be called once per class.
 * @author Alexander Lütz
 * @date 01/12/2010 (dd/mm/yyyy)
 * @param rowIndex index of the exchanged row/column
 * @param newRow   new content of the exchanged row/column
 */
void KernelData::getGPLikelihoodWithOneNewRow_FirstPart ( const int & rowIndex, const NICE::Vector & newRow )
{
  // oldAlpha = K^{-1} y
  // K' new kernel matrix = exchange the row and column at position rowIndex
  // with newRow
  // try to find U and V such that K' = K + U*V with U,V having rank 2
  // rowIndex'th base vector
  Vector ei ( newRow.size(), 0.0 );
  ei[rowIndex] = 1.0;
  // we have to consider the diagonal entry (it would be added twice otherwise)
  Vector a = newRow - kernelMatrix.getRow ( rowIndex ) ;
  a[rowIndex] = a[rowIndex] / 2;
  // U, V are class members so the second part can reuse them
  U.resize ( newRow.size(), 2 );
  V.resize ( 2, newRow.size() );
  // #pragma omp parallel for
  for ( uint i = 0; i < newRow.size(); i++ )
  {
    U ( i, 0 ) = a[i];
    U ( i, 1 ) = ei[i];
    V ( 0, i ) = ei[i];
    V ( 1, i ) = a[i];
  }
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- U:" << std::endl;
    for ( uint ik = 0; ik < U.rows(); ik++ )
    {
      for ( uint jk = 0; jk < U.cols(); jk++ )
      {
        std::cerr << U ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- V:" << std::endl;
    for ( uint ik = 0; ik < V.rows(); ik++ )
    {
      for ( uint jk = 0; jk < V.cols(); jk++ )
      {
        std::cerr << V ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // Sherman Woodbury-Morrison Formula:
  // alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
  // K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
  // = oldAlpha - B F^{-1} V oldAlpha
  // with B = K^{-1} U and F = (I+VB)
  // Time complexity: 2 choleskySolve calls: O(n^2)
  NICE::Vector B_1;
  computeInverseKernelMultiply ( U.getColumn ( 0 ), B_1 );
  NICE::Vector B_2;
  computeInverseKernelMultiply ( U.getColumn ( 1 ), B_2 );
  // B is a class member as well
  B.resize ( newRow.size(), 2 );
  for ( uint i = 0; i < newRow.size(); i++ )
  {
    B ( i, 0 ) = B_1[i];
    B ( i, 1 ) = B_2[i];
  }
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- B:" << std::endl;
    for ( uint ik = 0; ik < B.rows(); ik++ )
    {
      for ( uint jk = 0; jk < B.cols(); jk++ )
      {
        std::cerr << B ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // F = I + V*B (class member)
  F.resize ( 2, 2 );
  F.setIdentity();
  Matrix V_B ( 2, 2 );
  V_B.multiply ( V, B );
  F += V_B;
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F:" << std::endl;
    for ( uint ik = 0; ik < F.rows(); ik++ )
    {
      for ( uint jk = 0; jk < F.cols(); jk++ )
      {
        std::cerr << F ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // Time complexity: 1 linear equation system with 2 variables
  // can be computed in O(1) using a fixed implementation
  // (closed-form inverse of the 2x2 matrix F, stored as class member)
  F_inv.resize ( 2, 2 );
  double denominator = F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 );
  F_inv ( 0, 0 ) = F ( 1, 1 ) / denominator;
  F_inv ( 0, 1 ) = -F ( 0, 1 ) / denominator;
  F_inv ( 1, 0 ) = - F ( 1, 0 ) / denominator;
  F_inv ( 1, 1 ) = F ( 0, 0 ) / denominator;
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F_inv:" << std::endl;
    for ( uint ik = 0; ik < F_inv.rows(); ik++ )
    {
      for ( uint jk = 0; jk < F_inv.cols(); jk++ )
      {
        std::cerr << F_inv ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
    // sanity check: F * F_inv should print (approximately) the identity
    NICE::Matrix MultiplicationResult ( F_inv.cols(), F_inv.cols(), 0.0 );
    MultiplicationResult.multiply ( F, F_inv );
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F-inversion MultiplicationResult:" << std::endl;
    for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
    {
      for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++ )
      {
        std::cerr << MultiplicationResult ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
}
/**
 * @brief Updates the GP likelihood if only the rowIndex-th row and column have
 * changed. Time is O(n^2) instead of O(n^3). This is only the second part,
 * useful for multiclass problems: it relies on the members V, B, F and F_inv
 * prepared by getGPLikelihoodWithOneNewRow_FirstPart() and can be called once
 * per class with different y / oldAlpha.
 * @author Alexander Lütz
 * @date 01/12/2010 (dd/mm/yyyy)
 * @param y          vector of function values
 * @param oldLogdetK log-determinant of the previous kernel matrix K
 * @param oldAlpha   previous solution alpha = K^{-1} y
 * @param newAlpha   (output) solution for the modified kernel matrix
 * @param loglike    (output) logdet(K') + newAlpha^T y
 */
void KernelData::getGPLikelihoodWithOneNewRow_SecondPart ( const NICE::Vector & y, const double & oldLogdetK, const Vector & oldAlpha , Vector & newAlpha, double & loglike )
{
  // newAlpha = oldAlpha - B * F^{-1} * (V * oldAlpha), carried out with
  // (n x 1)-matrices so the library's matrix multiply can be used
  Matrix M_oldAlpha ( y.size(), 1 );
  for ( uint i = 0; i < y.size(); i++ )
  {
    M_oldAlpha ( i, 0 ) = oldAlpha[i];
  }
  NICE::Matrix V_oldAlpha;
  V_oldAlpha.multiply ( V, M_oldAlpha );
  NICE::Matrix F_inv_V_old_Alpha;
  F_inv_V_old_Alpha.multiply ( F_inv, V_oldAlpha );
  NICE::Matrix M_newAlpha;
  M_newAlpha.multiply ( B, F_inv_V_old_Alpha );
  M_newAlpha *= -1;
  M_newAlpha += M_oldAlpha;
  newAlpha = NICE::Vector ( y.size() );
  for ( uint i = 0; i < y.size(); i++ )
  {
    newAlpha[i] = M_newAlpha ( i, 0 );
  }
  // Matrix Determinant Lemma
  // http://en.wikipedia.org/wiki/Matrix_determinant_lemma
  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
  // logdet(K + U*V) = logdet( F ) + logdet(K)
  double logdetF = log ( F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 ) );
  double newLogdetK = logdetF + oldLogdetK;
  logdet = newLogdetK;
  loglike = newLogdetK + newAlpha.scalarProduct ( y );
}
/**
 * @brief Updates the cached inverse kernel matrix (and the stored kernel
 * matrix) if only the rowIndex-th row and column have changed.
 * Time is O(n^2) instead of O(n^3).
 * @author Alexander Lütz
 * @date 01/09/2011 (dd/mm/yyyy)
 * @param rowIndex index of the exchanged row/column
 * @param newRow   new content of the exchanged row/column
 */
void KernelData::perform_Rank_2_Update ( const int & rowIndex, const NICE::Vector & newRow )
{
  // prepares the members U, V, B, F and F_inv of the rank-2 decomposition
  getGPLikelihoodWithOneNewRow_FirstPart ( rowIndex, newRow );
  // Woodbury: (K + UV)^{-1} = K^{-1} - B * F^{-1} * V * K^{-1}
  Matrix prod_1;
  prod_1.multiply ( V, inverseKernelMatrix );
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_1:" << std::endl;
    for ( uint ik = 0; ik < prod_1.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_1.cols(); jk++ )
      {
        std::cerr << prod_1 ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  Matrix prod_2;
  prod_2.multiply ( F_inv, prod_1 );
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_2:" << std::endl;
    for ( uint ik = 0; ik < prod_2.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_2.cols(); jk++ )
      {
        std::cerr << prod_2 ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  Matrix prod_3;
  prod_3.multiply ( B, prod_2 );
  if ( verbose )
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_3:" << std::endl;
    for ( uint ik = 0; ik < prod_3.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_3.cols(); jk++ )
      {
        std::cerr << prod_3 ( ik, jk ) << " ";
      }
      std::cerr << std::endl;
    }
  }
  inverseKernelMatrix = inverseKernelMatrix - prod_3;
  // std::cerr << "perform rank 2 update: inverseKernelMatrix: " << inverseKernelMatrix << std::endl;
  // correct the stored kernel matrix after our computations
  for ( uint i = 0; i < newRow.size(); i++ )
  {
    kernelMatrix ( i, rowIndex ) = newRow[i];
    kernelMatrix ( rowIndex, i ) = newRow[i];
  }
  // std::cerr << "kernelMatrix: " << kernelMatrix << std::endl << " newRow: " << newRow << std::endl;
}
  471. /**
  472. * @brief Updates the GP-Likelihood, if only k rows and colums are changed. Time is O(k^3+n^2) instaed O(n^3). Alternatively it could also be done with iteratively change one row, which leads to O(k*n^2).
  473. * @author Alexander Lütz
  474. * @date 01/09/2011 (dd/mm/yyyy)
  475. */
  476. void KernelData::perform_Rank_2k_Update ( const std::vector<int> & rowIndices, const std::vector<NICE::Vector> & newRows )
  477. {
  478. if ( ( rowIndices.size() != 0 ) && ( rowIndices.size() == newRows.size() ) )
  479. {
  480. std::vector<NICE::Vector> unity_vectors;
  481. std::vector<NICE::Vector> diff_vectors;
  482. for ( uint j = 0; j < rowIndices.size(); j++ )
  483. {
  484. NICE::Vector unity_vector ( newRows[0].size(), 0.0 );
  485. unity_vector[rowIndices[j] ] = 1.0;
  486. unity_vectors.push_back ( unity_vector );
  487. NICE::Vector a = newRows[j] - kernelMatrix.getRow ( rowIndices[j] );
  488. for ( uint x = 0; x < rowIndices.size(); x++ )
  489. {
  490. a[rowIndices[x] ] /= 2.0;
  491. }
  492. diff_vectors.push_back ( a );
  493. }
  494. U.resize ( newRows[0].size(), 2*rowIndices.size() );
  495. V.resize ( 2*rowIndices.size(), newRows[0].size() );
  496. for ( uint i = 0; i < newRows[0].size(); i++ )
  497. {
  498. for ( uint j = 0; j < rowIndices.size(); j++ )
  499. {
  500. U ( i, rowIndices.size() + j ) = ( unity_vectors[j] ) [i];
  501. U ( i, j ) = ( diff_vectors[j] ) [i];
  502. V ( rowIndices.size() + j, i ) = ( diff_vectors[j] ) [i];
  503. V ( j, i ) = ( unity_vectors[j] ) [i];
  504. }
  505. }
  506. if ( verbose )
  507. {
  508. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- U: " << U << std::endl;
  509. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- V: " << V << std::endl;
  510. }
  511. NICE::Matrix UV ( newRows[0].size(), newRows[0].size() );
  512. UV.multiply ( U, V );
  513. if ( verbose )
  514. {
  515. // we have to consider the entries which are added twice
  516. for ( int x = 0; x < ( int ) rowIndices.size(); x++ )
  517. {
  518. for ( int y = x; y < ( int ) rowIndices.size(); y++ )
  519. {
  520. UV ( rowIndices[x] , rowIndices[y] ) /= 2.0;
  521. if ( x != y )
  522. UV ( rowIndices[y] , rowIndices[x] ) /= 2.0;
  523. }
  524. }
  525. }
  526. if ( verbose )
  527. {
  528. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- UV:" << UV << std::endl;
  529. }
  530. B.resize ( newRows[0].size(), 2*rowIndices.size() );
  531. for ( uint j = 0; j < 2*rowIndices.size(); j++ )
  532. {
  533. NICE::Vector B_row;
  534. computeInverseKernelMultiply ( U.getColumn ( j ), B_row );
  535. for ( uint i = 0; i < newRows[0].size(); i++ )
  536. {
  537. B ( i, j ) = B_row[i];
  538. }
  539. }
  540. if ( verbose )
  541. {
  542. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- B:" << B << std::endl;
  543. }
  544. F.resize ( 2*rowIndices.size(), 2*rowIndices.size() );
  545. F.setIdentity();
  546. Matrix V_B ( 2*rowIndices.size(), 2*rowIndices.size() );
  547. V_B.multiply ( V, B );
  548. F += V_B;
  549. if ( verbose )
  550. {
  551. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F:" << F << std::endl;
  552. }
  553. //invert F!
  554. //we can't rely on methods like cholesky decomposition, since F has not to be positive definite
  555. F_inv = invert ( F );
  556. if ( verbose )
  557. {
  558. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F_inv:" << F_inv <<std::endl;
  559. }
  560. if ( verbose )
  561. {
  562. NICE::Matrix MultiplicationResult ( F.rows(), F_inv.cols(), 0.0 );
  563. MultiplicationResult.multiply ( F, F_inv );
  564. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F-inversion MultiplicationResult:" << MultiplicationResult << std::endl;
  565. }
  566. Matrix prod_1;
  567. prod_1.multiply ( V, inverseKernelMatrix );
  568. std::cerr << "prod_1: " << prod_1.rows() << " x " << prod_1.cols() << std::endl;
  569. std::cerr << "v and inverse matrix multiplied" << std::endl;
  570. if ( verbose )
  571. {
  572. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_1:" << prod_1 << std::endl;
  573. }
  574. Matrix prod_2;
  575. prod_2.resize(F.rows(), prod_1.cols());
  576. prod_2.multiply ( F_inv, prod_1 );
  577. if ( verbose )
  578. {
  579. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_2:" << prod_2 << std::endl;
  580. }
  581. std::cerr << "B: " << B.rows() << " x " << B.cols() << std::endl;
  582. Matrix prod_3;
  583. prod_3.multiply ( B, prod_2 );
  584. std::cerr << "prod_3 created: " << prod_3.rows() << " x " << prod_3.cols() << std::endl;
  585. if ( verbose )
  586. {
  587. std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_3:" << prod_3 << std::endl;
  588. }
  589. inverseKernelMatrix = inverseKernelMatrix - prod_3;
  590. //remember the new kernel entries for the next time
  591. for ( uint i = 0; i < rowIndices.size(); i++ )
  592. {
  593. for ( uint ik = 0; ik < kernelMatrix.rows(); ik++ )
  594. {
  595. kernelMatrix ( ik, rowIndices[i] ) = ( newRows[i] ) [ik];
  596. kernelMatrix ( rowIndices[i], ik ) = ( newRows[i] ) [ik];
  597. }
  598. }
  599. }
  600. else
  601. {
  602. std::cerr << "Failure" << std::endl;
  603. }
  604. }
/** Remove the rowIndex-th row and column from the kernel matrix, the inverse
 * kernel matrix and the Cholesky factor — each only if it is non-empty.
 * NOTE(review): the inverse and the Cholesky factor are only cropped, not
 * recomputed; after this call they are presumably no longer the exact
 * inverse/factorization of the reduced kernel matrix — confirm with callers. */
void KernelData::delete_one_row ( const int & rowIndex )
{
  if ( ( inverseKernelMatrix.rows() != 0 ) && ( inverseKernelMatrix.cols() != 0 ) )
  {
    inverseKernelMatrix.deleteCol ( rowIndex );
    inverseKernelMatrix.deleteRow ( rowIndex );
  }
  if ( ( choleskyMatrix.rows() != 0 ) && ( choleskyMatrix.cols() != 0 ) )
  {
    choleskyMatrix.deleteCol ( rowIndex );
    choleskyMatrix.deleteRow ( rowIndex );
  }
  if ( ( kernelMatrix.rows() != 0 ) && ( kernelMatrix.cols() != 0 ) )
  {
    kernelMatrix.deleteCol ( rowIndex );
    kernelMatrix.deleteRow ( rowIndex );
  }
}
/** Remove several rows and columns from the kernel matrix, the inverse kernel
 * matrix and the Cholesky factor — each only if it is large enough.
 * @param indices indices of the rows/columns to remove (semantics of
 *        ordering/duplicates are those of Matrix::deleteCols/deleteRows)
 * NOTE(review): as in delete_one_row, the inverse and the Cholesky factor are
 * cropped but not recomputed. */
void KernelData::delete_multiple_rows ( std::vector<int> & indices )
{
  if ( ( inverseKernelMatrix.rows() >= indices.size() ) && ( inverseKernelMatrix.cols() >= indices.size() ) )
  {
    inverseKernelMatrix.deleteCols ( indices );
    inverseKernelMatrix.deleteRows ( indices );
  }
  if ( ( choleskyMatrix.rows() >= indices.size() ) && ( choleskyMatrix.cols() >= indices.size() ) )
  {
    choleskyMatrix.deleteCols ( indices );
    choleskyMatrix.deleteRows ( indices );
  }
  if ( ( kernelMatrix.rows() >= indices.size() ) && ( kernelMatrix.cols() >= indices.size() ) )
  {
    kernelMatrix.deleteCols ( indices );
    kernelMatrix.deleteRows ( indices );
  }
}
/** Replace the stored kernel matrix. The cached inverse and Cholesky factor
 * are NOT updated — call updateCholeskyFactorization() /
 * updateInverseKernelMatrix() afterwards if they are needed. */
void KernelData::setKernelMatrix ( const NICE::Matrix & k_matrix )
{
  kernelMatrix = k_matrix;
}
  645. void KernelData::increase_size_by_One()
  646. {
  647. NICE::Matrix new_Kernel ( kernelMatrix.rows() + 1, kernelMatrix.cols() + 1 );
  648. new_Kernel.setBlock ( 0, 0, kernelMatrix );
  649. for ( uint i = 0; i < kernelMatrix.rows() - 1; i++ )
  650. {
  651. new_Kernel ( i, kernelMatrix.cols() ) = 0.0;
  652. new_Kernel ( kernelMatrix.rows(), i ) = 0.0;
  653. }
  654. new_Kernel ( kernelMatrix.rows(), kernelMatrix.cols() ) = 1.0;
  655. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  656. kernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  657. kernelMatrix = new_Kernel;
  658. new_Kernel.setBlock ( 0, 0, inverseKernelMatrix );
  659. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  660. inverseKernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  661. inverseKernelMatrix = new_Kernel;
  662. new_Kernel.setBlock ( 0, 0, choleskyMatrix );
  663. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  664. choleskyMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  665. choleskyMatrix = new_Kernel;
  666. }
  667. void KernelData::increase_size_by_k ( const uint & k )
  668. {
  669. NICE::Matrix new_Kernel ( kernelMatrix.rows() + k, kernelMatrix.cols() + k );
  670. new_Kernel.setBlock ( 0, 0, kernelMatrix );
  671. for ( uint i = 0; i < kernelMatrix.rows() - 1; i++ )
  672. {
  673. for ( uint j = 0; j < k; j++ )
  674. {
  675. new_Kernel ( i, kernelMatrix.cols() + j ) = 0.0;
  676. new_Kernel ( kernelMatrix.rows() + j, i ) = 0.0;
  677. }
  678. }
  679. for ( uint j = 0; j < k; j++ )
  680. {
  681. new_Kernel ( kernelMatrix.rows() + j, kernelMatrix.cols() + j ) = 1.0;
  682. }
  683. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  684. kernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  685. kernelMatrix = new_Kernel;
  686. new_Kernel.setBlock ( 0, 0, inverseKernelMatrix );
  687. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  688. inverseKernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  689. inverseKernelMatrix = new_Kernel;
  690. new_Kernel.setBlock ( 0, 0, choleskyMatrix );
  691. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  692. choleskyMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
  693. choleskyMatrix = new_Kernel;
  694. }