KernelData.cpp 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898
  1. /**
  2. * @file KernelData.cpp
  3. * @brief caching some kernel data
  4. * @author Erik Rodner
  5. * @date 01/19/2010
  6. */
#include "KernelData.h"

#include <cmath>   // std::isfinite
#include <iostream>
#include <limits>
#include <vector>

#include "core/algebra/CholeskyRobustAuto.h"
#include "core/algebra/CholeskyRobust.h"
#include "core/vector/Algorithms.h"

#ifdef NICE_USELIB_OPENMP
#include <omp.h> // for parallel processing
#endif
  16. using namespace std;
  17. using namespace NICE;
  18. using namespace OBJREC;
  19. KernelData::KernelData()
  20. {
  21. // empty config
  22. Config conf;
  23. initFromConfig( &conf, "DONTCARE");
  24. }
  25. KernelData::KernelData ( const KernelData & src )
  26. {
  27. kernelMatrix = src.kernelMatrix;
  28. inverseKernelMatrix = src.inverseKernelMatrix;
  29. choleskyMatrix = src.choleskyMatrix;
  30. for ( map<int, NICE::Matrix *>::const_iterator i = src.cachedMatrices.begin();
  31. i != src.cachedMatrices.end(); i++ )
  32. {
  33. Matrix *M = new Matrix ( *(i->second) );
  34. cachedMatrices.insert ( pair<int, NICE::Matrix *> ( i->first, M ) );
  35. }
  36. logdet = src.logdet;
  37. verbose = src.verbose;
  38. cr = src.cr->clone();
  39. }
  40. KernelData::KernelData( const Config *conf, const Matrix & kernelMatrix, const string & section )
  41. {
  42. initFromConfig ( conf, section );
  43. this->kernelMatrix = kernelMatrix;
  44. updateCholeskyFactorization();
  45. }
  46. KernelData::KernelData( const Config *conf, const string & section )
  47. {
  48. initFromConfig ( conf, section );
  49. }
  50. void KernelData::initFromConfig ( const Config *conf, const string & section )
  51. {
  52. verbose = conf->gB(section, "verbose", false );
  53. string inv_method = conf->gS(section, "robust_cholesky", "auto" );
  54. double noiseStep = conf->gD(section, "rchol_noise_variance", 1e-7 );
  55. double minimumLogDet = conf->gD(section, "rchol_minimum_logdet", - std::numeric_limits<double>::max() );
  56. bool useCuda = conf->gB(section, "rchol_cuda", true );
  57. if ( verbose && useCuda )
  58. std::cerr << "KernelData: using the cuda implementation of cholesky decomposition (might be inaccurate)" << std::endl;
  59. if ( inv_method == "auto" )
  60. {
  61. if ( verbose )
  62. std::cerr << "KernelData: using the cholesky method with automatic regularization" << std::endl;
  63. cr = new CholeskyRobustAuto ( verbose, noiseStep, minimumLogDet, useCuda );
  64. } else {
  65. if ( verbose )
  66. std::cerr << "KernelData: using the cholesky method with static regularization" << std::endl;
  67. cr = new CholeskyRobust ( verbose, noiseStep, useCuda );
  68. }
  69. }
// Destructor: releases the robust Cholesky helper.
// NOTE(review): cachedMatrices is not freed here although the copy
// constructor allocates deep copies into it -- potential memory leak for
// copied objects. Ownership of pointers passed to setCachedMatrix() is
// unclear from this file, so deleting them here might double-free;
// confirm the ownership contract before changing.
KernelData::~KernelData()
{
delete cr;
}
  74. void KernelData::updateCholeskyFactorization ()
  75. {
  76. if ( verbose )
  77. std::cerr << "KernelData: kernel: " << kernelMatrix.rows() << " " << kernelMatrix.cols() << std::endl;
  78. if ( (kernelMatrix.rows() <= 0) || (kernelMatrix.cols() <= 0) )
  79. fthrow(Exception, "KernelData: no kernel matrix available !");
  80. if ( kernelMatrix.containsNaN() )
  81. {
  82. if ( verbose )
  83. std::cerr << "KernelData: kernel matrix contains NaNs (setting inverse to identity)" << std::endl;
  84. logdet = numeric_limits<double>::max();
  85. choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
  86. choleskyMatrix.setIdentity();
  87. } else {
  88. if ( verbose )
  89. std::cerr << "KernelData: calculating cholesky decomposition" << std::endl;
  90. cr->robustChol ( kernelMatrix, choleskyMatrix );
  91. logdet = cr->getLastLogDet();
  92. if ( !finite(logdet) )
  93. {
  94. choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
  95. choleskyMatrix.setIdentity();
  96. logdet = numeric_limits<double>::max();
  97. }
  98. }
  99. }
  100. void KernelData::updateInverseKernelMatrix ()
  101. {
  102. if ( ! hasCholeskyFactorization() )
  103. updateCholeskyFactorization();
  104. inverseKernelMatrix.resize ( choleskyMatrix.rows(), choleskyMatrix.cols() );
  105. choleskyInvertLargeScale ( choleskyMatrix, inverseKernelMatrix );
  106. }
  107. void KernelData::computeInverseKernelMultiply ( const Vector & x, Vector & result ) const
  108. {
  109. if ( choleskyMatrix.rows() == 0 )
  110. fthrow(Exception, "Cholesky factorization was not initialized, use updateCholeskyFactorization() in advance");
  111. choleskySolveLargeScale ( choleskyMatrix, x, result );
  112. }
// Read-only access to the stored kernel matrix K.
const NICE::Matrix & KernelData::getKernelMatrix() const
{
  return kernelMatrix;
}
// Mutable access to the kernel matrix; callers modifying it must call
// updateCholeskyFactorization() afterwards to keep the cached factor in sync.
NICE::Matrix & KernelData::getKernelMatrix()
{
  return kernelMatrix;
}
// Read-only access to K^{-1}; only valid after updateInverseKernelMatrix().
const NICE::Matrix & KernelData::getInverseKernelMatrix() const
{
  return inverseKernelMatrix;
};
// Mutable access to K^{-1}; only valid after updateInverseKernelMatrix().
NICE::Matrix & KernelData::getInverseKernelMatrix()
{
  return inverseKernelMatrix;
};
// Read-only access to the cached Cholesky factor of K.
const NICE::Matrix & KernelData::getCholeskyMatrix() const
{
  return choleskyMatrix;
}
  133. const Matrix & KernelData::getCachedMatrix (int i) const
  134. {
  135. map<int, NICE::Matrix *>::const_iterator it = cachedMatrices.find(i);
  136. if ( it != cachedMatrices.end() )
  137. return *(it->second);
  138. else
  139. fthrow(Exception, "Cached matrix with index " << i << " is not available.");
  140. }
// Stores pointer m under cache slot i.
// NOTE(review): overwriting an existing slot drops the old pointer without
// freeing it -- a leak if this object owns the matrices (the deep-copying
// copy constructor suggests it does); confirm the intended ownership.
void KernelData::setCachedMatrix (int i, Matrix *m)
{
cachedMatrices[i] = m;
}
  145. uint KernelData::getKernelMatrixSize () const {
  146. uint mysize = ( kernelMatrix.rows() == 0 ) ? (choleskyMatrix.rows()) : kernelMatrix.rows();
  147. return mysize;
  148. };
  149. void KernelData::getLooEstimates ( const Vector & y, Vector & muLoo, Vector & sigmaLoo ) const
  150. {
  151. if ( inverseKernelMatrix.rows() != getKernelMatrixSize() )
  152. fthrow(Exception, "updateInverseKernelMatrix() has to be called in advance to use this function\n");
  153. if ( y.size() != inverseKernelMatrix.rows() )
  154. fthrow(Exception, "inverse kernel matrix does not fit to the size of the vector of function values y\n");
  155. Vector alpha;
  156. computeInverseKernelMultiply ( y, alpha );
  157. muLoo.resize ( y.size() );
  158. sigmaLoo.resize ( y.size() );
  159. for ( uint l = 0 ; l < y.size(); l++ )
  160. {
  161. sigmaLoo[l] = 1.0 / inverseKernelMatrix(l,l);
  162. muLoo[l] = y[l] - alpha[l] * sigmaLoo[l];
  163. }
  164. }
// Polymorphic deep copy via the copy constructor; the caller owns the result.
KernelData *KernelData::clone(void) const
{
  return new KernelData( *this );
}
/**
* @brief Updates the GP likelihood when only the i-th row and column have changed. Time is O(n^2) instead of O(n^3).
* @author Alexander Lütz
* @date 01/12/2010 (dd/mm/yyyy)
*/
// Rank-2 update of alpha = K^{-1} y and of logdet(K) after replacing the
// rowIndex-th row AND column of the kernel matrix by newRow.
void KernelData::getGPLikelihoodWithOneNewRow( const NICE::Vector & y, const double & oldLogdetK, const int & rowIndex, const NICE::Vector & newRow, const Vector & oldAlpha , Vector & newAlpha, double & loglike)
{
  // oldAlpha = K^{-1} y
  // K' new kernel matrix = exchange the row and column at position rowIndex
  // with newRow
  // try to find U and V such that K' = K + U*V with U,V having rank 2

  // rowIndex'th base vector
  Vector ei (y.size(), 0.0);
  ei[rowIndex] = 1.0;

  // difference between new and old row; we have to consider the diagonal
  // entry (it would otherwise be added twice by the symmetric update)
  Vector a = newRow - kernelMatrix.getRow(rowIndex) ;
  a[rowIndex] = a[rowIndex]/2;

  // NOTE: these U, V, B, F, F_inv are locals and shadow the member matrices
  // of the same names used by the FirstPart/SecondPart variants below.
  NICE::Matrix U (y.size(),2);
  NICE::Matrix V (2,y.size());
  for (uint i = 0; i < y.size(); i++)
  {
    U(i,0) = a[i];
    U(i,1) = ei[i];
    V(0,i) = ei[i];
    V(1,i) = a[i];
  }
  // Sherman Woodbury-Morrison Formula:
  // alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
  // K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
  // = oldAlpha - B F^{-1} V oldAlpha
  // with B = K^{-1} U and F = (I+VB)
  // Time complexity: 2 choleskySolve calls: O(n^2)
  NICE::Vector B_1;
  computeInverseKernelMultiply(U.getColumn(0),B_1);
  NICE::Vector B_2;
  computeInverseKernelMultiply(U.getColumn(1),B_2);
  NICE::Matrix B(y.size(),2);
  for (uint i = 0; i < y.size(); i++)
  {
    B(i,0) = B_1[i];
    B(i,1) = B_2[i];
  }
  NICE::Matrix F (2,2);
  F.setIdentity();
  Matrix V_B (2,2);
  V_B.multiply(V,B);
  F += V_B;
  // Time complexity: 1 linear equation system with 2 variables
  // can be computed in O(1) using a fixed implementation
  // closed-form inverse of the 2x2 matrix F.
  // NOTE(review): a singular F (denominator == 0) is not handled -- the
  // entries become inf/NaN; confirm callers guarantee non-singular updates.
  NICE::Matrix F_inv = NICE::Matrix(2,2);
  double denominator = F(0,0)*F(1,1)-F(0,1)*F(1,0);
  F_inv(0,0) = F(1,1)/denominator;
  F_inv(0,1) = -F(0,1)/denominator;
  F_inv(1,0) = - F(1,0)/denominator;
  F_inv(1,1) = F(0,0)/denominator;

  // reshape oldAlpha into an n x 1 matrix for the chained products below
  Matrix M_oldAlpha (y.size(),1);
  for (uint i = 0; i < y.size(); i++)
  {
    M_oldAlpha(i,0) = oldAlpha[i];
  }
  NICE::Matrix V_oldAlpha;
  V_oldAlpha.multiply( V,M_oldAlpha);
  NICE::Matrix F_inv_V_old_Alpha;
  F_inv_V_old_Alpha.multiply(F_inv, V_oldAlpha);
  NICE::Matrix M_newAlpha;
  M_newAlpha.multiply(B, F_inv_V_old_Alpha);
  M_newAlpha *= -1;
  M_newAlpha += M_oldAlpha;
  newAlpha = NICE::Vector(y.size());
  for (uint i = 0; i < y.size(); i++)
  {
    newAlpha[i] = M_newAlpha(i,0);
  }
  // Matrix Determinant Lemma
  // http://en.wikipedia.org/wiki/Matrix_determinant_lemma
  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
  // logdet(K + U*V) = logdet( F ) + logdet(K)
  double logdetF = log(F(0,0) * F(1,1) - F(0,1) * F(1,0));
  double newLogdetK = logdetF + oldLogdetK;
  logdet = newLogdetK;
  // NOTE(review): this is logdet + y^T K'^{-1} y, i.e. proportional to the
  // negative log likelihood without the 0.5 factors and the constant term.
  loglike = newLogdetK + newAlpha.scalarProduct(y);
}
/**
* @brief Updates the GP likelihood when only the i-th row and column have changed. Time is O(n^2) instead of O(n^3). This is only the first part, useful for multiclass problems.
* @author Alexander Lütz
* @date 01/12/2010 (dd/mm/yyyy)
*/
// First half of the rank-2 likelihood update: builds the member matrices
// U, V, B, F and F_inv for a replacement of row/column rowIndex by newRow.
// The second half (SecondPart) then reuses this state for several label
// vectors y, which is why the matrices are members instead of locals.
// NOTE: the debug output labels say "perform_Rank_2_Update"; kept as-is
// since they are runtime strings.
void KernelData::getGPLikelihoodWithOneNewRow_FirstPart(const int & rowIndex, const NICE::Vector & newRow)
{
  // oldAlpha = K^{-1} y
  // K' new kernel matrix = exchange the row and column at position rowIndex
  // with newRow
  // try to find U and V such that K' = K + U*V with U,V having rank 2

  // rowIndex'th base vector
  Vector ei (newRow.size(), 0.0);
  ei[rowIndex] = 1.0;
  // we have to consider the diagonal entry
  Vector a = newRow - kernelMatrix.getRow(rowIndex) ;
  a[rowIndex] = a[rowIndex]/2;
  U.resize(newRow.size(),2);
  V.resize(2,newRow.size());
  // #pragma omp parallel for
  for (uint i = 0; i < newRow.size(); i++)
  {
    U(i,0) = a[i];
    U(i,1) = ei[i];
    V(0,i) = ei[i];
    V(1,i) = a[i];
  }
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- U:" << std::endl;
    for ( uint ik = 0; ik < U.rows(); ik++ )
    {
      for ( uint jk = 0; jk < U.cols(); jk++)
      {
        std::cerr << U(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- V:" << std::endl;
    for ( uint ik = 0; ik < V.rows(); ik++ )
    {
      for ( uint jk = 0; jk < V.cols(); jk++)
      {
        std::cerr << V(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // Sherman Woodbury-Morrison Formula:
  // alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
  // K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
  // = oldAlpha - B F^{-1} V oldAlpha
  // with B = K^{-1} U and F = (I+VB)
  // Time complexity: 2 choleskySolve calls: O(n^2)
  NICE::Vector B_1;
  computeInverseKernelMultiply(U.getColumn(0),B_1);
  NICE::Vector B_2;
  computeInverseKernelMultiply(U.getColumn(1),B_2);
  B.resize(newRow.size(),2);
  for (uint i = 0; i < newRow.size(); i++)
  {
    B(i,0) = B_1[i];
    B(i,1) = B_2[i];
  }
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- B:" << std::endl;
    for ( uint ik = 0; ik < B.rows(); ik++ )
    {
      for ( uint jk = 0; jk < B.cols(); jk++)
      {
        std::cerr << B(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
  F.resize(2,2);
  F.setIdentity();
  Matrix V_B (2,2);
  V_B.multiply(V,B);
  F += V_B;
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F:" << std::endl;
    for ( uint ik = 0; ik < F.rows(); ik++ )
    {
      for ( uint jk = 0; jk < F.cols(); jk++)
      {
        std::cerr << F(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // Time complexity: 1 linear equation system with 2 variables
  // can be computed in O(1) using a fixed implementation
  // closed-form inverse of the 2x2 matrix F.
  // NOTE(review): a singular F (denominator == 0) is not handled here.
  F_inv.resize(2,2);
  double denominator = F(0,0)*F(1,1)-F(0,1)*F(1,0);
  F_inv(0,0) = F(1,1)/denominator;
  F_inv(0,1) = -F(0,1)/denominator;
  F_inv(1,0) = - F(1,0)/denominator;
  F_inv(1,1) = F(0,0)/denominator;
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F_inv:" << std::endl;
    for ( uint ik = 0; ik < F_inv.rows(); ik++ )
    {
      for ( uint jk = 0; jk < F_inv.cols(); jk++)
      {
        std::cerr << F_inv(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
    // sanity check: F * F_inv should print (close to) the identity
    NICE::Matrix MultiplicationResult( F_inv.cols(), F_inv.cols(), 0.0 );
    MultiplicationResult.multiply(F,F_inv);
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F-inversion MultiplicationResult:" << std::endl;
    for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
    {
      for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++)
      {
        std::cerr << MultiplicationResult(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
}
/**
* @brief Updates the GP likelihood when only the i-th row and column have changed. Time is O(n^2) instead of O(n^3). This is only the second part, useful for multiclass problems.
* @author Alexander Lütz
* @date 01/12/2010 (dd/mm/yyyy)
*/
  381. void KernelData::getGPLikelihoodWithOneNewRow_SecondPart( const NICE::Vector & y, const double & oldLogdetK, const Vector & oldAlpha , Vector & newAlpha, double & loglike)
  382. {
  383. Matrix M_oldAlpha (y.size(),1);
  384. for (uint i = 0; i < y.size(); i++)
  385. {
  386. M_oldAlpha(i,0) = oldAlpha[i];
  387. }
  388. NICE::Matrix V_oldAlpha;
  389. V_oldAlpha.multiply( V,M_oldAlpha);
  390. NICE::Matrix F_inv_V_old_Alpha;
  391. F_inv_V_old_Alpha.multiply(F_inv, V_oldAlpha);
  392. NICE::Matrix M_newAlpha;
  393. M_newAlpha.multiply(B, F_inv_V_old_Alpha);
  394. M_newAlpha *= -1;
  395. M_newAlpha += M_oldAlpha;
  396. newAlpha = NICE::Vector(y.size());
  397. for (uint i = 0; i < y.size(); i++)
  398. {
  399. newAlpha[i] = M_newAlpha(i,0);
  400. }
  401. // Matrix Determinant Lemma
  402. // http://en.wikipedia.org/wiki/Matrix_determinant_lemma
  403. // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
  404. // logdet(K + U*V) = logdet( F ) + logdet(K)
  405. double logdetF = log(F(0,0) * F(1,1) - F(0,1) * F(1,0));
  406. double newLogdetK = logdetF + oldLogdetK;
  407. logdet = newLogdetK;
  408. loglike = newLogdetK + newAlpha.scalarProduct(y);
  409. }
/**
* @brief Updates the cached inverse kernel matrix (and the stored kernel matrix) when only the i-th row and column have changed. Time is O(n^2) instead of O(n^3).
* @author Alexander Lütz
* @date 01/09/2011 (dd/mm/yyyy)
*/
// Applies the rank-2 update to the cached inverse kernel matrix via the
// Sherman-Morrison-Woodbury formula:
//   K'^{-1} = K^{-1} - B F^{-1} V K^{-1}
// and writes newRow into row/column rowIndex of the stored kernel matrix.
// Requires inverseKernelMatrix to be up to date (updateInverseKernelMatrix()).
void KernelData::perform_Rank_2_Update(const int & rowIndex, const NICE::Vector & newRow)
{
  // prepares the member matrices U, V, B, F, F_inv
  getGPLikelihoodWithOneNewRow_FirstPart(rowIndex,newRow);

  // prod_1 = V * K^{-1}
  Matrix prod_1;
  prod_1.multiply(V,inverseKernelMatrix);
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_1:" << std::endl;
    for ( uint ik = 0; ik < prod_1.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_1.cols(); jk++)
      {
        std::cerr << prod_1(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // prod_2 = F^{-1} * V * K^{-1}
  Matrix prod_2;
  prod_2.multiply(F_inv,prod_1);
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_2:" << std::endl;
    for ( uint ik = 0; ik < prod_2.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_2.cols(); jk++)
      {
        std::cerr << prod_2(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
  // prod_3 = B * F^{-1} * V * K^{-1}
  Matrix prod_3;
  prod_3.multiply(B,prod_2);
  if (verbose)
  {
    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_3:" << std::endl;
    for ( uint ik = 0; ik < prod_3.rows(); ik++ )
    {
      for ( uint jk = 0; jk < prod_3.cols(); jk++)
      {
        std::cerr << prod_3(ik,jk) << " ";
      }
      std::cerr << std::endl;
    }
  }
  inverseKernelMatrix = inverseKernelMatrix - prod_3;
  //correct the stored kernel matrix after our computations
  for (uint i = 0; i < newRow.size(); i++)
  {
    kernelMatrix(i,rowIndex) = newRow[i];
    kernelMatrix(rowIndex,i) = newRow[i];
  }
}
/**
* @brief Updates the cached inverse kernel matrix when only k rows and columns have changed. Time is O(k^3 + n^2) instead of O(n^3). Alternatively one could iteratively change one row at a time, which leads to O(k*n^2).
* @author Alexander Lütz
* @date 01/09/2011 (dd/mm/yyyy)
*/
// Rank-2k update of the cached inverse kernel matrix: the rows/columns given
// by rowIndices are replaced by newRows, written as K' = K + U*V with U,V of
// rank 2k, and K'^{-1} is obtained via Sherman-Morrison-Woodbury.
// Requires inverseKernelMatrix to be up to date. All newRows must have the
// same length (the size of newRows[0] is used throughout).
void KernelData::perform_Rank_2k_Update(const std::vector<int> & rowIndices, const std::vector<NICE::Vector> & newRows)
{
  if ( (rowIndices.size() != 0) && (rowIndices.size() == newRows.size()) )
  {
    // build the unit vectors e_j and difference vectors a_j = newRow_j - K(row_j,:)
    std::vector<NICE::Vector> unity_vectors;
    std::vector<NICE::Vector> diff_vectors;
    for (uint j = 0; j < rowIndices.size(); j++)
    {
      NICE::Vector unity_vector(newRows[0].size(), 0.0);
      unity_vector[rowIndices[j] ] = 1.0;
      unity_vectors.push_back(unity_vector);
      NICE::Vector a = newRows[j] - kernelMatrix.getRow(rowIndices[j]);
      // halve the entries that the symmetric update would add twice
      for (uint x = 0; x < rowIndices.size(); x++)
      {
        a[rowIndices[x] ] /= 2.0;
      }
      diff_vectors.push_back(a);
    }
    // assemble U (n x 2k) and V (2k x n) from the unit and difference vectors
    U.resize(newRows[0].size(),2*rowIndices.size());
    V.resize(2*rowIndices.size(),newRows[0].size());
    for (uint i = 0; i < newRows[0].size(); i++)
    {
      for (uint j = 0; j < rowIndices.size(); j++)
      {
        U(i,rowIndices.size()+j) = (unity_vectors[j])[i];
        U(i,j) = (diff_vectors[j])[i];
        V(rowIndices.size()+j,i) = (diff_vectors[j])[i];
        V(j,i) = (unity_vectors[j])[i];
      }
    }
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- U:" << std::endl;
      for ( uint ik = 0; ik < U.rows(); ik++ )
      {
        for ( uint jk = 0; jk < U.cols(); jk++)
        {
          std::cerr << U(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- V:" << std::endl;
      for ( uint ik = 0; ik < V.rows(); ik++ )
      {
        for ( uint jk = 0; jk < V.cols(); jk++)
        {
          std::cerr << V(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    // NOTE(review): UV is only used for the debug output below; it does not
    // enter the actual update, which is why the halving correction here is
    // (harmlessly) guarded by verbose as well.
    NICE::Matrix UV(newRows[0].size(),newRows[0].size());
    UV.multiply(U,V);
    if (verbose)
    {
      // we have to consider the entries which are added twice
      for (int x = 0; x < (int)rowIndices.size(); x++)
      {
        for (int y = x; y < (int)rowIndices.size(); y++)
        {
          UV(rowIndices[x] , rowIndices[y]) /= 2.0;
          if (x!=y)
            UV(rowIndices[y] , rowIndices[x]) /= 2.0;
        }
      }
    }
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- UV:" << std::endl;
      for ( uint ik = 0; ik < UV.rows(); ik++ )
      {
        for ( uint jk = 0; jk < UV.cols(); jk++)
        {
          std::cerr << UV(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    // B = K^{-1} U, column by column via the Cholesky solver: O(k * n^2)
    B.resize(newRows[0].size(),2*rowIndices.size());
    for (uint j = 0; j < 2*rowIndices.size(); j++)
    {
      NICE::Vector B_row;
      computeInverseKernelMultiply(U.getColumn(j),B_row);
      for (uint i = 0; i < newRows[0].size(); i++)
      {
        B(i,j) = B_row[i];
      }
    }
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- B:" << std::endl;
      for ( uint ik = 0; ik < B.rows(); ik++ )
      {
        for ( uint jk = 0; jk < B.cols(); jk++)
        {
          std::cerr << B(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    // F = I + V*B (2k x 2k)
    F.resize(2*rowIndices.size(),2*rowIndices.size());
    F.setIdentity();
    Matrix V_B (2*rowIndices.size(),2*rowIndices.size());
    V_B.multiply(V,B);
    F += V_B;
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F:" << std::endl;
      for ( uint ik = 0; ik < F.rows(); ik++ )
      {
        for ( uint jk = 0; jk < F.cols(); jk++)
        {
          std::cerr << F(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    //invert F!
    F_inv = invert(F);
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F_inv:" << std::endl;
      for ( uint ik = 0; ik < F_inv.rows(); ik++ )
      {
        for ( uint jk = 0; jk < F_inv.cols(); jk++)
        {
          std::cerr << F_inv(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    // sanity check: F * F_inv should be (close to) the identity
    NICE::Matrix MultiplicationResult( F.rows(), F_inv.cols(), 0.0 );
    MultiplicationResult.multiply(F,F_inv);
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F-inversion MultiplicationResult:" << std::endl;
      for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
      {
        for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++)
        {
          std::cerr << MultiplicationResult(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    // K'^{-1} = K^{-1} - B * F^{-1} * V * K^{-1}
    Matrix prod_1;
    prod_1.multiply(V,inverseKernelMatrix);
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_1:" << std::endl;
      for ( uint ik = 0; ik < prod_1.rows(); ik++ )
      {
        for ( uint jk = 0; jk < prod_1.cols(); jk++)
        {
          std::cerr << prod_1(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    Matrix prod_2;
    prod_2.multiply(F_inv,prod_1);
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_2:" << std::endl;
      for ( uint ik = 0; ik < prod_2.rows(); ik++ )
      {
        for ( uint jk = 0; jk < prod_2.cols(); jk++)
        {
          std::cerr << prod_2(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    Matrix prod_3;
    prod_3.multiply(B,prod_2);
    if (verbose)
    {
      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_3:" << std::endl;
      for ( uint ik = 0; ik < prod_3.rows(); ik++ )
      {
        for ( uint jk = 0; jk < prod_3.cols(); jk++)
        {
          std::cerr << prod_3(ik,jk) << " ";
        }
        std::cerr << std::endl;
      }
    }
    inverseKernelMatrix = inverseKernelMatrix - prod_3;
    //remember the new kernel entries for the next time
    for (uint i = 0; i < rowIndices.size(); i++)
    {
      for ( uint ik = 0; ik < kernelMatrix.rows(); ik++ )
      {
        kernelMatrix(ik,rowIndices[i]) = (newRows[i])[ik];
        kernelMatrix(rowIndices[i],ik) = (newRows[i])[ik];
      }
    }
  }
  else
  {
    std::cerr << "Failure" << std::endl;
  }
}
  676. void KernelData::delete_one_row(const int & rowIndex)
  677. {
  678. if ( (inverseKernelMatrix.rows() != 0) && (inverseKernelMatrix.cols() != 0))
  679. {
  680. inverseKernelMatrix.deleteCol(rowIndex);
  681. inverseKernelMatrix.deleteRow(rowIndex);
  682. }
  683. if ( (choleskyMatrix.rows() != 0) && (choleskyMatrix.cols() != 0))
  684. {
  685. choleskyMatrix.deleteCol(rowIndex);
  686. choleskyMatrix.deleteRow(rowIndex);
  687. }
  688. if ( (kernelMatrix.rows() != 0) && (kernelMatrix.cols() != 0))
  689. {
  690. kernelMatrix.deleteCol(rowIndex);
  691. kernelMatrix.deleteRow(rowIndex);
  692. }
  693. }
  694. void KernelData::delete_multiple_rows(std::vector<int> & indices)
  695. {
  696. if ( (inverseKernelMatrix.rows() >= indices.size()) && (inverseKernelMatrix.cols() >= indices.size()))
  697. {
  698. inverseKernelMatrix.deleteCols(indices);
  699. inverseKernelMatrix.deleteRows(indices);
  700. }
  701. if ( (choleskyMatrix.rows() >= indices.size()) && (choleskyMatrix.cols() >= indices.size()))
  702. {
  703. choleskyMatrix.deleteCols(indices);
  704. choleskyMatrix.deleteRows(indices);
  705. }
  706. if ( (kernelMatrix.rows() >= indices.size()) && (kernelMatrix.cols() >= indices.size()))
  707. {
  708. kernelMatrix.deleteCols(indices);
  709. kernelMatrix.deleteRows(indices);
  710. }
  711. }
// Replaces the stored kernel matrix. Callers must refresh the cached
// factorizations afterwards (updateCholeskyFactorization() /
// updateInverseKernelMatrix()) -- they are NOT updated here.
void KernelData::setKernelMatrix(const NICE::Matrix & k_matrix)
{
  kernelMatrix = k_matrix;
}
  716. void KernelData::increase_size_by_One()
  717. {
  718. NICE::Matrix new_Kernel(kernelMatrix.rows()+1, kernelMatrix.cols()+1);
  719. new_Kernel.setBlock(0, 0, kernelMatrix);
  720. for (uint i = 0; i < kernelMatrix.rows()-1; i++)
  721. {
  722. new_Kernel(i,kernelMatrix.cols()) = 0.0;
  723. new_Kernel(kernelMatrix.rows(),i) = 0.0;
  724. }
  725. new_Kernel(kernelMatrix.rows(),kernelMatrix.cols()) = 1.0;
  726. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  727. kernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
  728. kernelMatrix = new_Kernel;
  729. new_Kernel.setBlock(0, 0, inverseKernelMatrix);
  730. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  731. inverseKernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
  732. inverseKernelMatrix = new_Kernel;
  733. new_Kernel.setBlock(0, 0, choleskyMatrix);
  734. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  735. choleskyMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
  736. choleskyMatrix = new_Kernel;
  737. }
  738. void KernelData::increase_size_by_k(const uint & k)
  739. {
  740. NICE::Matrix new_Kernel(kernelMatrix.rows()+k, kernelMatrix.cols()+k);
  741. new_Kernel.setBlock(0, 0, kernelMatrix);
  742. for (uint i = 0; i < kernelMatrix.rows()-1; i++)
  743. {
  744. for (uint j = 0; j < k; j++)
  745. {
  746. new_Kernel(i,kernelMatrix.cols()+j) = 0.0;
  747. new_Kernel(kernelMatrix.rows()+j,i) = 0.0;
  748. }
  749. }
  750. for (uint j = 0; j < k; j++)
  751. {
  752. new_Kernel(kernelMatrix.rows()+j,kernelMatrix.cols()+j) = 1.0;
  753. }
  754. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  755. kernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
  756. kernelMatrix = new_Kernel;
  757. new_Kernel.setBlock(0, 0, inverseKernelMatrix);
  758. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  759. inverseKernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
  760. inverseKernelMatrix = new_Kernel;
  761. new_Kernel.setBlock(0, 0, choleskyMatrix);
  762. //NOTE Maybe it would be more efficient to work directly with pointers to the memory
  763. choleskyMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
  764. choleskyMatrix = new_Kernel;
  765. }