// TraceApproximation.cpp
/**
 * @file TraceApproximation.cpp
 * @brief approximation of the trace term within the GP regression gradient
 * @author Erik Rodner
 * @date 01/29/2010
 */
#include "TraceApproximation.h"

#include <cmath>
#include <iostream>

using namespace std;
using namespace NICE;
using namespace OBJREC;
  12. TraceApproximation::TraceApproximation( const Config *conf, const string & section )
  13. {
  14. numTraceSamples = conf->gI(section, "num_trace_samples", 100 );
  15. displayPoints = conf->gI(section, "display_points", 100 );
  16. if ( displayPoints > numTraceSamples )
  17. displayPoints = numTraceSamples;
  18. }
TraceApproximation::~TraceApproximation()
{
  // nothing to release: this class owns no dynamically allocated resources
}
  22. void TraceApproximation::preSampling ( const KernelData *kernelData, Matrix & samples, Matrix & samplesPreMultiplied ) const
  23. {
  24. if ( numTraceSamples == 0 ) return;
  25. uint kernelSize = kernelData->getKernelMatrixSize();
  26. samples.resize ( kernelSize, numTraceSamples );
  27. samplesPreMultiplied.resize ( kernelSize, numTraceSamples );
  28. cerr << "TraceApproximation: number of samples " << numTraceSamples << endl;
  29. cerr << "TraceApproximation: pre-sampling " << endl;
  30. for ( uint i = 0 ; i < kernelSize ; i++ )
  31. for ( uint k = 0 ; (int)k < numTraceSamples ; k++ )
  32. samples(i,k) = randGaussDouble ( 1.0 );
  33. cerr << "TraceApproximation: pre-multiplication ";
  34. for ( uint k = 0 ; (int)k < numTraceSamples; k++ )
  35. {
  36. if ( k % (numTraceSamples/displayPoints) == 0 )
  37. {
  38. cerr << ".";
  39. cerr.flush();
  40. }
  41. Vector rinv = samplesPreMultiplied.getColumnRef ( k );
  42. kernelData->computeInverseKernelMultiply ( samples.getColumn(k), rinv );
  43. }
  44. cerr << endl;
  45. }
  46. double TraceApproximation::getApproximateTraceTerm ( const KernelData *kernelData, const Matrix & jacobiMatrix ) const
  47. {
  48. if ( numTraceSamples == 0 ) return 0.0;
  49. Vector r ( jacobiMatrix.rows() );
  50. Vector rinv;
  51. Vector rJacobi;
  52. double trace_approx = 0.0;
  53. double trace_approx_old = 0.0;
  54. double stddev = 0.0;
  55. cerr << "TraceApproximation: number of samples " << numTraceSamples << endl;
  56. cerr << "TraceApproximation: sampling ";
  57. for ( uint k = 0 ; (int)k < numTraceSamples; k++ )
  58. {
  59. if ( k % (numTraceSamples/displayPoints) == 0 )
  60. {
  61. cerr << ".";
  62. cerr.flush();
  63. }
  64. // normality is not required ? (says wikipedia)
  65. // but seems to yield "better results"
  66. r = Vector::GaussRandom ( r.size(), 0.0, 1.0 );
  67. kernelData->computeInverseKernelMultiply ( r, rinv );
  68. rJacobi = jacobiMatrix * r;
  69. rJacobi = r;
  70. double tau = rinv.scalarProduct ( rJacobi );
  71. // idea of RunningStat
  72. trace_approx = trace_approx + (tau - trace_approx) / (k+1);
  73. stddev += ( tau - trace_approx_old ) * ( tau - trace_approx );
  74. trace_approx_old = trace_approx;
  75. }
  76. cerr << endl;
  77. stddev /= (numTraceSamples-1);
  78. stddev = sqrt(stddev);
  79. double standard_error = stddev / sqrt((double)numTraceSamples);
  80. double rel_standard_error = standard_error / fabs(trace_approx);
  81. cerr << "TraceApproximation: relative error " << rel_standard_error << endl;
  82. return trace_approx;
  83. }
  84. double TraceApproximation::getApproximateTraceTermKronecker ( const KernelData *kernelData, const Matrix & A, const Matrix & B ) const
  85. {
  86. if ( numTraceSamples == 0 ) return 0.0;
  87. Vector r ( A.rows() * B.rows() );
  88. Vector rinv;
  89. Vector rJacobi;
  90. double trace_approx = 0.0;
  91. double trace_approx_old = 0.0;
  92. double stddev = 0.0;
  93. cerr << "TraceApproximation: number of samples " << numTraceSamples << endl;
  94. cerr << "TraceApproximation: sampling ";
  95. for ( uint k = 0 ; (int)k < numTraceSamples; k++ )
  96. {
  97. if ( k % (numTraceSamples/displayPoints) == 0 )
  98. {
  99. cerr << ".";
  100. cerr.flush();
  101. }
  102. // normality is not required ? (says wikipedia)
  103. // but seems to yield "better results"
  104. r = Vector::GaussRandom ( r.size(), 0.0, 1.0 );
  105. kernelData->computeInverseKernelMultiply ( r, rinv );
  106. // this is the only difference -----------------------
  107. kroneckerProductMultiplication ( A, B, r, rJacobi );
  108. double tau = rinv.scalarProduct ( rJacobi );
  109. // idea of RunningStat
  110. trace_approx = trace_approx + (tau - trace_approx) / (k+1);
  111. stddev += ( tau - trace_approx_old ) * ( tau - trace_approx );
  112. trace_approx_old = trace_approx;
  113. }
  114. cerr << endl;
  115. stddev /= (numTraceSamples-1);
  116. stddev = sqrt(stddev);
  117. double standard_error = stddev / sqrt((double)numTraceSamples);
  118. double rel_standard_error = standard_error / fabs(trace_approx);
  119. cerr << "TraceApproximation: relative error " << rel_standard_error << endl;
  120. return trace_approx;
  121. }
  122. double TraceApproximation::getApproxTraceTermKroneckerPre ( Matrix & samples, Matrix & samplesPreMultiplied, const Matrix & A, const Matrix & B ) const
  123. {
  124. if ( numTraceSamples == 0 ) return 0.0;
  125. double trace_approx = 0.0;
  126. double trace_approx_old = 0.0;
  127. double stddev = 0.0;
  128. Vector rJacobi;
  129. cerr << "TraceApproximation: estimating ";
  130. for ( uint k = 0 ; (int)k < numTraceSamples; k++ )
  131. {
  132. if ( k % (numTraceSamples/displayPoints) == 0 )
  133. {
  134. cerr << ".";
  135. cerr.flush();
  136. }
  137. // this is the only difference -----------------------
  138. Vector r = samples.getColumnRef(k);
  139. Vector rinv = samplesPreMultiplied.getColumnRef(k);
  140. kroneckerProductMultiplication ( A, B, r, rJacobi );
  141. double tau = rinv.scalarProduct ( rJacobi );
  142. // idea of RunningStat
  143. trace_approx = trace_approx + (tau - trace_approx) / (k+1);
  144. stddev += ( tau - trace_approx_old ) * ( tau - trace_approx );
  145. trace_approx_old = trace_approx;
  146. }
  147. cerr << endl;
  148. stddev /= (numTraceSamples-1);
  149. stddev = sqrt(stddev);
  150. double standard_error = stddev / sqrt( (double)numTraceSamples);
  151. double rel_standard_error = standard_error / fabs(trace_approx);
  152. cerr << "TraceApproximation: relative error " << rel_standard_error << endl;
  153. return trace_approx;
  154. }