MAPMultinomialGaussianPrior.cpp 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317
  1. /**
  2. * @file MAPMultinomialGaussianPrior.cpp
  3. * @brief MAP estimation of multinomial with gaussian prior
  4. * @author Erik Rodner
  5. * @date 05/26/2008
  6. */
  7. #include <iostream>
  8. #include <limits>
  9. #include "vislearning/optimization/mapestimation/MAPMultinomialGaussianPrior.h"
  10. using namespace OBJREC;
  11. using namespace std;
  12. // refactor-nice.pl: check this substitution
  13. // old: using namespace ice;
  14. using namespace NICE;
// Default constructor: the estimator is stateless, nothing to initialize.
MAPMultinomialGaussianPrior::MAPMultinomialGaussianPrior()
{
}
// Destructor: no resources are owned by this class.
MAPMultinomialGaussianPrior::~MAPMultinomialGaussianPrior()
{
}
  21. #undef DEBUG_OPTIMIZATION
  22. #undef DEBUG_ANALYZEOPTIMIZATION
  23. double MAPMultinomialGaussianPrior::objective (
  24. // refactor-nice.pl: check this substitution
  25. // old: const Vector & theta,
  26. const NICE::Vector & theta,
  27. // refactor-nice.pl: check this substitution
  28. // old: const Vector & counts,
  29. const NICE::Vector & counts,
  30. // refactor-nice.pl: check this substitution
  31. // old: const Vector & mu,
  32. const NICE::Vector & mu,
  33. double sigmaq )
  34. {
  35. // calculate - log
  36. double sum_likelihood = 0.0;
  37. for ( size_t i = 0 ; i < counts.size() ; i++ )
  38. {
  39. double ti = (theta[i] < 10e-7) ? 10e-7 : theta[i];
  40. sum_likelihood += counts[i] * log( ti );
  41. }
  42. double sum_prior = 0.0;
  43. for ( size_t i = 0 ; i < counts.size() ; i++ )
  44. {
  45. double d = (theta[i] - mu[i]);
  46. sum_prior -= d*d*0.5;
  47. }
  48. sum_prior /= sigmaq;
  49. #ifdef DEBUG_OPTIMIZATION
  50. fprintf (stderr, "log likelihood : %f\n", sum_likelihood );
  51. fprintf (stderr, "log prior : %f\n", sum_prior );
  52. #endif
  53. return sum_likelihood + sum_prior;
  54. }
  55. // refactor-nice.pl: check this substitution
  56. // old: void MAPMultinomialGaussianPrior::normalizeProb ( Vector & x )
  57. void MAPMultinomialGaussianPrior::normalizeProb ( NICE::Vector & x )
  58. {
  59. double sum = 0.0;
  60. for ( size_t i = 0 ; i < x.size() ; i++ )
  61. sum += x[i];
  62. for ( size_t i = 0 ; i < x.size() ; i++ )
  63. x[i] /= sum;
  64. }
/**
 * Estimate the lagrange multiplier lambda with Newton's method.
 *
 * Seeks a root of
 *   f(\lambda) = 0.5 m \lambda \sigma^2 + \sum_l \sqrt{ g_l(\lambda) } - 0.5,
 * with g_l(\lambda) = (0.5 (\mu_l + \lambda \sigma^2))^2 + \sigma^2 counts_l.
 *
 * NOTE(review): this variant is disabled at the call site (estimate() uses
 * estimateLambda() instead). The derivative accumulated in 'fabl' looks
 * inconsistent with f (sign and a factor vs. d/d\lambda \sqrt{g_l} =
 * g_l'/(2\sqrt{g_l})) — verify the math before re-enabling.
 *
 * @param counts observed event counts
 * @param mu     mean of the gaussian prior
 * @param sigmaq variance of the gaussian prior (must be non-zero)
 * @return the estimated lagrange multiplier
 */
double MAPMultinomialGaussianPrior::estimateLambdaNonLinear (
const NICE::Vector & counts,
const NICE::Vector & mu,
double sigmaq )
{
// calculate lagrange multiplier
const int maxIterations = 10000;
const double eps = 10e-9;
// total number of observed events
double count = 0;
for ( size_t i = 0 ; i < counts.size() ; i++ )
count += counts[i];
// start value: multiplier of the pure maximum-likelihood solution
double lambda = - count;
int m = mu.size();
int iterations = 0;
double diff = 0.0;
do {
// calculate f(\lambda) = 0.5 m \lambda \sigma^2 + \sum_l \sqrt{ g_l(\lambda) } - 0.5
double f = 0.5 * m * lambda * sigmaq - 0.5;
// fabl accumulates the derivative f'(\lambda) (see NOTE above)
double fabl = 0.5 * m * sigmaq;
for ( size_t i = 0 ; i < mu.size() ; i++ )
{
double phalf = 0.5 * (mu[i] + lambda * sigmaq);
double undersqrt = phalf*phalf + sigmaq * counts[i];
assert ( undersqrt >= 0.0 );
// undersqrt = g_l(\lambda)
// gabl: derivative-related term of g_l w.r.t. \lambda
double gabl = (mu[i] + lambda * sigmaq) * sigmaq;
fabl -= gabl / sqrt(undersqrt);
f += sqrt(undersqrt);
}
// Newton step: lambda <- lambda - f / f'
double lambdanew = lambda - f / fabl;
diff = fabs(f / fabl);
#ifdef DEBUG_OPTIMIZATION
fprintf (stderr, "(%d) %f %f\n", iterations, lambdanew, diff );
#endif
lambda = lambdanew;
#ifdef DEBUG_ANALYZEOPTIMIZATION
// trace the objective value of the theta induced by the current lambda
NICE::Vector theta (m);
// calculate theta estimation
double sum = 0.0;
for ( size_t i = 0 ; i < mu.size() ; i++ )
{
double phalf = 0.5 * (mu[i] + lambda*sigmaq);
theta[i] = phalf + sqrt( phalf*phalf + sigmaq * counts[i] );
assert ( theta[i] >= 0.0 );
sum += theta[i];
}
fprintf (stderr, "OPT %f %d %f\n", sigmaq, iterations,
objective ( theta, counts, mu, sigmaq ) );
#endif
iterations++;
} while ( (iterations < maxIterations) && (diff > eps) );
fprintf (stderr, "MAPMultinomialGaussianPrior: optimization finished (%d/%d iterations, %f last delta)\n",
iterations, maxIterations, diff );
fprintf (stderr, "MAPMultinomialGaussianPrior: final lambda: %f\n", lambda );
return lambda;
}
  127. double MAPMultinomialGaussianPrior::estimateLambda (
  128. // refactor-nice.pl: check this substitution
  129. // old: const Vector & counts,
  130. const NICE::Vector & counts,
  131. // refactor-nice.pl: check this substitution
  132. // old: const Vector & mu,
  133. const NICE::Vector & mu,
  134. double sigmaq )
  135. {
  136. // calculate lagrange multiplier
  137. const int maxIterations = 10000;
  138. //const double eps = 10e-5;
  139. const double eps = 10e-9;
  140. int m = mu.size();
  141. double count = 0;
  142. for ( size_t i = 0 ; i < counts.size() ; i++ )
  143. count += counts[i];
  144. // using iterative formula, hopefully this converges
  145. double lambda = - count;
  146. double diff;
  147. int iterations = 0;
  148. do {
  149. // calculate evil sqrt sum
  150. double sqrsum = 0.0;
  151. for ( size_t i = 0 ; i < mu.size() ; i++ )
  152. {
  153. double phalf = 0.5 * (mu[i] + lambda * sigmaq);
  154. double undersqrt = phalf*phalf + sigmaq * counts[i];
  155. assert ( undersqrt >= 0.0 );
  156. sqrsum += sqrt( undersqrt );
  157. }
  158. double lambdanew = - (sqrsum - 0.5) * 2 / (sigmaq * m);
  159. diff = fabs(lambda-lambdanew);
  160. #ifdef DEBUG_OPTIMIZATION
  161. fprintf (stderr, "(%d) %f %f\n", iterations, lambdanew, diff );
  162. #endif
  163. lambda = lambdanew;
  164. #ifdef DEBUG_ANALYZEOPTIMIZATION
  165. // refactor-nice.pl: check this substitution
  166. // old: Vector theta (m);
  167. NICE::Vector theta (m);
  168. // calculate theta estimation
  169. double sum = 0.0;
  170. for ( size_t i = 0 ; i < mu.size() ; i++ )
  171. {
  172. double phalf = 0.5 * (mu[i] + lambda*sigmaq);
  173. theta[i] = phalf + sqrt( phalf*phalf + sigmaq * counts[i] );
  174. assert ( theta[i] >= 0.0 );
  175. sum += theta[i];
  176. }
  177. fprintf (stderr, "OPT %f %d %f\n", sigmaq, iterations,
  178. objective ( theta, counts, mu, sigmaq ) );
  179. #endif
  180. iterations++;
  181. } while ( (iterations < maxIterations) && (diff > eps) );
  182. fprintf (stderr, "MAPMultinomialGaussianPrior: optimization finished (%d/%d iterations, %f last delta)\n",
  183. iterations, maxIterations, diff );
  184. fprintf (stderr, "MAPMultinomialGaussianPrior: final lambda: %f\n", lambda );
  185. return lambda;
  186. }
  187. void MAPMultinomialGaussianPrior::estimate (
  188. // refactor-nice.pl: check this substitution
  189. // old: Vector & theta,
  190. NICE::Vector & theta,
  191. // refactor-nice.pl: check this substitution
  192. // old: const Vector & counts,
  193. const NICE::Vector & counts,
  194. // refactor-nice.pl: check this substitution
  195. // old: const Vector & mu,
  196. const NICE::Vector & mu,
  197. double sigmaq )
  198. {
  199. theta = Vector(counts);
  200. normalizeProb ( theta );
  201. #ifdef DEBUG_OPTIMIZATION
  202. fprintf (stderr, "objective (non informative) : %f\n",
  203. objective ( theta, counts, mu, sigmaq ) );
  204. #endif
  205. double lambda = 1.0;
  206. if ( sigmaq < std::numeric_limits<double>::epsilon() )
  207. {
  208. fprintf (stderr, "MAPMultinomialGaussianPrior: skipping lambda optimization (sigmaq = 0.0)\n");
  209. } else {
  210. //lambda = estimateLambdaNonLinear ( counts, mu, sigmaq );
  211. lambda = estimateLambda ( counts, mu, sigmaq );
  212. }
  213. // calculate theta estimation
  214. double sum = 0.0;
  215. for ( size_t i = 0 ; i < mu.size() ; i++ )
  216. {
  217. double phalf = 0.5 * (mu[i] + lambda*sigmaq);
  218. theta[i] = phalf + sqrt( phalf*phalf + sigmaq * counts[i] );
  219. assert ( theta[i] >= 0.0 );
  220. sum += theta[i];
  221. }
  222. #ifdef DEBUG_OPTIMIZATION
  223. fprintf (stderr, "objective (informative prior) : %f\n",
  224. objective ( theta, counts, mu, sigmaq ) );
  225. fprintf (stderr, "Sum over theta = %f\n", sum );
  226. #endif
  227. }
  228. // refactor-nice.pl: check this substitution
  229. // old: void MAPMultinomialGaussianPrior::estimate ( Vector & mapEstimate,
  230. void MAPMultinomialGaussianPrior::estimate ( NICE::Vector & mapEstimate,
  231. const VVector & likelihoodDistributionSamples,
  232. const VVector & priorDistributionSamples,
  233. double sigmaq )
  234. {
  235. assert ( likelihoodDistributionSamples.size() == 1 );
  236. // refactor-nice.pl: check this substitution
  237. // old: const Vector & mlEstimate = likelihoodDistributionSamples[0];
  238. const NICE::Vector & mlEstimate = likelihoodDistributionSamples[0];
  239. // refactor-nice.pl: check this substitution
  240. // old: Vector mu;
  241. NICE::Vector mu;
  242. for ( VVector::const_iterator i = priorDistributionSamples.begin();
  243. i != priorDistributionSamples.end(); i++ )
  244. {
  245. // refactor-nice.pl: check this substitution
  246. // old: const Vector & x = *i;
  247. const NICE::Vector & x = *i;
  248. if ( mu.size() == 0 ) mu = x;
  249. else mu = mu + x;
  250. }
  251. mu = mu * (1.0/priorDistributionSamples.size());
  252. mapEstimate.resize(mlEstimate.size());
  253. mapEstimate.set(0);
  254. estimate ( mapEstimate, mlEstimate, mu, sigmaq );
  255. double sum = 0.0;
  256. for ( int i = 0 ; i < (int)mapEstimate.size() ; i++ )
  257. sum += mapEstimate[i];
  258. assert ( fabs(sum-1) < 10e-4 );
  259. }