- /**
- * @file MAPMultinomialGaussianPrior.cpp
- * @brief MAP estimation of a multinomial distribution with a Gaussian prior
- * @author Erik Rodner
- * @date 05/26/2008
- */
- #include <iostream>
- #include <limits>
- #include <cassert>
- #include <cmath>
- #include <cstdio>
- #include "vislearning/optimization/mapestimation/MAPMultinomialGaussianPrior.h"
- using namespace OBJREC;
- using namespace std;
- using namespace NICE;
- MAPMultinomialGaussianPrior::MAPMultinomialGaussianPrior()
- {
- }
- MAPMultinomialGaussianPrior::~MAPMultinomialGaussianPrior()
- {
- }
- #undef DEBUG_OPTIMIZATION
- #undef DEBUG_ANALYZEOPTIMIZATION
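- // Returns the (unnormalized) log-posterior maximized by the MAP estimate:
- //   J(theta) = \sum_i counts_i log(theta_i) - \frac{1}{2 \sigma^2} \sum_i (theta_i - mu_i)^2,
- // i.e. the multinomial log-likelihood plus an isotropic Gaussian log-prior on theta.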
- double MAPMultinomialGaussianPrior::objective (
- const NICE::Vector & theta,
- const NICE::Vector & counts,
- const NICE::Vector & mu,
- double sigmaq )
- {
- // multinomial log-likelihood term: \sum_i counts_i log(theta_i), with theta_i clamped away from zero
- double sum_likelihood = 0.0;
- for ( size_t i = 0 ; i < counts.size() ; i++ )
- {
- double ti = (theta[i] < 10e-7) ? 10e-7 : theta[i];
- sum_likelihood += counts[i] * log( ti );
- }
-
- // Gaussian log-prior term: -\frac{1}{2 \sigma^2} \sum_i (theta_i - mu_i)^2
- double sum_prior = 0.0;
- for ( size_t i = 0 ; i < counts.size() ; i++ )
- {
- double d = (theta[i] - mu[i]);
- sum_prior -= d*d*0.5;
- }
-
- sum_prior /= sigmaq;
- #ifdef DEBUG_OPTIMIZATION
- fprintf (stderr, "log likelihood : %f\n", sum_likelihood );
- fprintf (stderr, "log prior : %f\n", sum_prior );
- #endif
- return sum_likelihood + sum_prior;
- }
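- // Normalizes x to sum to one (L1 normalization); assumes a strictly positive sum.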
- void MAPMultinomialGaussianPrior::normalizeProb ( NICE::Vector & x )
- {
- double sum = 0.0;
- for ( size_t i = 0 ; i < x.size() ; i++ )
- sum += x[i];
- for ( size_t i = 0 ; i < x.size() ; i++ )
- x[i] /= sum;
- }
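- // Determines the Lagrange multiplier lambda of the constrained MAP problem
- //   max_theta J(theta)  s.t.  \sum_i theta_i = 1
- // via Newton's method applied to the constraint function
- //   f(lambda) = 0.5 m lambda sigma^2 + \sum_l \sqrt{ g_l(lambda) } - 0.5,
- //   g_l(lambda) = ( 0.5 (mu_l + lambda sigma^2) )^2 + sigma^2 counts_l,
- // for which f(lambda) = 0 is equivalent to \sum_i theta_i(lambda) = 1,
- // assuming the prior mean mu itself sums to one.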
- double MAPMultinomialGaussianPrior::estimateLambdaNonLinear (
- const NICE::Vector & counts,
- const NICE::Vector & mu,
- double sigmaq )
- {
- // determine the Lagrange multiplier via Newton's method
- const int maxIterations = 10000;
- const double eps = 10e-9;
- double count = 0;
- for ( size_t i = 0 ; i < counts.size() ; i++ )
- count += counts[i];
- double lambda = - count;
- int m = mu.size();
- int iterations = 0;
- double diff = 0.0;
-
- do {
- // calculate f(\lambda) = 0.5 m \lambda \sigma^2 + \sum_l \sqrt{ g_l(\lambda) } - 0.5
- double f = 0.5 * m * lambda * sigmaq - 0.5;
- // fabl accumulates the derivative f'(\lambda)
- double fabl = 0.5 * m * sigmaq;
- for ( size_t i = 0 ; i < mu.size() ; i++ )
- {
- double phalf = 0.5 * (mu[i] + lambda * sigmaq);
- // undersqrt = g_l(\lambda) = phalf^2 + \sigma^2 counts_l
- double undersqrt = phalf*phalf + sigmaq * counts[i];
- assert ( undersqrt >= 0.0 );
-
- // d/d\lambda \sqrt{ g_l(\lambda) } = 0.25 (\mu_l + \lambda \sigma^2) \sigma^2 / \sqrt{ g_l(\lambda) }
- double gabl = 0.25 * (mu[i] + lambda * sigmaq) * sigmaq;
- fabl += gabl / sqrt(undersqrt);
- f += sqrt(undersqrt);
- }
- double lambdanew = lambda - f / fabl;
- diff = fabs(f / fabl);
- #ifdef DEBUG_OPTIMIZATION
- fprintf (stderr, "(%d) %f %f\n", iterations, lambdanew, diff );
- #endif
- lambda = lambdanew;
- #ifdef DEBUG_ANALYZEOPTIMIZATION
- NICE::Vector theta (m);
- // compute the current theta estimate to monitor the objective value
- double sum = 0.0;
- for ( size_t i = 0 ; i < mu.size() ; i++ )
- {
- double phalf = 0.5 * (mu[i] + lambda*sigmaq);
- theta[i] = phalf + sqrt( phalf*phalf + sigmaq * counts[i] );
- assert ( theta[i] >= 0.0 );
- sum += theta[i];
- }
- fprintf (stderr, "OPT %f %d %f\n", sigmaq, iterations,
- objective ( theta, counts, mu, sigmaq ) );
- #endif
- iterations++;
- } while ( (iterations < maxIterations) && (diff > eps) );
- fprintf (stderr, "MAPMultinomialGaussianPrior: optimization finished (%d/%d iterations, %f last delta)\n",
- iterations, maxIterations, diff );
- fprintf (stderr, "MAPMultinomialGaussianPrior: final lambda: %f\n", lambda );
- return lambda;
- }
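- // Determines the same Lagrange multiplier as estimateLambdaNonLinear, but via the
- // fixed-point iteration obtained by solving f(lambda) = 0 for its linear term:
- //   lambda = -2 ( \sum_l \sqrt{ g_l(lambda) } - 0.5 ) / (m sigma^2),
- // iterated until the change in lambda drops below eps.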
- double MAPMultinomialGaussianPrior::estimateLambda (
- const NICE::Vector & counts,
- const NICE::Vector & mu,
- double sigmaq )
- {
- // determine the Lagrange multiplier via fixed-point iteration
- const int maxIterations = 10000;
- //const double eps = 10e-5;
- const double eps = 10e-9;
- int m = mu.size();
- double count = 0;
- for ( size_t i = 0 ; i < counts.size() ; i++ )
- count += counts[i];
- // iterate the fixed-point formula; convergence is not guaranteed
- double lambda = - count;
- double diff;
- int iterations = 0;
- do {
- // accumulate the square-root sum \sum_l \sqrt{ g_l(\lambda) }
- double sqrsum = 0.0;
- for ( size_t i = 0 ; i < mu.size() ; i++ )
- {
- double phalf = 0.5 * (mu[i] + lambda * sigmaq);
- double undersqrt = phalf*phalf + sigmaq * counts[i];
- assert ( undersqrt >= 0.0 );
- sqrsum += sqrt( undersqrt );
- }
- double lambdanew = - (sqrsum - 0.5) * 2 / (sigmaq * m);
- diff = fabs(lambda-lambdanew);
- #ifdef DEBUG_OPTIMIZATION
- fprintf (stderr, "(%d) %f %f\n", iterations, lambdanew, diff );
- #endif
- lambda = lambdanew;
- #ifdef DEBUG_ANALYZEOPTIMIZATION
- NICE::Vector theta (m);
- // compute the current theta estimate to monitor the objective value
- double sum = 0.0;
- for ( size_t i = 0 ; i < mu.size() ; i++ )
- {
- double phalf = 0.5 * (mu[i] + lambda*sigmaq);
- theta[i] = phalf + sqrt( phalf*phalf + sigmaq * counts[i] );
- assert ( theta[i] >= 0.0 );
- sum += theta[i];
- }
- fprintf (stderr, "OPT %f %d %f\n", sigmaq, iterations,
- objective ( theta, counts, mu, sigmaq ) );
- #endif
- iterations++;
- } while ( (iterations < maxIterations) && (diff > eps) );
- fprintf (stderr, "MAPMultinomialGaussianPrior: optimization finished (%d/%d iterations, %f last delta)\n",
- iterations, maxIterations, diff );
- fprintf (stderr, "MAPMultinomialGaussianPrior: final lambda: %f\n", lambda );
- return lambda;
- }
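- // Computes the MAP estimate itself: setting the derivative of the Lagrangian
- //   counts_i log(theta_i) - \frac{1}{2 \sigma^2} (theta_i - mu_i)^2 + lambda theta_i
- // with respect to theta_i to zero gives a quadratic equation whose positive root is
- //   theta_i = phalf + \sqrt{ phalf^2 + sigma^2 counts_i },  phalf = 0.5 (mu_i + lambda sigma^2).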
- void MAPMultinomialGaussianPrior::estimate (
- NICE::Vector & theta,
- const NICE::Vector & counts,
- const NICE::Vector & mu,
- double sigmaq )
- {
- theta = Vector(counts);
- normalizeProb ( theta );
- #ifdef DEBUG_OPTIMIZATION
- fprintf (stderr, "objective (non informative) : %f\n",
- objective ( theta, counts, mu, sigmaq ) );
- #endif
- double lambda = 1.0;
- if ( sigmaq < std::numeric_limits<double>::epsilon() )
- {
- fprintf (stderr, "MAPMultinomialGaussianPrior: skipping lambda optimization (sigmaq = 0.0)\n");
- } else {
- //lambda = estimateLambdaNonLinear ( counts, mu, sigmaq );
- lambda = estimateLambda ( counts, mu, sigmaq );
- }
- // compute the MAP estimate of theta for the determined lambda
- double sum = 0.0;
- for ( size_t i = 0 ; i < mu.size() ; i++ )
- {
- double phalf = 0.5 * (mu[i] + lambda*sigmaq);
- theta[i] = phalf + sqrt( phalf*phalf + sigmaq * counts[i] );
- assert ( theta[i] >= 0.0 );
- sum += theta[i];
- }
- #ifdef DEBUG_OPTIMIZATION
- fprintf (stderr, "objective (informative prior) : %f\n",
- objective ( theta, counts, mu, sigmaq ) );
- fprintf (stderr, "Sum over theta = %f\n", sum );
- #endif
- }
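- // Estimates the MAP distribution from a single likelihood sample (interpreted as counts)
- // and the mean of the prior distribution samples, then checks that the result sums to one.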
- void MAPMultinomialGaussianPrior::estimate ( NICE::Vector & mapEstimate,
- const VVector & likelihoodDistributionSamples,
- const VVector & priorDistributionSamples,
- double sigmaq )
- {
- assert ( likelihoodDistributionSamples.size() == 1 );
- const NICE::Vector & mlEstimate = likelihoodDistributionSamples[0];
- NICE::Vector mu;
- for ( VVector::const_iterator i = priorDistributionSamples.begin();
- i != priorDistributionSamples.end(); i++ )
- {
- const NICE::Vector & x = *i;
- if ( mu.size() == 0 ) mu = x;
- else mu = mu + x;
- }
- assert ( priorDistributionSamples.size() > 0 );
- mu = mu * (1.0/priorDistributionSamples.size());
- mapEstimate.resize(mlEstimate.size());
- mapEstimate.set(0);
-
- estimate ( mapEstimate, mlEstimate, mu, sigmaq );
-
- double sum = 0.0;
- for ( size_t i = 0 ; i < mapEstimate.size() ; i++ )
- sum += mapEstimate[i];
- assert ( fabs(sum-1) < 10e-4 );
- }
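- // A minimal usage sketch with hypothetical values (not part of the original file),
- // assuming the prior mean sums to one and sigma^2 = 0.1:
- //
- //   NICE::Vector counts (3);
- //   counts[0] = 4; counts[1] = 1; counts[2] = 5;   // observed multinomial counts
- //   NICE::Vector mu (3);
- //   mu.set ( 1.0 / 3.0 );                          // uniform prior mean
- //   NICE::Vector theta;
- //   MAPMultinomialGaussianPrior estimator;
- //   estimator.estimate ( theta, counts, mu, 0.1 );
- //   // theta then holds the MAP estimate, between the normalized counts and mu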