Browse Source

minor changes in libdepend

bodesheim 12 years ago
parent commit ece569fc48

+ 22 - 0
cbaselib/LabeledSet.cpp

@@ -468,4 +468,26 @@ void LabeledSetVector::getFlatRepresentation ( VVector & vecSet, NICE::Vector &
 
 }
 
+void LabeledSetVector::removePointersToDataWithoutDeletion()
+{
+  //remove pointers in the order-struct if needed
+  if ( ! this->selection ) {
+    for ( Permutation::iterator i  = this->insertOrder.begin();
+          i != this->insertOrder.end();
+          i++ )
+    {
+      i->second = NULL;
+    }
+  }
+  
+  //remove pointers in normal map
+  for ( std::map< int, std::vector<NICE::Vector *> >::iterator iter = this->begin(); iter != this->end(); ++iter )
+  {
+    for ( int j = 0; j < (int)iter->second.size(); j++ )
+    {
+      iter->second[j] = NULL;
+    }
+  }  
+}
+
 #endif

+ 5 - 0
cbaselib/LabeledSet.h

@@ -165,6 +165,11 @@ class LabeledSetVector :
      * @param vecSetLabels labels (output)
      */
     void getFlatRepresentation ( NICE::VVector & vecSet, NICE::Vector & vecSetLabels ) const;
+    
+    /**
+     * @brief Set all data pointers to NULL, i.e., keep the data in memory but remove this data structure's pointers to it
+     */
+    void removePointersToDataWithoutDeletion();
 
     friend class LabeledSetSelection<LabeledSetVector>;
 };
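
The new removePointersToDataWithoutDeletion() enables a hand-over of ownership: another structure keeps the raw NICE::Vector pointers alive, the set forgets them, and clearing or destroying the set can no longer free (or double-free) the data. A minimal caller-side sketch (helper name hypothetical):

    #include <vector>

    // keep the raw data pointers alive elsewhere, then null them inside the set
    void stealData ( OBJREC::LabeledSetVector & lsv, std::vector<NICE::Vector *> & keepAlive )
    {
      for ( OBJREC::LabeledSetVector::const_iterator it = lsv.begin() ; it != lsv.end() ; ++it )
        keepAlive.insert ( keepAlive.end(), it->second.begin(), it->second.end() );

      lsv.removePointersToDataWithoutDeletion(); // all internal pointers become NULL
      // lsv may now be cleared or destroyed; the vectors in keepAlive stay valid
    }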

+ 6 - 0
classifier/classifierbase/FeaturePoolClassifier.cpp

@@ -78,3 +78,9 @@ void FeaturePoolClassifier::setComplexity ( int size )
     fprintf (stderr, "FeaturePoolClassifier::setComplexity: not yet implemented in subordinate class\n");
     exit(-1);
 }
+
+void FeaturePoolClassifier::addMultipleExamples( OBJREC::Examples & newExamples)
+{
+    fprintf (stderr, "FeaturePoolClassifier::addMultipleExamples: not yet implemented in subordinate class\n");
+    exit(-1);
+}

+ 3 - 0
classifier/classifierbase/FeaturePoolClassifier.h

@@ -50,6 +50,9 @@ class FeaturePoolClassifier : public NICE::Persistent
 
 	/** set complexity for the next training process e.g. number of weak classifiers */
 	virtual void setComplexity ( int size );
+  
+  /** add multiple examples given in the OBJREC::Examples data structure */
+  virtual void addMultipleExamples( OBJREC::Examples & newExamples);
 
 };
 

+ 7 - 0
classifier/classifierbase/VecClassifier.h

@@ -15,6 +15,7 @@
 #include "vislearning/cbaselib/ClassificationResult.h"
 
 #define ROADWORKS fthrow(NICE::Exception, "clone(): not yet implemented!");
+#define ROADWORKSADD fthrow(NICE::Exception, "teach (int classno, const NICE::Vector & x ): not yet implemented!");
 
 namespace OBJREC
 {
@@ -56,6 +57,12 @@ class VecClassifier : public NICE::Persistent
     {
       ROADWORKS;
     };
+    
+    virtual void teach (int classno, const NICE::Vector & x )
+    {
+      ROADWORKSADD;
+    };
+
 };
 
 #undef ROADWORKS
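
The new virtual teach(int classno, const NICE::Vector & x) gives every VecClassifier an incremental interface; the base implementation aborts via ROADWORKSADD, so it is only meaningful for subclasses that override it, such as VCNearestNeighbour further below. A minimal sketch (helper name hypothetical; it assumes VecClassifier also declares finishTeaching(), which VCNearestNeighbour implements):

    #include <utility>
    #include <vector>

    // feed labeled examples one by one through the new incremental interface
    void teachIncrementally ( OBJREC::VecClassifier & classifier,
                              const std::vector< std::pair<int, NICE::Vector> > & examples )
    {
      for ( size_t i = 0 ; i < examples.size() ; i++ )
        classifier.teach ( examples[i].first, examples[i].second );

      classifier.finishTeaching(); // assumption: declared in VecClassifier as well
    }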

+ 317 - 317
classifier/kernelclassifier/KCGPRegOneVsAll.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file KCGPRegOneVsAll.cpp
 * @brief One vs. All interface for kernel classifiers
 * @author Erik Rodner
@@ -32,182 +32,182 @@ using namespace std;
 using namespace OBJREC;
 
 KCGPRegOneVsAll::KCGPRegOneVsAll( const Config *conf, Kernel *kernelFunction, const string & section )
-	: KernelClassifier ( conf, kernelFunction )
+    : KernelClassifier ( conf, kernelFunction )
 {
-	this->maxClassNo = 0;
-	this->verbose = conf->gB( section, "verbose", false );
-	this->optimizeParameters = (kernelFunction == NULL) ? false : conf->gB( section, "optimize_parameters", true );
-	this->maxIterations = conf->gI( section, "optimization_maxiterations", 500 );
-	this->numSamplesCalibration = conf->gI( section, "calibrated_probabilities_numsamples", 2000 );
-	this->calibrateProbabilities = conf->gB( section, "calibrated_probabilities", false );
-	if ( verbose && this->calibrateProbabilities )
-		cerr << "KCGPRegOneVsAll: probability calibration is turned on, this can result in massive additional load!" << endl;
-
-	// we do joint optimization, therefore single classifiers (cloned from prototype)
-	// are not allowed to do optimization themselves
-	Config confNoOptimize ( *conf );
-	confNoOptimize.sB( section, "optimize_parameters", false );
-	this->prototype = new RegGaussianProcess ( &confNoOptimize, kernelFunction, section ); 
-
-	// Do not use this option, unless you know what you are doing!
-	// This function is just necessary for hyperparameter optimization with really large
-	// kernel matrices of a kronecker structure!
-	bool approximateTraceTerm = conf->gB(section, "approximate_trace_term", false);
-	if ( approximateTraceTerm ) 
-		traceApproximation = new TraceApproximation ( conf, section );
-	else
-		traceApproximation = NULL;
-
-	// select final hyperparameters according to leave-one-out
-	useLooParameters = conf->gB(section, "use_loo_parameters", false );
-	
-	// select the criterion
-	modelselcrit = NULL;
-	if ( useLooParameters ) {
-		string modelselcrit_s = conf->gS(section, "loo_crit", "loo_pred_prob" );
-		modelselcrit = GenericGPModelSelection::selectModelSel ( conf, modelselcrit_s );
-		cerr << "KCGPRegOneVsAll: using additional model selection criterion " << modelselcrit_s << endl;
-	}
-
-	// do we want to compute the uncertainty of the estimate ?
-	computeUncertainty = conf->gB(section, "compute_uncertainty", false );
+  this->maxClassNo = 0;
+  this->verbose = conf->gB( section, "verbose", false );
+  this->optimizeParameters = (kernelFunction == NULL) ? false : conf->gB( section, "optimize_parameters", true );
+  this->maxIterations = conf->gI( section, "optimization_maxiterations", 500 );
+  this->numSamplesCalibration = conf->gI( section, "calibrated_probabilities_numsamples", 2000 );
+  this->calibrateProbabilities = conf->gB( section, "calibrated_probabilities", false );
+  if ( verbose && this->calibrateProbabilities )
+    cerr << "KCGPRegOneVsAll: probability calibration is turned on, this can result in massive additional load!" << endl;
+
+  // we do joint optimization, therefore single classifiers (cloned from prototype)
+  // are not allowed to do optimization themselves
+  Config confNoOptimize ( *conf );
+  confNoOptimize.sB( section, "optimize_parameters", false );
+  this->prototype = new RegGaussianProcess ( &confNoOptimize, kernelFunction, section );
+
+  // Do not use this option, unless you know what you are doing!
+  // This function is just necessary for hyperparameter optimization with really large
+  // kernel matrices of a kronecker structure!
+  bool approximateTraceTerm = conf->gB(section, "approximate_trace_term", false);
+  if ( approximateTraceTerm )
+    traceApproximation = new TraceApproximation ( conf, section );
+  else
+    traceApproximation = NULL;
+
+  // select final hyperparameters according to leave-one-out
+  useLooParameters = conf->gB(section, "use_loo_parameters", false );
+
+  // select the criterion
+  modelselcrit = NULL;
+  if ( useLooParameters ) {
+    string modelselcrit_s = conf->gS(section, "loo_crit", "loo_pred_prob" );
+    modelselcrit = GenericGPModelSelection::selectModelSel ( conf, modelselcrit_s );
+    cerr << "KCGPRegOneVsAll: using additional model selection criterion " << modelselcrit_s << endl;
+  }
+
+  // do we want to compute the uncertainty of the estimate ?
+  computeUncertainty = conf->gB(section, "compute_uncertainty", false );
 }
 
 KCGPRegOneVsAll::KCGPRegOneVsAll( const KCGPRegOneVsAll &vcova ): KernelClassifier(vcova)
 {
-	prototype = vcova.prototype->clone();
-	optimizeParameters = vcova.optimizeParameters;
-	verbose = vcova.verbose;
-	maxIterations = vcova.maxIterations;
-	useLooParameters = vcova.useLooParameters;
-	computeUncertainty = vcova.computeUncertainty;
-	choleskyMatrix = vcova.choleskyMatrix;
-	calibrateProbabilities = vcova.calibrateProbabilities;
-	numSamplesCalibration = vcova.numSamplesCalibration;
-	
-	for(int i = 0; i < (int)vcova.classifiers.size(); i++)
-	{
-		classifiers.push_back(pair<int, RegGaussianProcess*>(vcova.classifiers[i].first,vcova.classifiers[i].second->clone()));
-	}
-
-	if ( vcova.traceApproximation == NULL )
-		traceApproximation = NULL;
-	else
-		traceApproximation = new TraceApproximation(*vcova.traceApproximation);
-
-	if ( vcova.modelselcrit == NULL )
-		modelselcrit = NULL;
-	else
-		modelselcrit = new GPMSCLooLikelihoodRegression(*vcova.modelselcrit);
+  prototype = vcova.prototype->clone();
+  optimizeParameters = vcova.optimizeParameters;
+  verbose = vcova.verbose;
+  maxIterations = vcova.maxIterations;
+  useLooParameters = vcova.useLooParameters;
+  computeUncertainty = vcova.computeUncertainty;
+  choleskyMatrix = vcova.choleskyMatrix;
+  calibrateProbabilities = vcova.calibrateProbabilities;
+  numSamplesCalibration = vcova.numSamplesCalibration;
+
+  for (int i = 0; i < (int)vcova.classifiers.size(); i++)
+  {
+    classifiers.push_back(pair<int, RegGaussianProcess*>(vcova.classifiers[i].first, vcova.classifiers[i].second->clone()));
+  }
+
+  if ( vcova.traceApproximation == NULL )
+    traceApproximation = NULL;
+  else
+    traceApproximation = new TraceApproximation(*vcova.traceApproximation);
+
+  if ( vcova.modelselcrit == NULL )
+    modelselcrit = NULL;
+  else
+    modelselcrit = new GPMSCLooLikelihoodRegression(*vcova.modelselcrit);
 }
 
 KCGPRegOneVsAll::~KCGPRegOneVsAll()
 {
-	if ( traceApproximation != NULL )
-		delete traceApproximation;
-	if ( modelselcrit != NULL )
-		delete modelselcrit;
+  if ( traceApproximation != NULL )
+    delete traceApproximation;
+  if ( modelselcrit != NULL )
+    delete modelselcrit;
 }
 
 void KCGPRegOneVsAll::teach ( KernelData *kernelData, const NICE::Vector & y )
 {
-	maxClassNo = (int)y.Max();
-
-	set<int> classesUsed;
-	for ( uint i = 0 ; i < y.size(); i++ )
-		classesUsed.insert ( (int)y[i] );
-	
-	classifiers.clear();
-
-	VVector ySet;
-	VVector ySetZeroMean;
-	for ( set<int>::const_iterator it = classesUsed.begin();
-		it != classesUsed.end(); it++)
-	{
-		int i = *it;
-
-		NICE::Vector ySub ( y.size() );
-		NICE::Vector ySubZeroMean ( y.size() );
-		for ( size_t j = 0 ; j < ySub.size() ; j++ )
-		{
-			ySub[j] = ((int)y[j] == i) ? 1 : 0;
-			ySubZeroMean[j] = ((int)y[j] == i) ? 1 : -1;
-		}
-		ySet.push_back ( ySub );
-		ySetZeroMean.push_back ( ySubZeroMean );
-	}
-
-	// Hyperparameter optimization
-	if ( optimizeParameters ) 
-	{
-		ParameterizedKernel *kernelPara = dynamic_cast< ParameterizedKernel * > ( kernelFunction );
-		if ( kernelPara == NULL ) {
-			fthrow(Exception, "KCGPRegOneVsAll: you have to specify a parameterized kernel !");
-		}
-		GPRegressionOptimizationProblem gpopt ( kernelData, ySetZeroMean, kernelPara, verbose, modelselcrit, traceApproximation );
-
-		// the trust region classifier is better for my large collection of one classification problem :)
-		// FirstOrderRasmussen optimizer;
-		FirstOrderTrustRegion optimizer;
-		optimizer.setMaxIterations ( maxIterations );
-		optimizer.setEpsilonG ( 0.01 );
-
-		cout << "KCGPRegOneVsAll: Hyperparameter optimization ..." << endl;
-		optimizer.optimizeFirst ( gpopt );
-		cout << "KCGPRegOneVsAll: Hyperparameter optimization ...done" << endl;
-	
-		if ( useLooParameters ) 
-		{
-			cerr << "KCGPRegOneVsAll: using best loo parameters" << endl;
-			gpopt.useLooParameters();
-		}
-
-		gpopt.update();
-
-		Vector parameters;
-		kernelPara->getParameters ( parameters );
-		cout << "KCGPRegOneVsAll: Optimization finished: " << parameters << endl << endl;
-	} else {
-		kernelData->updateCholeskyFactorization();
-	}
-	
-	//for binary problems
-	if(classesUsed.size() == 2 && false)
-	{
-		set<int>::const_iterator it = classesUsed.begin();
-		int classno = *it;
-		it++;
-		int classno2 = *it;
-		const Vector & ySub = ySet[0];
-		RegGaussianProcess *classifier;
-		classifier = prototype->clone();
-		if (verbose)
-			fprintf (stderr, "KCGPRegOneVsAll: training classifier class %d <-> %d\n", classno, classno2 );
-		classifier->teach ( kernelData, ySub );
-		classifiers.push_back ( pair<int, RegGaussianProcess*> (classno, classifier) );
-		classifiers.push_back ( pair<int, RegGaussianProcess*> (classno2, classifier) );
-	}
-	else
-	{
-		int i = 0;
-		for ( set<int>::const_iterator it = classesUsed.begin(); it != classesUsed.end(); it++,i++)
-		{
-			int classno = *it;
-			const Vector & ySub = ySet[i];
-			RegGaussianProcess *classifier;
-			classifier = prototype->clone();
-	
-			if (verbose)
-				fprintf (stderr, "KCGPRegOneVsAll: training classifier class %d <-> remainder\n", classno );
-	
-			classifier->teach ( kernelData, ySub );
-
-			classifiers.push_back ( pair<int, RegGaussianProcess*> (classno, classifier) );
-		}
-	}
-
-	if ( computeUncertainty || calibrateProbabilities )
-		choleskyMatrix = kernelData->getCholeskyMatrix();
+  maxClassNo = (int)y.Max();
+
+  set<int> classesUsed;
+  for ( uint i = 0 ; i < y.size(); i++ )
+    classesUsed.insert ( (int)y[i] );
+
+  classifiers.clear();
+
+  VVector ySet;
+  VVector ySetZeroMean;
+  for ( set<int>::const_iterator it = classesUsed.begin();
+        it != classesUsed.end(); it++)
+  {
+    int i = *it;
+
+    NICE::Vector ySub ( y.size() );
+    NICE::Vector ySubZeroMean ( y.size() );
+    for ( size_t j = 0 ; j < ySub.size() ; j++ )
+    {
+      ySub[j] = ((int)y[j] == i) ? 1 : 0;
+      ySubZeroMean[j] = ((int)y[j] == i) ? 1 : -1;
+    }
+    ySet.push_back ( ySub );
+    ySetZeroMean.push_back ( ySubZeroMean );
+  }
+
+  // Hyperparameter optimization
+  if ( optimizeParameters )
+  {
+    ParameterizedKernel *kernelPara = dynamic_cast< ParameterizedKernel * > ( kernelFunction );
+    if ( kernelPara == NULL ) {
+      fthrow(Exception, "KCGPRegOneVsAll: you have to specify a parameterized kernel !");
+    }
+    GPRegressionOptimizationProblem gpopt ( kernelData, ySetZeroMean, kernelPara, verbose, modelselcrit, traceApproximation );
+
+    // the trust region optimizer is better for my large collection of classification problems :)
+    // FirstOrderRasmussen optimizer;
+    FirstOrderTrustRegion optimizer;
+    optimizer.setMaxIterations ( maxIterations );
+    optimizer.setEpsilonG ( 0.01 );
+
+    cout << "KCGPRegOneVsAll: Hyperparameter optimization ..." << endl;
+    optimizer.optimizeFirst ( gpopt );
+    cout << "KCGPRegOneVsAll: Hyperparameter optimization ...done" << endl;
+
+    if ( useLooParameters )
+    {
+      cerr << "KCGPRegOneVsAll: using best loo parameters" << endl;
+      gpopt.useLooParameters();
+    }
+
+    gpopt.update();
+
+    Vector parameters;
+    kernelPara->getParameters ( parameters );
+    cout << "KCGPRegOneVsAll: Optimization finished: " << parameters << endl << endl;
+  } else {
+    kernelData->updateCholeskyFactorization();
+  }
+
+  //for binary problems
+  if (classesUsed.size() == 2 && false)
+  {
+    set<int>::const_iterator it = classesUsed.begin();
+    int classno = *it;
+    it++;
+    int classno2 = *it;
+    const Vector & ySub = ySet[0];
+    RegGaussianProcess *classifier;
+    classifier = prototype->clone();
+    if (verbose)
+      fprintf (stderr, "KCGPRegOneVsAll: training classifier class %d <-> %d\n", classno, classno2 );
+    classifier->teach ( kernelData, ySub );
+    classifiers.push_back ( pair<int, RegGaussianProcess*> (classno, classifier) );
+    classifiers.push_back ( pair<int, RegGaussianProcess*> (classno2, classifier) );
+  }
+  else
+  {
+    int i = 0;
+    for ( set<int>::const_iterator it = classesUsed.begin(); it != classesUsed.end(); it++, i++)
+    {
+      int classno = *it;
+      const Vector & ySubZeroMean = ySetZeroMean[i];
+      RegGaussianProcess *classifier;
+      classifier = prototype->clone();
+
+      if (verbose)
+        fprintf (stderr, "KCGPRegOneVsAll: training classifier class %d <-> remainder\n", classno );
+
+      classifier->teach ( kernelData, ySubZeroMean );
+
+      classifiers.push_back ( pair<int, RegGaussianProcess*> (classno, classifier) );
+    }
+  }
+
+  if ( computeUncertainty || calibrateProbabilities )
+    choleskyMatrix = kernelData->getCholeskyMatrix();
 
 }
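
teach() builds two binary target encodings per class: ySub with {0,1} entries and ySubZeroMean with {-1,+1} entries. GP regression assumes a zero-mean prior, so the {-1,+1} coding is symmetric around the prior mean while {0,1} targets would be systematically offset; the zero-mean coding is therefore used for hyperparameter optimization, and the per-class regressors in the multi-class loop are now trained on it as well. A toy illustration of the two encodings:

    // toy illustration: labels y = (0, 2, 1, 2), positive class = 2
    NICE::Vector y ( 4 );
    y[0] = 0; y[1] = 2; y[2] = 1; y[3] = 2;

    NICE::Vector ySub ( y.size() );          // {0,1}  coding -> ( 0, 1,  0, 1)
    NICE::Vector ySubZeroMean ( y.size() );  // {-1,1} coding -> (-1, 1, -1, 1)
    for ( size_t j = 0 ; j < y.size() ; j++ )
    {
      ySub[j]         = ((int)y[j] == 2) ? 1 :  0;
      ySubZeroMean[j] = ((int)y[j] == 2) ? 1 : -1;
    }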
 
@@ -218,180 +218,180 @@ void KCGPRegOneVsAll::teach ( KernelData *kernelData, const NICE::Vector & y )
  */
 void KCGPRegOneVsAll::teach ( KernelData *kernelData, const std::vector<double> & y )
 {
-	// FIXME: Do we really need this function? (erik)
-	Vector y_nice (y);
-	teach ( kernelData, y_nice );
+  // FIXME: Do we really need this function? (erik)
+  Vector y_nice (y);
+  teach ( kernelData, y_nice );
 }
 
 
 ClassificationResult KCGPRegOneVsAll::classifyKernel ( const NICE::Vector & kernelVector, double kernelSelf ) const
 {
-	if ( classifiers.size() <= 0 ) 
-		fthrow(Exception, "The classifier was not trained with training data (use teach(...))");
-
-	FullVector scores ( maxClassNo+1 );
-	scores.set(0);
-
-	//for binary problems
-	if(classifiers.size() == 2 && false)
-	{
-		int classno = classifiers[0].first;
-		int classno2 = classifiers[1].first;
-		RegGaussianProcess *classifier = classifiers[0].second;
-		double yEstimate = classifier->predictKernel(kernelVector, kernelSelf);
-		scores[classno] = yEstimate;
-		scores[classno2] = 0;//1-yEstimate;
-		//cout << "i: " << 0 << ": " << scores[classno] << endl;
-		//cout << "i: " << 1 << ": " << scores[classno2] << endl;
-	}
-	else
-	{
+  if ( classifiers.size() <= 0 )
+    fthrow(Exception, "The classifier was not trained with training data (use teach(...))");
+
+  FullVector scores ( maxClassNo + 1 );
+  scores.set(0);
+
+  //for binary problems
+  if (classifiers.size() == 2 && false)
+  {
+    int classno = classifiers[0].first;
+    int classno2 = classifiers[1].first;
+    RegGaussianProcess *classifier = classifiers[0].second;
+    double yEstimate = classifier->predictKernel(kernelVector, kernelSelf);
+    scores[classno] = yEstimate;
+    scores[classno2] = 0;//1-yEstimate;
+    //cout << "i: " << 0 << ": " << scores[classno] << endl;
+    //cout << "i: " << 1 << ": " << scores[classno2] << endl;
+  }
+  else
+  {
 #pragma omp parallel for
-		for ( int classifierIndex = 0 ; classifierIndex < (int)classifiers.size(); classifierIndex++ )
-		{
-			int classno = classifiers[(uint)classifierIndex].first;
-			RegGaussianProcess *classifier = classifiers[(uint)classifierIndex].second;
-			double yEstimate = classifier->predictKernel(kernelVector, kernelSelf);
-			scores[classno] += yEstimate;
-			//cout << "i: " << classifierIndex << ": " << scores[classno] << endl;
-		}
-	}
-
-	double uncertainty = 0.0;
-	if ( computeUncertainty || calibrateProbabilities )
-	{
-		Vector tmp;
-		choleskySolveLargeScale ( choleskyMatrix, kernelVector, tmp );
-
-		// tmp = K^{-1} k*
-		uncertainty = kernelSelf - kernelVector.scalarProduct ( tmp );
-		
-		/*if(uncertainty < 0.0)
-// 			uncertainty = 0.0;*/
-
-		if ( calibrateProbabilities )
-		{
-			/*********************************************************************
-			 * Calibration of probabilities is based on the following
-			 * idea: 
-			 *
-			 * The scores \mu_i (or scores[i]) and the uncertainty
-			 * r.uncertainty are the mean and the variance of the predictive
-			 * distribution p(y_i | ...). So actually we do not know the correct value for
-			 * y_i and therefore just taking the maximum score ignores this uncertainty 
-			 * completely! 
-			 * What we might want to have is the probability for each class k
-			 * p_k = p( k = argmax_i y_i )
-			 *
-			 * Calculating this probability for n>2 is intractable and we
-			 * have to do monte carlo estimation which is performed in the following code
-			 *
-			 * An alternative would be to approximate p_k with 
-			 * p( mu_k >= max_{i != k} y_i ) = prod_{i != k} F_i(mu_k) 
-			 * with F_i being the cumulative distribution of the normal distribution i
-			 *
-			 * Details: Erik Rodner, "Learning with Few Examples for Visual Recognition Problems", PhD thesis
-			 *
-			 ********************************************************************/
-
-			double stddev = sqrt(uncertainty);
-			FullVector calibratedScores ( maxClassNo + 1 );
-			calibratedScores.set(0.0);
-			for ( uint i = 0 ; i < numSamplesCalibration ; i++ )
-			{
-				uint cmaxClassno = 0;
-				double cmax = - std::numeric_limits<double>::max();
-				for ( int classifierIndex = 0 ; classifierIndex < (int)classifiers.size(); classifierIndex++ )
-				{
-					int classno = classifiers[(uint)classifierIndex].first;
-					double s = randGaussDouble ( stddev ) + scores[classno];
-					if ( s > cmax )
-					{
-						cmax = s;
-						cmaxClassno = classno;
-					}
-				}
-				calibratedScores[ cmaxClassno ]++;
-			}
-			calibratedScores.normalize();
-
-			// calibrating probabilities should not affect our hard
-			// decision for a specific class
-			if ( verbose ) {
-				if ( scores.maxElement() != calibratedScores.maxElement() )
-					cerr << "Calibration of probabilities affected the hard decision, you should increase calibrated_probabilities_numsamples !!" << endl;
-			}
-
-			scores = calibratedScores;
-		}
-	}
-
-	ClassificationResult r ( scores.maxElement(), scores );
-	r.uncertainty = uncertainty;
-
-	return r;
+    for ( int classifierIndex = 0 ; classifierIndex < (int)classifiers.size(); classifierIndex++ )
+    {
+      int classno = classifiers[(uint)classifierIndex].first;
+      RegGaussianProcess *classifier = classifiers[(uint)classifierIndex].second;
+      double yEstimate = classifier->predictKernel(kernelVector, kernelSelf);
+      scores[classno] += yEstimate;
+      //cout << "i: " << classifierIndex << ": " << scores[classno] << endl;
+    }
+  }
+
+  double uncertainty = 0.0;
+  if ( computeUncertainty || calibrateProbabilities )
+  {
+    Vector tmp;
+    choleskySolveLargeScale ( choleskyMatrix, kernelVector, tmp );
+
+    // tmp = K^{-1} k*
+    uncertainty = kernelSelf - kernelVector.scalarProduct ( tmp );
+
+    /*if(uncertainty < 0.0)
+    //    uncertainty = 0.0;*/
+
+    if ( calibrateProbabilities )
+    {
+      /*********************************************************************
+       * Calibration of probabilities is based on the following
+       * idea:
+       *
+       * The scores \mu_i (or scores[i]) and the uncertainty
+       * r.uncertainty are the mean and the variance of the predictive
+       * distribution p(y_i | ...). So actually we do not know the correct value for
+       * y_i and therefore just taking the maximum score ignores this uncertainty
+       * completely!
+       * What we might want to have is the probability for each class k
+       * p_k = p( k = argmax_i y_i )
+       *
+       * Calculating this probability exactly for n>2 is intractable, so we
+       * have to resort to Monte Carlo estimation, which is performed in the following code
+       *
+       * An alternative would be to approximate p_k with
+       * p( mu_k >= max_{i != k} y_i ) = prod_{i != k} F_i(mu_k)
+       * with F_i being the cumulative distribution of the normal distribution i
+       *
+       * Details: Erik Rodner, "Learning with Few Examples for Visual Recognition Problems", PhD thesis
+       *
+       ********************************************************************/
+
+      double stddev = sqrt(uncertainty);
+      FullVector calibratedScores ( maxClassNo + 1 );
+      calibratedScores.set(0.0);
+      for ( uint i = 0 ; i < numSamplesCalibration ; i++ )
+      {
+        uint cmaxClassno = 0;
+        double cmax = - std::numeric_limits<double>::max();
+        for ( int classifierIndex = 0 ; classifierIndex < (int)classifiers.size(); classifierIndex++ )
+        {
+          int classno = classifiers[(uint)classifierIndex].first;
+          double s = randGaussDouble ( stddev ) + scores[classno];
+          if ( s > cmax )
+          {
+            cmax = s;
+            cmaxClassno = classno;
+          }
+        }
+        calibratedScores[ cmaxClassno ]++;
+      }
+      calibratedScores.normalize();
+
+      // calibrating probabilities should not affect our hard
+      // decision for a specific class
+      if ( verbose ) {
+        if ( scores.maxElement() != calibratedScores.maxElement() )
+          cerr << "Calibration of probabilities affected the hard decision, you should increase calibrated_probabilities_numsamples !!" << endl;
+      }
+
+      scores = calibratedScores;
+    }
+  }
+
+  ClassificationResult r ( scores.maxElement(), scores );
+  r.uncertainty = uncertainty;
+
+  return r;
 }
 
 KCGPRegOneVsAll* KCGPRegOneVsAll::clone(void) const
 {
-	KCGPRegOneVsAll *classifier = new KCGPRegOneVsAll( *this );
-	return classifier;
+  KCGPRegOneVsAll *classifier = new KCGPRegOneVsAll( *this );
+  return classifier;
 }
 
 void KCGPRegOneVsAll::clear()
 {
-	//nothing to clear
+  //nothing to clear
 }
 
 void KCGPRegOneVsAll::restore(std::istream& ifs, int type)
 {
-	ifs.precision (numeric_limits<double>::digits10 + 1);
-	ifs >> maxClassNo;
-	ifs >> computeUncertainty;
-	ifs >> calibrateProbabilities;
-
-	if(calibrateProbabilities || computeUncertainty)
-	{
-		ifs >> choleskyMatrix;
-	}
-
-	int size;
-	ifs >> size;
-
-	for(int i = 0; i < size; i++)
-	{
-		int tmp;
-		ifs >> tmp;
-		RegGaussianProcess *classifier;
-		classifier = prototype->clone();
-		classifier->restore(ifs);
-		classifiers.push_back ( pair<int, RegGaussianProcess*> (tmp, classifier) );
-	}
-
-	KernelClassifier::restore(ifs,type);
+  ifs.precision (numeric_limits<double>::digits10 + 1);
+  ifs >> maxClassNo;
+  ifs >> computeUncertainty;
+  ifs >> calibrateProbabilities;
+
+  if (calibrateProbabilities || computeUncertainty)
+  {
+    ifs >> choleskyMatrix;
+  }
+
+  int size;
+  ifs >> size;
+
+  for (int i = 0; i < size; i++)
+  {
+    int tmp;
+    ifs >> tmp;
+    RegGaussianProcess *classifier;
+    classifier = prototype->clone();
+    classifier->restore(ifs);
+    classifiers.push_back ( pair<int, RegGaussianProcess*> (tmp, classifier) );
+  }
+
+  KernelClassifier::restore(ifs, type);
 }
 
 void KCGPRegOneVsAll::store(std::ostream& ofs, int type) const
 {
-	ofs.precision (numeric_limits<double>::digits10 + 1);
-	
-	ofs << maxClassNo << endl;
-	ofs << computeUncertainty << endl;
-	ofs << calibrateProbabilities << endl;
-
-	
-	if(calibrateProbabilities || computeUncertainty)
-	{
-		ofs << choleskyMatrix << endl;
-	}
-	
-	ofs << classifiers.size() << endl;
-	for(uint i = 0; i < classifiers.size(); i++)
-	{
-		ofs << classifiers[i].first << endl;
-		classifiers[i].second->store(ofs);
-		ofs << endl;
-	}
-	KernelClassifier::store(ofs,type);
+  ofs.precision (numeric_limits<double>::digits10 + 1);
+
+  ofs << maxClassNo << endl;
+  ofs << computeUncertainty << endl;
+  ofs << calibrateProbabilities << endl;
+
+
+  if (calibrateProbabilities || computeUncertainty)
+  {
+    ofs << choleskyMatrix << endl;
+  }
+
+  ofs << classifiers.size() << endl;
+  for (uint i = 0; i < classifiers.size(); i++)
+  {
+    ofs << classifiers[i].first << endl;
+    classifiers[i].second->store(ofs);
+    ofs << endl;
+  }
+  KernelClassifier::store(ofs, type);
 }
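
The calibration block above estimates p_k = p( k = argmax_i y_i ) by drawing Gaussian samples around the predicted scores and counting argmax wins. The same idea as a self-contained sketch (helper name hypothetical; Box-Muller sampling via rand() stands in for the library's randGaussDouble):

    #include <cmath>
    #include <cstdlib>
    #include <limits>
    #include <vector>

    // Monte Carlo estimate of p_k = p( k = argmax_i y_i ) for independent
    // Gaussian scores y_i ~ N(mu_i, sigma^2), mirroring the calibration loop above
    std::vector<double> calibrateScores ( const std::vector<double> & mu, double sigma, int numSamples )
    {
      std::vector<double> p ( mu.size(), 0.0 );
      for ( int s = 0 ; s < numSamples ; s++ )
      {
        int argmax = 0;
        double best = - std::numeric_limits<double>::max();
        for ( size_t k = 0 ; k < mu.size() ; k++ )
        {
          // Box-Muller: turn two uniform samples into one standard normal sample
          double u1 = ( rand() + 1.0 ) / ( RAND_MAX + 2.0 );
          double u2 = ( rand() + 1.0 ) / ( RAND_MAX + 2.0 );
          double g  = sqrt ( -2.0 * log ( u1 ) ) * cos ( 2.0 * M_PI * u2 );

          double y = mu[k] + sigma * g; // one sample of y_k
          if ( y > best )
          {
            best = y;
            argmax = (int)k;
          }
        }
        p[argmax] += 1.0 / numSamples;
      }
      return p; // approximates the calibrated class probabilities, sums to 1
    }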
 

+ 2 - 2
classifier/kernelclassifier/progs/libdepend.inc

@@ -5,5 +5,5 @@ $(call PKG_DEPEND_INT,vislearning/baselib)
 $(call PKG_DEPEND_INT,vislearning/cbaselib)
 $(call PKG_DEPEND_INT,vislearning/classifier/kernelclassifier)
 $(call PKG_DEPEND_INT,vislearning/matlabAccessHighLevel)
-
-
+$(call PKG_DEPEND_EXT,MATIO)
+$(call PKG_DEPEND_EXT,HDF5)

+ 72 - 49
classifier/vclassifier/VCNearestNeighbour.cpp

@@ -57,27 +57,27 @@ ClassificationResult VCNearestNeighbour::classify ( const NICE::Vector & x ) con
     priority_queue< pair<double, int> > examples;
     LOOP_ALL(teachSet) 
     {
-	EACH(classno,y)
-
-	double distance = distancefunc->calculate ( x, y );
-
-	if ( isnan(distance) )
-	{
-	    fprintf (stderr, "VCNearestNeighbour::classify: NAN value found !!\n");
-	    cerr << x << endl;
-	    cerr << y << endl;
-	}
-
-	if ( mindists[classno] > distance )
-	    mindists[classno] = distance;
-
-	if ( mindist > distance )
-	{
-	    minclass = classno;
-	    mindist  = distance;
-	}
-	if ( K > 1 ) 
-	    examples.push ( pair<double, int> ( -distance, classno ) );
+      EACH(classno,y)
+
+      double distance = distancefunc->calculate ( x, y );
+
+      if ( isnan(distance) )
+      {
+          fprintf (stderr, "VCNearestNeighbour::classify: NAN value found !!\n");
+          cerr << x << endl;
+          cerr << y << endl;
+      }
+
+      if ( mindists[classno] > distance )
+          mindists[classno] = distance;
+
+      if ( mindist > distance )
+      {
+          minclass = classno;
+          mindist  = distance;
+      }
+      if ( K > 1 ) 
+        examples.push ( pair<double, int> ( -distance, classno ) );
     }
 
     if ( mindist == 0.0 )
@@ -88,24 +88,28 @@ ClassificationResult VCNearestNeighbour::classify ( const NICE::Vector & x ) con
 		fprintf (stderr, "class %d : %f\n", i, mindists.get(i) );
 #endif
 
-    if ( K > 1 ) {
-		FullVector votes ( maxClassNo + 1 );
-		votes.set(0.0);
-		for ( int k = 0 ; k < K ; k++ )
-		{
-			const pair<double, int> & t = examples.top();
-			votes[ t.second ]++;
-			examples.pop();
-		}
-		votes.normalize();
-		return ClassificationResult ( votes.maxElement(), votes );
-    } else {
-		for ( int i = 0 ; i < mindists.size() ; i++ )
-		{
-			mindists[i] = 1.0 / (mindists[i] + 1.0);
-		}
-		mindists.normalize();
-		return ClassificationResult ( minclass, mindists );
+    if ( K > 1 )
+    {
+      FullVector votes ( maxClassNo + 1 );
+      votes.set(0.0);
+      for ( int k = 0 ; k < K ; k++ )
+      {
+        const pair<double, int> & t = examples.top();
+        votes[ t.second ]++;
+        examples.pop();
+      }
+      votes.normalize();
+      return ClassificationResult ( votes.maxElement(), votes );
+    }
+    else
+    {
+      //do we really want to do this? Only useful if we want to obtain probability-like scores
+//       for ( int i = 0 ; i < mindists.size() ; i++ )
+//       {
+//         mindists[i] = 1.0 / (mindists[i] + 1.0);
+//       }
+      //mindists.normalize();
+      return ClassificationResult ( minclass, mindists );
     }
 }
 
@@ -114,24 +118,45 @@ void VCNearestNeighbour::teach ( const LabeledSetVector & _teachSet )
 {
     fprintf (stderr, "teach using all !\n");
     maxClassNo = _teachSet.getMaxClassno();
-    teachSet = _teachSet;
+    //NOTE this copy is crucial if we clear _teachSet afterwards!
+    //therefore, take care NOT to call _teachSet.clear() anywhere outside of this method
+    this->teachSet = _teachSet;
+    
+    std::cerr << "number of known training samples: " << this->teachSet.begin()->second.size() << std::endl;
+    
+//     //just for testing - remove everything but the first element
+//     map< int, vector<NICE::Vector *> >::iterator it = this->teachSet.begin();
+//     it++;
+//     this->teachSet.erase(it, this->teachSet.end());
+//     std::cerr << "keep " << this->teachSet.size() << " elements" << std::endl;
+    
+    
 }
 
 
 void VCNearestNeighbour::teach ( int classno, const NICE::Vector & x )
 {
-    fprintf (stderr, "teach!\n");
+    std::cerr << "VCNearestNeighbour::teach one new example" << std::endl;
+    
     for ( size_t i = 0 ; i < x.size() ; i++ )
-	if ( isnan(x[i]) ) 
-	{
-	    fprintf (stderr, "There is a NAN value in within this vector: x[%d] = %f\n", (int)i, x[i]);
-	    cerr << x << endl;
-	    exit(-1);
-	}
+      if ( isnan(x[i]) ) 
+      {
+          fprintf (stderr, "There is a NAN value in within this vector: x[%d] = %f\n", (int)i, x[i]);
+          cerr << x << endl;
+          exit(-1);
+      }
     
     if ( classno > maxClassNo ) maxClassNo = classno;
 
     teachSet.add ( classno, x );
+    
+    std::cerr << "adden class " << classno << " with feature " << x << std::endl;
+    int tmpCnt(0);
+    for (LabeledSetVector::const_iterator it = this->teachSet.begin(); it != this->teachSet.end(); it++)
+    {
+      tmpCnt += it->second.size();
+    }
+    std::cerr << "number of known training samples: " << tmpCnt << std::endl;
 }
 
 void VCNearestNeighbour::finishTeaching()
@@ -159,5 +184,3 @@ void VCNearestNeighbour::restore ( std::istream & is, int format )
     teachSet.restore ( is, format );
     maxClassNo = teachSet.getMaxClassno();
 }
-
-
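
The disabled else-branch in classify() would map each per-class minimum distance d to 1/(d+1) and normalize, which is only needed when probability-like scores are required; otherwise the raw distances are returned. The transformation as a standalone sketch (helper name hypothetical, OBJREC::FullVector as used above):

    // turn per-class minimum distances into probability-like scores
    void distancesToScores ( OBJREC::FullVector & mindists )
    {
      for ( int i = 0 ; i < mindists.size() ; i++ )
        mindists[i] = 1.0 / ( mindists[i] + 1.0 ); // small distance -> large score

      mindists.normalize(); // scores now sum to 1
    }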

+ 1 - 0
classifier/vclassifier/VCNearestNeighbour.h

@@ -56,6 +56,7 @@ class VCNearestNeighbour : public VecClassifier
 		void store ( std::ostream & os, int format = 0 ) const;
 
 		void restore ( std::istream & is, int format = 0 );
+    
 };
 
 

+ 49 - 308
features/localfeatures/LFColorWeijer.cpp

@@ -2,6 +2,7 @@
 
 #include <fstream>
 #include <iostream>
+#include <exception>
 #include "vislearning/baselib/ColorSpace.h"
 
 using namespace OBJREC;
@@ -30,49 +31,12 @@ const int colors[11][3] =
 LFColorWeijer::LFColorWeijer( const Config *c )
 {
   conf = c;
-
-  bin[0] = conf->gI( "LFColorWeijer", "binL", 10 );
-  bin[1] = conf->gI( "LFColorWeijer", "bina", 20 );
-  bin[2] = conf->gI( "LFColorWeijer", "binb", 20 );
-
-  maxv[0] =  100.0;
-  maxv[1] =   80.0;
-  maxv[2] =   50.0;
-
-  minv[0] =    0.0;
-  minv[1] = -105.0;
-  minv[2] = -200.0;
-
-  tfile = conf->gS( "LFColorWeijer", "table", "/home/dbv/bilder/colorWeijer/color.txt" );
-
-  for ( int i = 0; i < 3; i++ )
-  {
-    interval[i] = ( maxv[i] - minv[i] ) / ( double )bin[i];
-  }
-
-  ifstream test( tfile.c_str() );
-
-  if ( test )
-  {
-    restore();
-  }
-  else
-  {
-    train();
-  }
+  tfile = conf->gS( "LFColorWeijer", "table", "/home/dbv/bilder/colorWeijer/w2c.txt");
+  restore();
 }
 
 LFColorWeijer::~LFColorWeijer()
 {
-  for ( uint i = 0; i < hist.size(); i++ )
-  {
-    for ( uint j = 0; j < hist[i].size(); j++ )
-    {
-      hist[i][j].clear();
-    }
-    hist[i].clear();
-  }
-  hist.clear();
 }
 
 int LFColorWeijer::getDescSize() const
@@ -80,105 +44,45 @@ int LFColorWeijer::getDescSize() const
   return LASTCOLOR;
 }
 
-void LFColorWeijer::store()
+void LFColorWeijer::restore()
 {
-  ofstream fout( tfile.c_str(), ios_base::app );
-
-  fout << hist.size() << " " << hist[0].size() << " " << hist[0][0].size() << " " << hist[0][0][0].size() << endl;
-
-  for ( uint i = 0; i < hist.size(); i++ )
+  ifstream fin( tfile.c_str() );
+  if(!fin.is_open())
   {
-    for ( uint i0 = 0; i0 < hist[i].size(); i0++ )
-    {
-      for ( uint i1 = 0; i1 < hist[i][i0].size(); i1++ )
-      {
-        for ( uint i2 = 0; i2 < hist[i][i0][i1].size(); i2++ )
-        {
-          fout << hist[i][i0][i1][i2] << " ";
-        }
-      }
-    }
+    fthrow(Exception,"ColorWeijer: could not find lookup table file.");
   }
-}
-
-void LFColorWeijer::smooth()
-{
-  int size0 = ( int )hist.size();
-  int size1 = ( int )hist[0].size();
-  int size2 = ( int )hist[0][0].size();
-  int size3 = ( int )hist[0][0][0].size();
-  for ( int i0 = 0; i0 < size1; i0++ )
+  
+  double rd, gd, bd;
+  while ( fin >> rd >> gd >> bd ) // checking the stream state avoids the classic eof() pitfall of running once more on a failed read
+  {
+    // quantize the 8-bit RGB coordinates to the 32x32x32 lookup grid
+    int r = (int)rd / 8;
+    int g = (int)gd / 8;
+    int b = (int)bd / 8;
+
+    for(int i = 0; i < 11; i++)
+    for(int i = 0; i < 11; i++)
     {
-      for ( int i2 = 0; i2 < size3; i2++ )
-      {
-        double maxval = 0.0;
-        for ( int i = 0; i < size0; i++ )
-        {
-          maxval = std::max( maxval, hist[i][i0][i1][i2] );
-        }
-        if ( maxval == 0.0 )
-        {
-          for ( int i = 0; i < size0; i++ )
-          {
-            int anz = 0;
-            for ( int a0 = std::max( i0 - 1, 0 ); a0 <= std::min( i0 + 1, size1 - 1 ); a0++ )
-            {
-              for ( int a1 = std::max( i1 - 1, 0 ); a1 <= std::min( i1 + 1, size2 - 1 ); a1++ )
-              {
-                for ( int a2 = std::max( i2 - 1, 0 ); a2 <= std::min( i2 + 1, size3 - 1 ); a2++ )
-                {
-                  anz++;
-                  hist[i][i0][i1][i2] += hist[i][a0][a1][a2];
-                }
-              }
-            }
-            hist[i][i0][i1][i2] /= anz;
-          }
-        }
-      }
+      fin >> hist[r][g][b][i];
     }
   }
-}
-
-void LFColorWeijer::restore()
-{
-  int size0, size1, size2, size3;
-  ifstream fin( tfile.c_str() );
-  fin >> size0;
-  fin >> size1;
-  fin >> size2;
-  fin >> size3;
-  hist.clear();
-
-  for ( int i = 0; i < size0; i++ )
+    /*
+  for(int r = 0; r < 32; r++)
   {
-    vector<vector<vector<double> > > v2;
-
-    for ( int i0 = 0; i0 < size1; i0++ )
+    for(int g = 0; g < 32; g++)
     {
-      vector<vector<double> > v1;
-
-      for ( int i1 = 0; i1 < size2; i1++ )
+      for(int b = 0; b < 32; b++)
       {
-        vector<double> v0;
-
-        for ( int i2 = 0; i2 < size3; i2++ )
+        for(int i = 0; i < 11; i++)
         {
-          double val;
-          fin >> val;
-          v0.push_back( val );
+          fin >> hist[r][g][b][i];
         }
-
-        v1.push_back( v0 );
       }
-
-      v2.push_back( v1 );
     }
-
-    hist.push_back( v2 );
-  }
+  }  */
 }
 
 int LFColorWeijer::getDescriptors( const NICE::Image & img, VVector & positions, VVector & features ) const
@@ -189,41 +93,25 @@ int LFColorWeijer::getDescriptors( const NICE::Image & img, VVector & positions,
 
 int LFColorWeijer::getDescriptors( const NICE::ColorImage & img, VVector & positions, VVector & features ) const
 {
-  // in Lab umwandeln
-  for ( int i = 0; i < ( int )positions.size(); i++ )
-  {
-    vector<double> vals;
-    vector<int> b;
-    int x = positions[i][0];
-    int y = positions[i][1];
-
-    double R, G, B, X, Y, Z;
-    vector<double> lab( 3, 0.0 );
-
-    R = ( double )img.getPixel( x, y, 0 ) / 255.0;
-    G = ( double )img.getPixel( x, y, 1 ) / 255.0;
-    B = ( double )img.getPixel( x, y, 2 ) / 255.0;
-
-    ColorConversion::ccRGBtoXYZ( R, G, B, &X, &Y, &Z, 0 );
-    ColorConversion::ccXYZtoCIE_Lab( X, Y, Z, &lab[0], &lab[1], &lab[2], 0 );
-
-    for ( int i = 0; i < 3; i++ )
-    {
-      int val = ( int )(( lab[i] - minv[i] ) / interval[i] );
-      val = std::min( val, bin[i] - 1 );
-      val = std::max( val, 0 );
-      b.push_back( val );
-    }
-
-    Vector feat( hist.size() );
+  int width = ( int )img.width();
+  int height = ( int )img.height();
 
-    for ( uint i = 0; i < hist.size(); i++ )
+  for ( int j = 0; j < ( int )positions.size(); j++ )
+  {
+    int x = positions[j][0];
+    int y = positions[j][1];
+    int r = img(x,y,0)/8;
+    int g = img(x,y,1)/8;
+    int b = img(x,y,2)/8;
+      
+    Vector feat( 11 );
+    for ( uint i = 0; i < 11; i++ )
     {
-      feat[i] = hist[i][b[0]][b[1]][b[2]];
+      feat[i] = hist[r][g][b][i];
     }
     features.push_back( feat );
   }
-
+ 
   return 1;
 }
 
@@ -232,20 +120,6 @@ void LFColorWeijer::visualizeFeatures( NICE::Image & mark, const VVector & posit
 
 }
 
-void LFColorWeijer::add( vector<vector<vector<double> > > &dest, vector<vector<vector<double> > > &src )
-{
-  for ( uint i0 = 0; i0 < src.size(); i0++ )
-  {
-    for ( uint i1 = 0; i1 < src[i0].size(); i1++ )
-    {
-      for ( uint i2 = 0; i2 < src[i0][i1].size(); i2++ )
-      {
-        dest[i0][i1][i2] += src[i0][i1][i2];
-      }
-    }
-  }
-}
-
 int LFColorWeijer::findColor( string &fn )
 {
   if ( fn.find( "black" ) != string::npos )
@@ -274,130 +148,6 @@ int LFColorWeijer::findColor( string &fn )
   return -1;
 }
 
-vector<vector<vector<double > > > LFColorWeijer::createTable()
-{
-  vector<vector<vector<double> > > h;
-  for ( int i0 = 0; i0 < bin[0]; i0++ )
-  {
-    vector<vector< double > > vec;
-    for ( int i1 = 0; i1 < bin[1]; i1++ )
-    {
-      vector<double> v;
-      for ( int i2 = 0; i2 < bin[2]; i2++ )
-      {
-        v.push_back( 0.0 );
-      }
-      vec.push_back( v );
-    }
-    h.push_back( vec );
-  }
-  return h;
-}
-
-void LFColorWeijer::normalize( vector<vector<vector<double> > > &tab )
-{
-  double sum = 0.0;
-
-  for ( uint i0 = 0; i0 < tab.size(); i0++ )
-  {
-    for ( uint i1 = 0; i1 < tab[i0].size(); i1++ )
-    {
-      for ( uint i2 = 0; i2 < tab[i0][i1].size(); i2++ )
-      {
-        sum += tab[i0][i1][i2];
-      }
-    }
-  }
-
-  for ( uint i0 = 0; i0 < tab.size(); i0++ )
-  {
-    for ( uint i1 = 0; i1 < tab[i0].size(); i1++ )
-    {
-      for ( uint i2 = 0; i2 < tab[i0][i1].size(); i2++ )
-      {
-        tab[i0][i1][i2] /= sum;
-      }
-    }
-  }
-
-  return;
-}
-
-void LFColorWeijer::createHist( const ColorImage &cimg, vector<vector<vector<double> > > &hist, Image &mask )
-{
-  // in Lab umwandeln
-  NICE::MultiChannelImageT<double> genimg, imglab;
-
-  ColorSpace::ColorImagetoMultiChannelImage( cimg, genimg );
-  ColorSpace::convert( imglab, genimg, ColorSpace::COLORSPACE_LAB, ColorSpace::COLORSPACE_RGB );
-
-  for ( int y = 0; y < cimg.height(); y++ )
-  {
-    for ( int x = 0; x < cimg.width(); x++ )
-    {
-      if ( mask.getPixel( x, y ) == 0 )
-        continue;
-      vector<int> b;
-      for ( int i = 0; i < 3; i++ )
-      {
-        int val = ( int )(( imglab.get( x, y, i ) - minv[i] ) / interval[i] );
-        val = std::min( val, bin[i] - 1 );
-        b.push_back( val );
-      }
-      hist[b[0]][b[1]][b[2]]++;
-    }
-  }
-}
-
-void LFColorWeijer::train()
-{
-  cout << "train Starts" << endl;
-  for ( int i = 0; i < LASTCOLOR; i++ )
-  {
-    vector<vector<vector<double> > > h = createTable();
-    hist.push_back( h );
-  }
-
-  string dir = conf->gS( "LFColorWeijer", "table", "/home/dbv/bilder/colorWeijer/ebay/" );
-  string images = conf->gS( "LFColorWeijer", "table", "test_images.txt" );
-  string mask = conf->gS( "LFColorWeijer", "table", "mask_images.txt" );
-
-  string imagesfn;
-  string maskfn;
-
-  ifstream finimg(( dir + images ).c_str() );
-  ifstream finmask(( dir + mask ).c_str() );
-  cout << dir + images << endl;
-  cout << dir + mask << endl;
-  // lese bilder und masken ein
-  while ( finimg >> imagesfn && finmask >> maskfn )
-  {
-    Image mimg( dir + maskfn );
-    cout << dir + maskfn << endl;
-    ColorImage cimg( dir + imagesfn );
-
-    int col = findColor( imagesfn );
-    vector<vector<vector<double> > > tab = createTable();
-
-    createHist( cimg, tab, mimg ); // erzeuge Lab Histogramm des Bildes
-
-    normalize( tab );
-
-    add( hist[col], tab );
-  }
-  finimg.close();
-  finmask.close();
-
-  // normalisiere alle lookuptables
-  for ( uint i = 0; i < hist.size(); i++ )
-  {
-    normalize( hist[i] );
-  }
-
-  smooth();
-  store();
-}
-
 void LFColorWeijer::visualizeFeatures( NICE::ColorImage & out, const VVector & features, const VVector & position ) const
 {
   for ( int i = 0; i < ( int )position.size(); i++ )
@@ -477,28 +227,19 @@ void LFColorWeijer::getFeats( const ColorImage &img, MultiChannelImageT<double>
 {
   int width = ( int )img.width();
   int height = ( int )img.height();
-  feats.reInit( width, height, hist.size());
-
-  NICE::MultiChannelImageT<double> genimg, imglab;
-
-  ColorSpace::ColorImagetoMultiChannelImage( img, genimg );
-  ColorSpace::convert( imglab, genimg, ColorSpace::COLORSPACE_LAB, ColorSpace::COLORSPACE_RGB );
+  feats.reInit( width, height, 11);
 
   for ( int y = 0; y < height; y++ )
   {
     for ( int x = 0; x < width; x++ )
     {
-      for ( uint i = 0; i < hist.size(); i++ )
+      int r = img(x,y,0)/8;
+      int g = img(x,y,1)/8;
+      int b = img(x,y,2)/8;
+      
+      for ( uint i = 0; i < 11; i++ )
       {
-        vector<double> b( 3, 0.0 );
-        for ( int j = 0; j < 3; j++ )
-        {
-          int val = ( int )(( imglab.get( x, y, j ) - minv[j] ) / interval[j] );
-          val = std::min( val, bin[j] - 1 );
-          val = std::max( val, 0 );
-          b[j] = val;
-        }
-        feats.set( x, y, hist[i][b[0]][b[1]][b[2]], i );
+        feats.set( x, y, hist[r][g][b][i], i );
       }
     }
   }
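
The rewritten LFColorWeijer no longer trains L*a*b* histograms; it indexes van de Weijer's precomputed color-name table directly in RGB: each 8-bit channel is divided by 8, giving a 32x32x32 grid whose cells hold 11 color-name probabilities. The core lookup as a minimal sketch (function name hypothetical, hist layout as declared in LFColorWeijer.h):

    // returns the 11-dim color-name descriptor of a single RGB pixel
    void colorNamesAt ( const double hist[32][32][32][11],
                        unsigned char R, unsigned char G, unsigned char B,
                        double feat[11] )
    {
      int r = R / 8; // quantize 0..255 to 0..31
      int g = G / 8;
      int b = B / 8;

      for ( int i = 0 ; i < 11 ; i++ )
        feat[i] = hist[r][g][b][i]; // probability of the i-th color name
    }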

+ 4 - 60
features/localfeatures/LFColorWeijer.h

@@ -41,24 +41,12 @@ class LFColorWeijer : public LocalFeature
       LASTCOLOR
     };
 
-    //! bins for L*, a* and b* chanel of L*a*b*
-    int bin[3];
+    //! lookup table
+    double hist[32][32][32][11];
 
-    //! upper limits for L*, a* and b* chanel of L*a*b*
-    double maxv[3];
-
-    //! lower limits for L*, a* and b* chanel of L*a*b*
-    double minv[3];
-
-    //! quantization interval for L*, a* and b* chanel of L*a*b* depending on bin, maxv and minv
-    double interval[3];
-
-    //! destination of the computed lookuptable
+    //! path to the precomputed lookup table
     std::string tfile;
-
-    //! lookuptable for the probabilities (4d: colors, L-channel, a-channel, b-channel)
-    std::vector<std::vector<std::vector<std::vector<double> > > > hist;
-
+    
     //! configuration file
     const NICE::Config *conf;
 
@@ -123,34 +111,11 @@ class LFColorWeijer : public LocalFeature
      */
     void visualizeFeatures ( const NICE::ColorImage & cimg, NICE::ColorImage & out ) const;
 
-    /**
-     * save parameters
-     */
-    void store();
-
-
     /**
      * load parameters
      */
     void restore();
 
-    /**
-     * smooths the look up table
-     */
-    void smooth();
-
-    /**
-     * normalizes the sum of a 3d histogram to 1
-     * @param tab 3d histogram
-     */
-    void normalize ( std::vector<std::vector<std::vector<double> > > &tab );
-
-    /**
-     * creates a new and empty table
-     * @return table of the size bin[0]xbin[1]xbin[2]
-     */
-    std::vector<std::vector<std::vector<double > > > createTable();
-
     /**
      * finds a colorname in a given string
      * @param fn input string
@@ -158,27 +123,6 @@ class LFColorWeijer : public LocalFeature
      */
     int findColor ( std::string &fn );
 
-    /**
-     * creates a new Histogram for input image depending on the image mask
-     * @param cimg input image
-     * @param hist histogram
-     * @param mask which pixel should be consider
-     */
-    void createHist ( const NICE::ColorImage &cimg, std::vector<std::vector<std::vector<double> > > &hist, NICE::Image &mask );
-
-    /**
-     * train the lookuptable
-     */
-    void train();
-
-
-    /**
-     * add a 3d table to a 3d table elementwise
-     * @param dest destination table
-     * @param src source table
-     */
-    void add ( std::vector<std::vector<std::vector<double> > > &dest, std::vector<std::vector<std::vector<double> > > &src );
-
     /**
      * transform each pixel of an image
      * @param img input image

+ 37 - 4
features/localfeatures/progs/testColorWeijer.cpp

@@ -10,19 +10,52 @@ using namespace std;
 using namespace NICE;
 using namespace OBJREC;
 
-int main(int argc, char **argv)
+
+/**
+ * @brief Printing main menu.
+ * @author Alexander Freytag
+ * @date 13-02-2013
+ * 
+ * @return void
+ **/
+void print_main_menu()
+{
+  std::cerr << "=====================================================================================" << std::endl;
+  std::cerr << "||This is a small programm demonstrating the computation of 11-dim color features.  ||" << std::endl;
+  std::cerr << "=====================================================================================" << std::endl;  
+  
+  std::cout << std::endl << "Input options:" << std::endl;
+  std::cout << "   -i <filename>  the name of the image which shall be transformed"<< std::endl;
+  return;
+}
+
+
+//demo program: compute the 11-dim color-name features of a given image and visualize them
+int main( int argc, char* argv[] )
 {
-  if (argc < 1)
+  
+  int rc;
+  if (argc<2)
   {
-    cerr << "Bitte Bild angeben" << endl;
+    print_main_menu();
     return -1;
   }
+  
+  std::string filename("");
+  while ((rc=getopt(argc,argv,"i:h"))>=0)
+  {
+    switch(rc)
+    {
+      case 'i': filename = optarg; break;
+      default: print_main_menu();
+    }
+  }   
 
   Config *conf = new Config();
   LFColorWeijer lfc(conf);
 
   //! test it
-  ColorImage cimg(argv[1]);
+  ColorImage cimg(filename);
   ColorImage out;
   lfc.visualizeFeatures (cimg, out);
 

+ 1 - 1
math/kernels/KernelEXPHIK.h

@@ -15,7 +15,7 @@
 namespace OBJREC {
 
 /** Interface for the generalized rbf kernel with HIK metric */
-class KernelEXPHIK : KernelHIK
+class KernelEXPHIK : public KernelHIK
 {
 
     protected:

+ 1 - 1
math/kernels/KernelHIK.h

@@ -14,7 +14,7 @@
 namespace OBJREC {
 
 /** Interface for the parameterized histogram intersection kernel (HIK) */
-class KernelHIK : ParameterizedKernel
+class KernelHIK : public ParameterizedKernel
 {
 
     protected:
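
Both kernel headers previously used `class X : Base`, which for classes defaults to private inheritance: a KernelHIK could not be used where a Kernel* or ParameterizedKernel* is expected, so e.g. the dynamic_cast< ParameterizedKernel * > in KCGPRegOneVsAll::teach could never succeed. Making the inheritance public fixes that. A minimal illustration of the failure mode:

    class Base { };
    class PrivDerived : Base { };          // "class" defaults to private inheritance
    class PubDerived : public Base { };

    void useBase ( Base * ) { }

    int main ()
    {
      PubDerived  a;
      PrivDerived b;
      useBase ( &a );    // fine: public base is accessible
      // useBase ( &b ); // compile error: 'Base' is an inaccessible base of 'PrivDerived'
      return 0;
    }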

+ 1 - 1
math/kernels/genericKernel.h

@@ -40,7 +40,7 @@ class GenericKernelSelection
         double alpha = conf->gD ( "Kernel","alpha",1.0 );
         double beta = conf->gD ( "Kernel","beta",1.0 );
         double gamma = conf->gD ( "Kernel","gamma",1.0 );
-        kernel = new KernelHIK ( gamma, alpha, beta );
+        kernel = new KernelEXPHIK ( gamma, alpha, beta );
       }      
       else
       {

+ 11 - 0
math/progs/testPLSA.cpp

@@ -5,6 +5,9 @@
 * @date 05/21/2008
 
 */
+
+
+#if 0
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
 #include "core/image/ImageT.h"
@@ -443,3 +446,11 @@ int main (int argc, char **argv)
     return 0;
 }
 #endif
+
+#else
+#include <stdexcept>
+int main (int argc, char **argv)
+{
+    throw std::runtime_error("Not converted to new structure of our library\n");
+    return 0;
+}
+#endif

+ 2 - 0
progs/libdepend.inc

@@ -5,5 +5,7 @@ $(call PKG_DEPEND_INT,vislearning/baselib)
 $(call PKG_DEPEND_INT,vislearning/cbaselib)
 $(call PKG_DEPEND_INT,vislearning/classifier/kernelclassifier)
 $(call PKG_DEPEND_INT,vislearning/matlabAccessHighLevel)
+$(call PKG_DEPEND_EXT,MATIO)
+$(call PKG_DEPEND_EXT,HDF5)