Bläddra i källkod

some format changes

Bjoern Froehlich 13 år sedan
förälder
incheckning
2ba185f555

+ 50 - 48
classifier/FPCnone.cpp

@@ -3,88 +3,90 @@
 #include <iostream>
 
 using namespace OBJREC;
+
 using namespace std;
+
 using namespace NICE;
 
-FPCnone::FPCnone ()
+FPCnone::FPCnone()
 {
 }
 
 FPCnone::FPCnone( const Config *_conf, string section )
 {
-	conf = _conf;
+  conf = _conf;
 }
 
 FPCnone::~FPCnone()
 {
-	//clean up
+  //clean up
 }
 
-ClassificationResult FPCnone::classify ( Example & pce )
+ClassificationResult FPCnone::classify( Example & pce )
 {
-	FullVector overall_distribution(maxClassNo+1);
-	overall_distribution[maxClassNo] = 0.0;
-
-	double maxp = -numeric_limits<double>::max();
-	int classno = 0;
-	
-	double sum  = 0.0;
-		
-	for(int i = 0; i < maxClassNo+1; i++)
-	{
-		overall_distribution[i] = (*pce.vec)[i];
-		
-		sum += overall_distribution[i];
-
-		if(maxp < overall_distribution[i])
-		{
-			classno = i;
-			maxp = overall_distribution[i];
-		}
-	}
-
-	/*for(int i = 0; i < maxClassNo; i++)
-	{
-		overall_distribution[i] /= sum;
-	}*/
-	
-	//cout << "Klasse: " << classno << " prob: " << overall_distribution[classno] << endl;
-	if(classno > 12)
-	{
-		cout << "failure" << endl;
-	}
-
-	return ClassificationResult ( classno, overall_distribution );
+  FullVector overall_distribution( maxClassNo + 1 );
+  overall_distribution[maxClassNo] = 0.0;
+
+  double maxp = -numeric_limits<double>::max();
+  int classno = 0;
+
+  double sum  = 0.0;
+
+  for ( int i = 0; i < maxClassNo + 1; i++ )
+  {
+    overall_distribution[i] = ( *pce.vec )[i];
+
+    sum += overall_distribution[i];
+
+    if ( maxp < overall_distribution[i] )
+    {
+      classno = i;
+      maxp = overall_distribution[i];
+    }
+  }
+
+  /*for(int i = 0; i < maxClassNo; i++)
+  {
+   overall_distribution[i] /= sum;
+  }*/
+
+  //cout << "Klasse: " << classno << " prob: " << overall_distribution[classno] << endl;
+  if ( classno > 12 )
+  {
+    cout << "failure" << endl;
+  }
+
+  return ClassificationResult( classno, overall_distribution );
 }
 
-void FPCnone::train ( FeaturePool & _fp, Examples & examples )
+void FPCnone::train( FeaturePool & _fp, Examples & examples )
 {
-	fp = FeaturePool(_fp);
+  fp = FeaturePool( _fp );
 }
 
 
-void FPCnone::restore (istream & is, int format)
+void FPCnone::restore( istream & is, int format )
 {
 }
 
-void FPCnone::store (ostream & os, int format) const
+void FPCnone::store( ostream & os, int format ) const
 {
 }
 
-void FPCnone::clear ()
+void FPCnone::clear()
 {
 }
 
-FeaturePoolClassifier *FPCnone::clone () const
+FeaturePoolClassifier *FPCnone::clone() const
 {
-	FPCnone *o = new FPCnone ( conf, "non" );
+  FPCnone *o = new FPCnone( conf, "non" );
 
-	o->maxClassNo = maxClassNo;
+  o->maxClassNo = maxClassNo;
 
-	return o;
+  return o;
 }
 
-void FPCnone::setComplexity ( int size )
+void FPCnone::setComplexity( int size )
 {
-	cerr << "FPCnone: no complexity to set" << endl;
+  cerr << "FPCnone: no complexity to set" << endl;
 }

+ 59 - 58
classifier/FPCnone.h

@@ -1,4 +1,4 @@
-/** 
+/**
  * @file FPCnone.h
 * @brief bad hack, not really a classifier, returns the first values as classification result
  * @author Björn Fröhlich
@@ -18,63 +18,64 @@ namespace OBJREC {
 
 class FPCnone : public FeaturePoolClassifier
 {
-    protected:
-	
-	//! the featurepool
-	FeaturePool fp;
-		
-	//! config file;
-	const Config *conf;
-	
-    public:
-	/**
-	 * standard constructor
-	 * @param conf configfile
-	 * @param section section name in configfile for classifier
-	 */
-	FPCnone( const Config *conf, std::string section="SMLR");
-      
-	
-	/**
-	 * simple constructor -> does nothing
-	 */
-	FPCnone ();
-
-	/**
-	 * simple destructor
-	 */
-	~FPCnone();
-
-	/**
-	 * main classification function
-	 * @param pce input feature
-	 * @return a classification result
-	 */
-	ClassificationResult classify ( Example & pce );
-
-	/**
-	 * start training
-	 * @param fp a featurepool (how to handle which features...)
-	 * @param examples input features
-	 */
-	void train ( FeaturePool & _fp, Examples & examples );
-
-	/**
-	 * clone this object
-	 * @return a copy of this object
-	 */
-	FeaturePoolClassifier *clone () const;
-
-	/**
-	 * set complexity for the next training process e.g. number of weak classifiers
-	 * @param size new complexity
-	 */
-	void setComplexity ( int size );
-
-	/** IO functions */
-	void restore (std::istream & is, int format = 0);
-	void store (std::ostream & os, int format = 0) const;
-	void clear ();
+
+protected:
+
+  //! the featurepool
+  FeaturePool fp;
+
+  //! config file;
+  const Config *conf;
+
+public:
+  /**
+   * standard constructor
+   * @param conf configfile
+   * @param section section name in configfile for classifier
+   */
+  FPCnone( const Config *conf, std::string section = "SMLR" );
+
+
+  /**
+   * simple constructor -> does nothing
+   */
+  FPCnone();
+
+  /**
+   * simple destructor
+   */
+  ~FPCnone();
+
+  /**
+   * main classification function
+   * @param pce input feature
+   * @return a classification result
+   */
+  ClassificationResult classify( Example & pce );
+
+  /**
+   * start training
+   * @param fp a featurepool (how to handle which features...)
+   * @param examples input features
+   */
+  void train( FeaturePool & _fp, Examples & examples );
+
+  /**
+   * clone this object
+   * @return a copy of this object
+   */
+  FeaturePoolClassifier *clone() const;
+
+  /**
+   * set complexity for the next training process e.g. number of weak classifiers
+   * @param size new complexity
+   */
+  void setComplexity( int size );
+
+  /** IO functions */
+  void restore( std::istream & is, int format = 0 );
+  void store( std::ostream & os, int format = 0 ) const;
+  void clear();
 };
 
 } // namespace

+ 52 - 43
progs/classtest.cpp

@@ -4,43 +4,52 @@
 #include <limits>
 
 using namespace std;
+
 using namespace NICE;
 
 template<class ElementType>
+
 class SparseVectorT : public VectorT<ElementType> {
-	size_t dsize;
+  size_t dsize;
+
 public:
-	SparseVectorT(const size_t size, const ElementType& element):VectorT<ElementType>(size,element){dsize = 5;}
-	virtual inline size_t size() const { return dsize; }
+  SparseVectorT( const size_t size, const ElementType& element ): VectorT<ElementType>( size, element ) {
+    dsize = 5;
+  }
+
+  virtual inline size_t size() const {
+    return dsize;
+  }
 };
 
 
-void printit(VectorT<double> &e)
+void printit( VectorT<double> &e )
 {
-	//cout << e.size() << endl;
-	size_t a = 0;
-	for(int i = 0; i < numeric_limits<int>::max(); i++)
-	{
-		a = e.size();
-	}
+  //cout << e.size() << endl;
+  size_t a = 0;
+
+  for ( int i = 0; i < numeric_limits<int>::max(); i++ )
+  {
+    a = e.size();
+  }
 }
 
-int main(int argc, char **argv)
+int main( int argc, char **argv )
 {
-	VectorT<double> k(2,2.0);
-	/*cout << "print1 vector:" << endl;
-	cout << e.size()<< endl;*/
-
-	//SparseVectorT<double> k(2,2.0);
-/*	cout << "print2 sparse:" << endl;
-	cout << k.size()<< endl;*/
-/*
-	cout << "print3 vectormethode:" << endl;
-	printit(e);
-
-	cout << "print4 sparsemethode:" << endl;*/
-	printit(k);
-	return 0;
+  VectorT<double> k( 2, 2.0 );
+  /*cout << "print1 vector:" << endl;
+  cout << e.size()<< endl;*/
+
+  //SparseVectorT<double> k(2,2.0);
+  /* cout << "print2 sparse:" << endl;
+   cout << k.size()<< endl;*/
+  /*
+   cout << "print3 vectormethode:" << endl;
+   printit(e);
+
+   cout << "print4 sparsemethode:" << endl;*/
+  printit( k );
+  return 0;
 }
 
 /*
@@ -48,42 +57,42 @@ int main(int argc, char **argv)
 class Elter
 {
 protected:
-	int t1;
-	int t2;
+ int t1;
+ int t2;
 
 public:
-	Elter():t1(1),t2(2){}
-	virtual void print(){cout << "t1: " << t1 << endl;}
+ Elter():t1(1),t2(2){}
+ virtual void print(){cout << "t1: " << t1 << endl;}
 };
 
 class Kind:public Elter
 {
 public:
-	Kind(){t1 = 3; t2 = 4;}
-	virtual void print(){cout << "t2: " << t2 << endl;}
+ Kind(){t1 = 3; t2 = 4;}
+ virtual void print(){cout << "t2: " << t2 << endl;}
 };
 
 void printit(Elter &e)
 {
-	e.print();
+ e.print();
 }
 
 int main(int argc, char **argv)
 {
-	Elter e;
-	cout << "print1 elter:" << endl;
-	e.print();
+ Elter e;
+ cout << "print1 elter:" << endl;
+ e.print();
 
-	Kind k;
-	cout << "print2 kind:" << endl;
-	k.print();
+ Kind k;
+ cout << "print2 kind:" << endl;
+ k.print();
 
-	cout << "print3 eltermethode:" << endl;
-	printit(e);
+ cout << "print3 eltermethode:" << endl;
+ printit(e);
 
 
-	cout << "print3 kindmethode:" << endl;
-	printit(k);
-	return 0;
+ cout << "print3 kindmethode:" << endl;
+ printit(k);
+ return 0;
 }
 */

+ 45 - 41
progs/convertFeatures.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
* @file convertFeatures.cpp
* @brief main program for converting feature vector files
 * @author Erik Rodner
@@ -26,47 +26,51 @@
 using namespace OBJREC;
 
 using namespace NICE;
+
 using namespace std;
 
-int main (int argc, char **argv)
+int main( int argc, char **argv )
 {
-	std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
-
-	Config conf ( argc, argv );
-	string fn = conf.gS("main", "input", "train.vec");
-	int format = conf.gI("main", "format", 0 );
-	string outfn = conf.gS("main", "output", "out.vec");
-	
-	LabeledSetVector test;
-
-	test.read (fn, format );
-	cout << "fn: " << fn << endl;
-	
-	ofstream fout(outfn.c_str());
-	ofstream cn((outfn+".cn").c_str());
-	
-	fout << test.count() << endl;
-	cn << test.count() << endl;
-	fout << test.dimension();
-	cn << 1;
-
-	for( map< int, vector<NICE::Vector *> >::iterator iter = test.begin(); iter != test.end(); ++iter ) 
-	{
-		for(int j = 0; j < iter->second.size(); j++)
-		{
-			Vector vec = *(iter->second[j]);
-			cn << endl << iter->first;
-			fout << endl;
-			for(int i = 0; i < vec.size()-1; i++)
-			{
-				fout << vec[i] << " ";
-			}
-			fout << vec[vec.size()-1];
-		}
-	}
-	
-	fout.close();
-	cn.close();
-	
-    return 0;
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+  string fn = conf.gS( "main", "input", "train.vec" );
+  int format = conf.gI( "main", "format", 0 );
+  string outfn = conf.gS( "main", "output", "out.vec" );
+
+  LabeledSetVector test;
+
+  test.read( fn, format );
+  cout << "fn: " << fn << endl;
+
+  ofstream fout( outfn.c_str() );
+  ofstream cn(( outfn + ".cn" ).c_str() );
+
+  fout << test.count() << endl;
+  cn << test.count() << endl;
+  fout << test.dimension();
+  cn << 1;
+
+  for ( map< int, vector<NICE::Vector *> >::iterator iter = test.begin(); iter != test.end(); ++iter )
+  {
+    for ( int j = 0; j < iter->second.size(); j++ )
+    {
+      Vector vec = *( iter->second[j] );
+      cn << endl << iter->first;
+      fout << endl;
+
+      for ( int i = 0; i < vec.size() - 1; i++ )
+      {
+        fout << vec[i] << " ";
+      }
+
+      fout << vec[vec.size()-1];
+    }
+  }
+
+  fout.close();
+
+  cn.close();
+
+  return 0;
 }

+ 62 - 54
progs/convertFeatures2.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
* @file convertFeatures2.cpp
* @brief main program for normalizing and converting feature vector files
 * @author Erik Rodner
@@ -24,61 +24,69 @@
 #undef DEBUG
 
 using namespace OBJREC;
+
 using namespace NICE;
+
 using namespace std;
 
-int main (int argc, char **argv)
+int main( int argc, char **argv )
 {
-	std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
-
-	Config conf ( argc, argv );
-	string fn = conf.gS("main", "input", "train.vec");
-	int format = conf.gI("main", "format", 0 );
-	string outfn = conf.gS("main", "output", "out.vec");
-	int dim = conf.gI("main", "dim", 0);
-	
-	ifstream fin(fn.c_str(), ifstream::in);
-
-	LabeledSetVector test;
-	vector<double> maxv(dim,numeric_limits<double>::min());
-	vector<double> minv(dim,numeric_limits<double>::max());
-	
-	while(fin.good())
-	{
-		Vector tmp(dim);
-		for(int i = 0; i < dim; i++)
-		{
-			fin >> tmp[i];
-			maxv[i] = std::max(maxv[i],tmp[i]);
-			minv[i] = std::min(minv[i],tmp[i]);
-		}
-		int label;
-		fin >> label;
-		label--;
-		if(label > 5)
-			label--;
-		test.add(label, tmp);
-	}
-	
-	for(int i = 0; i < dim; i++)
-	{
-		maxv[i] -= minv[i];
-	}
-	
-	for( map< int, vector<NICE::Vector *> >::iterator iter = test.begin(); iter != test.end(); ++iter ) 
-	{
-		for(int j = 0; j < iter->second.size(); j++)
-		{
-			for(int i = 0; i < iter->second[j]->size(); i++)
-			{
-				(*(iter->second[j]))[i] = ((*(iter->second[j]))[i] - minv[i]) / maxv[i];
-			}
-		}
-	}
-		
-	test.save(outfn);
-	fin.close();
-
-	
-    return 0;
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+  string fn = conf.gS( "main", "input", "train.vec" );
+  int format = conf.gI( "main", "format", 0 );
+  string outfn = conf.gS( "main", "output", "out.vec" );
+  int dim = conf.gI( "main", "dim", 0 );
+
+  ifstream fin( fn.c_str(), ifstream::in );
+
+  LabeledSetVector test;
+  vector<double> maxv( dim, numeric_limits<double>::min() );
+  vector<double> minv( dim, numeric_limits<double>::max() );
+
+  while ( fin.good() )
+  {
+    Vector tmp( dim );
+
+    for ( int i = 0; i < dim; i++ )
+    {
+      fin >> tmp[i];
+      maxv[i] = std::max( maxv[i], tmp[i] );
+      minv[i] = std::min( minv[i], tmp[i] );
+    }
+
+    int label;
+
+    fin >> label;
+    label--;
+
+    if ( label > 5 )
+      label--;
+
+    test.add( label, tmp );
+  }
+
+  for ( int i = 0; i < dim; i++ )
+  {
+    maxv[i] -= minv[i];
+  }
+
+  for ( map< int, vector<NICE::Vector *> >::iterator iter = test.begin(); iter != test.end(); ++iter )
+  {
+    for ( int j = 0; j < iter->second.size(); j++ )
+    {
+      for ( int i = 0; i < iter->second[j]->size(); i++ )
+      {
+        ( *( iter->second[j] ) )[i] = (( *( iter->second[j] ) )[i] - minv[i] ) / maxv[i];
+      }
+    }
+  }
+
+  test.save( outfn );
+
+  fin.close();
+
+
+  return 0;
 }

+ 90 - 84
progs/getRelevantClasses.cpp

@@ -29,89 +29,95 @@ using namespace std;
 /**
  test semantic segmentation routines
 */
-int main(int argc, char **argv)
+int main( int argc, char **argv )
 {
-	Config conf(argc, argv);
-
-	MultiDataset md(&conf);
-
-	const ClassNames & classNames = md.getClassNames("train");
-	
-	const LabeledSet *testFiles = md["test"];
-
-	set<int> forbidden_classes;
-
-	std::string forbidden_classes_s = conf.gS("analysis", "forbidden_classes", "");
-
-	classNames.getSelection(forbidden_classes_s, forbidden_classes);
-
-	LOOP_ALL_S(*testFiles)
-	{
-		EACH_INFO(classno, info);
-
-		std::string file = info.img();
-
-		NICE::Image lm;
-		NICE::MultiChannelImageT<double> probabilities;
-		
-		if (info.hasLocalizationInfo())
-		{
-			const LocalizationResult *l_gt = info.localization();
-
-			lm.resize(l_gt->xsize, l_gt->ysize);
-			lm.set(0);
-			l_gt->calcLabeledImage(lm, classNames.getBackgroundClass());
-		}
-		NICE::Image lm_gt;
-
-		if (info.hasLocalizationInfo())
-		{
-			const LocalizationResult *l_gt = info.localization();
-
-			lm_gt.resize(l_gt->xsize, l_gt->ysize);
-			lm_gt.set(0);
-
-			fprintf(stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n");
-			l_gt->calcLabeledImage(lm_gt, classNames.getBackgroundClass());
-		}
-		
-		set<int> classes;
-		for(int x = 0; x < lm_gt.width(); x++)
-		{
-		  for(int y = 0; y < lm_gt.height(); y++)
-		  {
-		    classes.insert(lm_gt.getPixel(x,y));
-		  }
-		}
-		
-		
-		
-		// write allowed classes
-		string cndir = conf.gS("SemSegCsurka", "cndir", "");
-		std::vector< std::string > list;
-		StringTools::split (file, '/', list);
-		cout << cndir<< "/" << list.back() << ".dat" << endl;
-		
-		string cname = list.back();
-		
-		if(cndir != "")
-		{
-			string fname = cndir+"/"+cname+".dat";
-			cout << fname << endl;
-			ofstream outfile(fname.c_str());
-			
-			set<int>::iterator theIterator;
-			for( theIterator = classes.begin(); theIterator != classes.end(); theIterator++ ) {
-			  outfile << *theIterator << endl;
-			}     
-			
-		}
-		else
-		{
-			cerr << "please define directory for writing filenames in config: SemSegCsurka::cndir" << endl;
-			exit(-1);
-		}
-	}
-
-	return 0;
+  Config conf( argc, argv );
+
+  MultiDataset md( &conf );
+
+  const ClassNames & classNames = md.getClassNames( "train" );
+
+  const LabeledSet *testFiles = md["test"];
+
+  set<int> forbidden_classes;
+
+  std::string forbidden_classes_s = conf.gS( "analysis", "forbidden_classes", "" );
+
+  classNames.getSelection( forbidden_classes_s, forbidden_classes );
+
+  LOOP_ALL_S( *testFiles )
+  {
+    EACH_INFO( classno, info );
+
+    std::string file = info.img();
+
+    NICE::Image lm;
+    NICE::MultiChannelImageT<double> probabilities;
+
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
+
+      lm.resize( l_gt->xsize, l_gt->ysize );
+      lm.set( 0 );
+      l_gt->calcLabeledImage( lm, classNames.getBackgroundClass() );
+    }
+
+    NICE::Image lm_gt;
+
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
+
+      lm_gt.resize( l_gt->xsize, l_gt->ysize );
+      lm_gt.set( 0 );
+
+      fprintf( stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n" );
+      l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+    }
+
+    set<int> classes;
+
+    for ( int x = 0; x < lm_gt.width(); x++ )
+    {
+      for ( int y = 0; y < lm_gt.height(); y++ )
+      {
+        classes.insert( lm_gt.getPixel( x, y ) );
+      }
+    }
+
+
+
+    // write allowed classes
+    string cndir = conf.gS( "SemSegCsurka", "cndir", "" );
+
+    std::vector< std::string > list;
+
+    StringTools::split( file, '/', list );
+
+    cout << cndir << "/" << list.back() << ".dat" << endl;
+
+    string cname = list.back();
+
+    if ( cndir != "" )
+    {
+      string fname = cndir + "/" + cname + ".dat";
+      cout << fname << endl;
+      ofstream outfile( fname.c_str() );
+
+      set<int>::iterator theIterator;
+
+      for ( theIterator = classes.begin(); theIterator != classes.end(); theIterator++ ) {
+        outfile << *theIterator << endl;
+      }
+
+    }
+    else
+    {
+      cerr << "please define directory for writing filenames in config: SemSegCsurka::cndir" << endl;
+      exit( -1 );
+    }
+  }
+
+  return 0;
 }

+ 264 - 238
progs/testClassifier.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file testClassifier.cpp
 * @brief main program for classifier evaluation
 * @author Erik Rodner
@@ -28,280 +28,306 @@
 using namespace OBJREC;
 
 using namespace NICE;
+
 using namespace std;
 
-void binarizeVector ( NICE::Vector & xout, const NICE::Vector & x, const NICE::Vector & thresholds )
+void binarizeVector( NICE::Vector & xout, const NICE::Vector & x, const NICE::Vector & thresholds )
+{
+  xout.resize( x.size() );
+
+  for ( size_t i = 0 ; i < x.size() ; i++ )
+    if ( fabs( x[i] ) > thresholds[i] )
+      xout[i] = 1.0;
+    else
+      xout[i] = 0.0;
+}
+
+void binarizeSet( LabeledSetVector & dst, const LabeledSetVector & src, const NICE::Vector & thresholds )
 {
-    xout.resize(x.size());
-    for ( size_t i = 0 ; i < x.size() ; i++ )
-	if ( fabs(x[i]) > thresholds[i] )
-	    xout[i] = 1.0;
-	else
-	    xout[i] = 0.0;
+  LOOP_ALL( src )
+  {
+    EACH( classno, x );
+    NICE::Vector dstv;
+    binarizeVector( dstv, x, thresholds );
+    dst.add( classno, dstv );
+  }
 }
 
-void binarizeSet ( LabeledSetVector & dst, const LabeledSetVector & src, const NICE::Vector & thresholds )
+int main( int argc, char **argv )
 {
-    LOOP_ALL(src)
+  fprintf( stderr, "testClassifier: init\n" );
+
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+
+  string wekafile = conf.gS( "main", "weka", "" );
+  string trainfn = conf.gS( "main", "train", "train.vec" );
+  string testfn = conf.gS( "main", "test", "test.vec" );
+  int format = conf.gI( "main", "format", 0 );
+  bool binarize = conf.gB( "main", "binarize", false );
+  int wekaclass = conf.gI( "main", "wekaclass", 1 );
+  string classifier_cache = conf.gS( "main", "classifiercache", "" );
+  string classifier_cache_in = conf.gS( "main", "classifierin", "" );
+  int numRuns = conf.gI( "main", "runs", 1 );
+  string writeImgNet = conf.gS( "main", "imgnet", "" );
+
+  // classno:text,classno:text,...
+  string classes = conf.gS( "main", "classes", "" );
+  int classesnb = conf.gI( "main", "classes", 0 );
+  string classesconf = conf.gS( "main", "classesconf", "" );
+
+  fprintf( stderr, "testClassifier: reading config\n" );
+  Preprocess::Init( &conf );
+
+  fprintf( stderr, "testClassifier: reading multi dataset\n" );
+  int testMaxClassNo;
+  int trainMaxClassNo;
+
+  ClassNames *classNames;
+
+  if ( classes.size() == 0 && classesnb != 0 )
+  {
+    classNames = new ClassNames();
+
+    for ( int classno = 0 ; classno < classesnb ; classno++ )
     {
-		EACH(classno,x);
-		NICE::Vector dstv;
-		binarizeVector ( dstv, x, thresholds );
-		dst.add ( classno, dstv );
+      classNames->addClass( classno, StringTools::convertToString<int> ( classno ), StringTools::convertToString<int> ( classno ) );
     }
-}
 
-int main (int argc, char **argv)
-{  
-    fprintf (stderr, "testClassifier: init\n");
-
-    std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
-
-    Config conf ( argc, argv );
-
-    string wekafile = conf.gS("main", "weka", "");
-    string trainfn = conf.gS("main", "train", "train.vec");
-	string testfn = conf.gS("main", "test", "test.vec");
-	int format = conf.gI("main", "format", 0 );
-	bool binarize = conf.gB("main", "binarize", false );
-	int wekaclass = conf.gI("main", "wekaclass", 1 );
-	string classifier_cache = conf.gS("main", "classifiercache", "");
-	string classifier_cache_in = conf.gS("main", "classifierin", "");
-	int numRuns = conf.gI("main", "runs", 1);
-	string writeImgNet = conf.gS("main", "imgnet", "");
-	
-	// classno:text,classno:text,...
-	string classes = conf.gS("main", "classes", "");
-	int classesnb = conf.gI("main", "classes", 0);
-	string classesconf = conf.gS("main", "classesconf", "");
-
-    fprintf (stderr, "testClassifier: reading config\n");
-    Preprocess::Init ( &conf );
-    
-    fprintf (stderr, "testClassifier: reading multi dataset\n");
-    int testMaxClassNo;
-    int trainMaxClassNo;
-
-    ClassNames *classNames;
-	if(classes.size() == 0 && classesnb != 0)
-	{
-		classNames = new ClassNames ();
-		for ( int classno = 0 ; classno < classesnb ; classno++ )
-		{
-			classNames->addClass ( classno, StringTools::convertToString<int> ( classno ), StringTools::convertToString<int> (classno) );
-		}
-		trainMaxClassNo = classNames->getMaxClassno();
-		testMaxClassNo = trainMaxClassNo;
-	}
-	else
-    if ( classes.size() > 0 ) 
+    trainMaxClassNo = classNames->getMaxClassno();
+
+    testMaxClassNo = trainMaxClassNo;
+  }
+  else
+    if ( classes.size() > 0 )
     {
-		classNames = new ClassNames ();
-		
-		vector<string> classes_sub;
-		StringTools::split ( string(classes), ',', classes_sub );
-
-		for ( vector<string>::const_iterator i = classes_sub.begin();
-				i != classes_sub.end(); i++ )
-		{
-			vector<string> desc;
-			StringTools::split ( *i, ':', desc);
-			if ( desc.size() != 2 ) 
-				break;
-			int classno = StringTools::convert<int> ( desc[0] );
-			classNames->addClass ( classno, desc[1], desc[1] );
-		}
-
-		trainMaxClassNo = classNames->getMaxClassno();
-		testMaxClassNo = trainMaxClassNo;
-
-		classNames->store(cout);
-    } 
-	else if ( classesconf.size() > 0 ) {
-		classNames = new ClassNames ();
-		Config cConf ( classesconf );
-		classNames->readFromConfig ( cConf, "*" );
-		trainMaxClassNo = classNames->getMaxClassno();
-		testMaxClassNo = trainMaxClassNo;
-	}
-	else 
-	{
-		MultiDataset md ( &conf );
-		classNames = new ClassNames ( md.getClassNames("train"), "*" );
-		testMaxClassNo = md.getClassNames("test").getMaxClassno();
-		trainMaxClassNo = md.getClassNames("train").getMaxClassno();
+      classNames = new ClassNames();
+
+      vector<string> classes_sub;
+      StringTools::split( string( classes ), ',', classes_sub );
+
+      for ( vector<string>::const_iterator i = classes_sub.begin();
+            i != classes_sub.end(); i++ )
+      {
+        vector<string> desc;
+        StringTools::split( *i, ':', desc );
+
+        if ( desc.size() != 2 )
+          break;
+
+        int classno = StringTools::convert<int> ( desc[0] );
+
+        classNames->addClass( classno, desc[1], desc[1] );
+      }
+
+      trainMaxClassNo = classNames->getMaxClassno();
+
+      testMaxClassNo = trainMaxClassNo;
+
+      classNames->store( cout );
     }
-        
-    LabeledSetVector train;
-    if ( classifier_cache_in.size() <= 0 )
+    else if ( classesconf.size() > 0 ) {
+      classNames = new ClassNames();
+      Config cConf( classesconf );
+      classNames->readFromConfig( cConf, "*" );
+      trainMaxClassNo = classNames->getMaxClassno();
+      testMaxClassNo = trainMaxClassNo;
+    }
+    else
     {
-		fprintf (stderr, "testClassifier: Reading training dataset from %s\n", trainfn.c_str() );
-		train.read ( trainfn, format );
-		train.printInformation();
-    } else {
-		fprintf (stderr, "testClassifier: skipping training set %s\n", trainfn.c_str() );
+      MultiDataset md( &conf );
+      classNames = new ClassNames( md.getClassNames( "train" ), "*" );
+      testMaxClassNo = md.getClassNames( "test" ).getMaxClassno();
+      trainMaxClassNo = md.getClassNames( "train" ).getMaxClassno();
     }
 
-    LabeledSetVector test;
-    fprintf (stderr, "testClassifier: Reading test dataset from %s\n", testfn.c_str() );
-    test.read ( testfn, format );
+  LabeledSetVector train;
 
-	ClassificationResults cresults;
+  if ( classifier_cache_in.size() <= 0 )
+  {
+    fprintf( stderr, "testClassifier: Reading training dataset from %s\n", trainfn.c_str() );
+    train.read( trainfn, format );
+    train.printInformation();
+  } else {
+    fprintf( stderr, "testClassifier: skipping training set %s\n", trainfn.c_str() );
+  }
 
-	ofstream outinet;
-	if(writeImgNet.length() > 0)
-	{
-		outinet.open(writeImgNet.c_str());
-	}
+  LabeledSetVector test;
 
-    for (int runs = 0 ; runs < numRuns ; runs++ ) {
-		VecClassifier *vec_classifier = NULL;
+  fprintf( stderr, "testClassifier: Reading test dataset from %s\n", testfn.c_str() );
+  test.read( testfn, format );
 
-		if ( conf.gS("main", "classifier") == "random_forest_transfer" ) 
-		{
-			FeaturePoolClassifier *fpc = new FPCRandomForestTransfer ( &conf, classNames );
-			vec_classifier = new VCFeaturePool ( &conf, fpc );
-		} else {
-			vec_classifier = CSGeneric::selectVecClassifier ( &conf, "main" );
-		}
+  ClassificationResults cresults;
 
-		NICE::Vector thresholds;
+  ofstream outinet;
 
-		if ( classifier_cache_in.size() <= 0 )
-		{
-			if ( binarize ) {
-				LabeledSetVector trainbin;
-				NICE::Vector mis;
-				MutualInformation mi;
-				fprintf (stderr, "testClassifier: computing mutual information\n");
-				mi.computeThresholdsOverall ( train, thresholds, mis );
-				fprintf (stderr, "testClassifier: done!\n");
-				binarizeSet ( trainbin, train, thresholds );
-				vec_classifier->teach ( trainbin );
-			} else {
+  if ( writeImgNet.length() > 0 )
+  {
+    outinet.open( writeImgNet.c_str() );
+  }
 
-				vec_classifier->teach ( train );
+  for ( int runs = 0 ; runs < numRuns ; runs++ ) {
+    VecClassifier *vec_classifier = NULL;
 
-			}
+    if ( conf.gS( "main", "classifier" ) == "random_forest_transfer" )
+    {
+      FeaturePoolClassifier *fpc = new FPCRandomForestTransfer( &conf, classNames );
+      vec_classifier = new VCFeaturePool( &conf, fpc );
+    } else {
+      vec_classifier = CSGeneric::selectVecClassifier( &conf, "main" );
+    }
 
-			vec_classifier->finishTeaching();
+    NICE::Vector thresholds;
 
-		if ( classifier_cache.size() > 0 )
-				vec_classifier->save ( classifier_cache );
-		} else {
-			vec_classifier->setMaxClassNo ( classNames->getMaxClassno() );
-			vec_classifier->read ( classifier_cache_in );
-		}
+    if ( classifier_cache_in.size() <= 0 )
+    {
+      if ( binarize ) {
+        LabeledSetVector trainbin;
+        NICE::Vector mis;
+        MutualInformation mi;
+        fprintf( stderr, "testClassifier: computing mutual information\n" );
+        mi.computeThresholdsOverall( train, thresholds, mis );
+        fprintf( stderr, "testClassifier: done!\n" );
+        binarizeSet( trainbin, train, thresholds );
+        vec_classifier->teach( trainbin );
+      } else {
 
-		ProgressBar pb ("Classification");
+        vec_classifier->teach( train );
 
-		pb.show();
+      }
 
-		std::vector<int> count   ( testMaxClassNo+1, 0 );
+      vec_classifier->finishTeaching();
 
-		std::vector<int> correct ( testMaxClassNo+1, 0 );
+      if ( classifier_cache.size() > 0 )
+        vec_classifier->save( classifier_cache );
+    } else {
+      vec_classifier->setMaxClassNo( classNames->getMaxClassno() );
+      vec_classifier->read( classifier_cache_in );
+    }
 
-		MatrixT<int> confusionMatrix ( testMaxClassNo+1, trainMaxClassNo+1, 0 );
+    ProgressBar pb( "Classification" );
 
-		int n = test.count();
-		LOOP_ALL(test)
-		{
-			EACH(classno,v);
-			pb.update ( n );
+    pb.show();
+
+    std::vector<int> count( testMaxClassNo + 1, 0 );
+
+    std::vector<int> correct( testMaxClassNo + 1, 0 );
+
+    MatrixT<int> confusionMatrix( testMaxClassNo + 1, trainMaxClassNo + 1, 0 );
+
+    int n = test.count();
+    LOOP_ALL( test )
+    {
+      EACH( classno, v );
+      pb.update( n );
 #ifdef DEBUG
-			fprintf (stderr, "\tclassification\n" );
+      fprintf( stderr, "\tclassification\n" );
 #endif
-			ClassificationResult r;
-
-			if ( binarize ) 
-			{
-				NICE::Vector vout;
-				binarizeVector ( vout, v, thresholds );
-				r = vec_classifier->classify ( vout );
-			} else {
-				r = vec_classifier->classify ( v );
-			}
-			r.classno_groundtruth = classno;
-			r.classname = classNames->text( r.classno );
+      ClassificationResult r;
+
+      if ( binarize )
+      {
+        NICE::Vector vout;
+        binarizeVector( vout, v, thresholds );
+        r = vec_classifier->classify( vout );
+      } else {
+        r = vec_classifier->classify( v );
+      }
+
+      r.classno_groundtruth = classno;
+
+      r.classname = classNames->text( r.classno );
 
 #ifdef DEBUG
-			if ( r.classno == classno )
-				fprintf (stderr, "+ classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno, 
-					classNames->text(classno).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno]);
-			else
-				fprintf (stderr, "- classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno, 
-					classNames->text(classno).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
-
-			r.scores.store ( cerr );
+
+      if ( r.classno == classno )
+        fprintf( stderr, "+ classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+      else
+        fprintf( stderr, "- classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+
+      r.scores.store( cerr );
+
 #endif
 
-			if(writeImgNet.length() > 0)
-			{
-				for(int z = 1; z < r.scores.size()-1; z++)
-				{
-					outinet << r.scores[z] << " ";
-				}
-				outinet << r.scores[r.scores.size()-1] << endl;
-			}
-
-			if ( r.classno >= 0 )
-			{
-				if ( classno == r.classno ) correct[classno]++;
-
-				count[classno]++;
-
-				if ( r.ok() ) {
-					confusionMatrix(classno, r.classno)++;
-				}
-				cresults.push_back ( r );
-			}
-		}
-		pb.hide();
-
-		if ( wekafile.size() > 0 )
-		{
-			string wekafile_s = wekafile;
-			if ( numRuns > 1 )
-				wekafile_s = wekafile_s + "." + StringTools::convertToString<int>(runs) + ".txt";
-			cresults.writeWEKA ( wekafile_s, wekaclass );
-		}
-			
-		int count_total = 0;
-		int correct_total = 0;
-		int classes_tested = 0;
-		double avg_recognition = 0.0;
-		for ( size_t classno = 0; classno < correct.size(); classno++ )
-		{
-			if ( count[classno] == 0 ) {
-				fprintf (stdout, "class %d not tested !!\n", (int)classno);
-			} else {
-				fprintf (stdout, "classification result class %d (\"%s\") : %5.2f %%\n",
-					(int)classno, classNames->text(classno).c_str(), correct[classno]*100.0/count[classno] );
-				avg_recognition += correct[classno]/(double)count[classno];
-				classes_tested++;
-			}
-
-			count_total += count[classno];
-			correct_total += correct[classno];
-		}
-		avg_recognition /= classes_tested;
-
-
-		fprintf (stdout, "overall recognition rate : %-5.3f %%\n", correct_total*100.0/count_total );
-		fprintf (stdout, "average recognition rate : %-5.3f %%\n", avg_recognition*100 );
-		fprintf (stdout, "total:%d misclassified:%d\n", count_total, count_total - correct_total );
-
-		int max_count = *(max_element( count.begin(), count.end() ));
-		fprintf (stdout, "no of classes : %d\n", classNames->numClasses() );
-		fprintf (stdout, "lower bound 1 : %f\n", 100.0/(classNames->numClasses()));
-		fprintf (stdout, "lower bound 2 : %f\n", max_count * 100.0 / (double) count_total);
-
-		cout << confusionMatrix << endl;
-		
-		delete vec_classifier;
+      if ( writeImgNet.length() > 0 )
+      {
+        for ( int z = 1; z < r.scores.size() - 1; z++ )
+        {
+          outinet << r.scores[z] << " ";
+        }
+
+        outinet << r.scores[r.scores.size()-1] << endl;
+      }
+
+      if ( r.classno >= 0 )
+      {
+        if ( classno == r.classno ) correct[classno]++;
+
+        count[classno]++;
+
+        if ( r.ok() ) {
+          confusionMatrix( classno, r.classno )++;
+        }
+
+        cresults.push_back( r );
+      }
     }
 
-    delete classNames;
+    pb.hide();
+
+    if ( wekafile.size() > 0 )
+    {
+      string wekafile_s = wekafile;
+
+      if ( numRuns > 1 )
+        wekafile_s = wekafile_s + "." + StringTools::convertToString<int>( runs ) + ".txt";
+
+      cresults.writeWEKA( wekafile_s, wekaclass );
+    }
+
+    int count_total = 0;
+
+    int correct_total = 0;
+    int classes_tested = 0;
+    double avg_recognition = 0.0;
+
+    for ( size_t classno = 0; classno < correct.size(); classno++ )
+    {
+      if ( count[classno] == 0 ) {
+        fprintf( stdout, "class %d not tested !!\n", ( int )classno );
+      } else {
+        fprintf( stdout, "classification result class %d (\"%s\") : %5.2f %%\n",
+                 ( int )classno, classNames->text( classno ).c_str(), correct[classno]*100.0 / count[classno] );
+        avg_recognition += correct[classno] / ( double )count[classno];
+        classes_tested++;
+      }
+
+      count_total += count[classno];
+
+      correct_total += correct[classno];
+    }
+
+    avg_recognition /= classes_tested;
+
+
+    fprintf( stdout, "overall recognition rate : %-5.3f %%\n", correct_total*100.0 / count_total );
+    fprintf( stdout, "average recognition rate : %-5.3f %%\n", avg_recognition*100 );
+    fprintf( stdout, "total:%d misclassified:%d\n", count_total, count_total - correct_total );
+
+    int max_count = *( max_element( count.begin(), count.end() ) );
+    fprintf( stdout, "no of classes : %d\n", classNames->numClasses() );
+    fprintf( stdout, "lower bound 1 : %f\n", 100.0 / ( classNames->numClasses() ) );
+    fprintf( stdout, "lower bound 2 : %f\n", max_count * 100.0 / ( double ) count_total );
+
+    cout << confusionMatrix << endl;
+
+    delete vec_classifier;
+  }
+
+  delete classNames;
 
-    return 0;
+  return 0;
 }

+ 291 - 264
progs/testClassifierGMM.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file testClassifier.cpp
 * @brief main program for classifier evaluation
 * @author Erik Rodner
@@ -28,292 +28,319 @@
 using namespace OBJREC;
 
 using namespace NICE;
+
 using namespace std;
 
-void binarizeVector ( NICE::Vector & xout, const NICE::Vector & x, const NICE::Vector & thresholds )
+void binarizeVector( NICE::Vector & xout, const NICE::Vector & x, const NICE::Vector & thresholds )
+{
+  xout.resize( x.size() );
+
+  for ( size_t i = 0 ; i < x.size() ; i++ )
+    if ( fabs( x[i] ) > thresholds[i] )
+      xout[i] = 1.0;
+    else
+      xout[i] = 0.0;
+}
+
+void binarizeSet( LabeledSetVector & dst, const LabeledSetVector & src, const NICE::Vector & thresholds )
 {
-    xout.resize(x.size());
-    for ( size_t i = 0 ; i < x.size() ; i++ )
-	if ( fabs(x[i]) > thresholds[i] )
-	    xout[i] = 1.0;
-	else
-	    xout[i] = 0.0;
+  LOOP_ALL( src )
+  {
+    EACH( classno, x );
+    NICE::Vector dstv;
+    binarizeVector( dstv, x, thresholds );
+    dst.add( classno, dstv );
+  }
 }
 
-void binarizeSet ( LabeledSetVector & dst, const LabeledSetVector & src, const NICE::Vector & thresholds )
+int main( int argc, char **argv )
 {
-    LOOP_ALL(src)
+  fprintf( stderr, "testClassifier: init\n" );
+
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+
+  string wekafile = conf.gS( "main", "weka", "" );
+  string trainfn = conf.gS( "main", "train", "train.vec" );
+  string testfn = conf.gS( "main", "test", "test.vec" );
+  int format = conf.gI( "main", "format", 0 );
+  bool binarize = conf.gB( "main", "binarize", false );
+  int wekaclass = conf.gI( "main", "wekaclass", 1 );
+  string classifier_cache = conf.gS( "main", "classifiercache", "" );
+  string classifier_cache_in = conf.gS( "main", "classifierin", "" );
+  int numRuns = conf.gI( "main", "runs", 1 );
+
+  // classno:text,classno:text,...
+  string classes = conf.gS( "main", "classes", "" );
+  int classesnb = conf.gI( "main", "classes", 0 );
+  string classesconf = conf.gS( "main", "classesconf", "" );
+
+  fprintf( stderr, "testClassifier: reading config\n" );
+  Preprocess::Init( &conf );
+
+  fprintf( stderr, "testClassifier: reading multi dataset\n" );
+  int testMaxClassNo;
+  int trainMaxClassNo;
+
+
+  ClassNames *classNames;
+
+  if ( classes.size() == 0 && classesnb != 0 )
+  {
+    classNames = new ClassNames();
+
+    for ( int classno = 0 ; classno < classesnb ; classno++ )
     {
-		EACH(classno,x);
-		NICE::Vector dstv;
-		binarizeVector ( dstv, x, thresholds );
-		dst.add ( classno, dstv );
+      classNames->addClass( classno, StringTools::convertToString<int> ( classno ), StringTools::convertToString<int> ( classno ) );
     }
-}
 
-int main (int argc, char **argv)
-{  
-    fprintf (stderr, "testClassifier: init\n");
-
-    std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
-
-    Config conf ( argc, argv );
-
-    string wekafile = conf.gS("main", "weka", "");
-    string trainfn = conf.gS("main", "train", "train.vec");
-	string testfn = conf.gS("main", "test", "test.vec");
-	int format = conf.gI("main", "format", 0 );
-	bool binarize = conf.gB("main", "binarize", false );
-	int wekaclass = conf.gI("main", "wekaclass", 1 );
-	string classifier_cache = conf.gS("main", "classifiercache", "");
-	string classifier_cache_in = conf.gS("main", "classifierin", "");
-	int numRuns = conf.gI("main", "runs", 1);
-	
-	// classno:text,classno:text,...
-	string classes = conf.gS("main", "classes", "");
-	int classesnb = conf.gI("main", "classes", 0);
-	string classesconf = conf.gS("main", "classesconf", "");
-
-    fprintf (stderr, "testClassifier: reading config\n");
-    Preprocess::Init ( &conf );
-    
-    fprintf (stderr, "testClassifier: reading multi dataset\n");
-    int testMaxClassNo;
-    int trainMaxClassNo;
-    
-
-    ClassNames *classNames;
-	if(classes.size() == 0 && classesnb != 0)
-	{
-		classNames = new ClassNames ();
-		for ( int classno = 0 ; classno < classesnb ; classno++ )
-		{
-			classNames->addClass ( classno, StringTools::convertToString<int> ( classno ), StringTools::convertToString<int> (classno) );
-		}
-		trainMaxClassNo = classNames->getMaxClassno();
-		testMaxClassNo = trainMaxClassNo;
-	}
-	else
-    if ( classes.size() > 0 ) 
+    trainMaxClassNo = classNames->getMaxClassno();
+
+    testMaxClassNo = trainMaxClassNo;
+  }
+  else
+    if ( classes.size() > 0 )
     {
-		classNames = new ClassNames ();
-		
-		vector<string> classes_sub;
-		StringTools::split ( string(classes), ',', classes_sub );
-
-		for ( vector<string>::const_iterator i = classes_sub.begin();
-				i != classes_sub.end(); i++ )
-		{
-			vector<string> desc;
-			StringTools::split ( *i, ':', desc);
-			if ( desc.size() != 2 ) 
-				break;
-			int classno = StringTools::convert<int> ( desc[0] );
-			classNames->addClass ( classno, desc[1], desc[1] );
-		}
-
-		trainMaxClassNo = classNames->getMaxClassno();
-		testMaxClassNo = trainMaxClassNo;
-
-		classNames->store(cout);
-    } 
-	else if ( classesconf.size() > 0 ) {
-		classNames = new ClassNames ();
-		Config cConf ( classesconf );
-		classNames->readFromConfig ( cConf, "*" );
-		trainMaxClassNo = classNames->getMaxClassno();
-		testMaxClassNo = trainMaxClassNo;
-	}
-	else 
-	{
-		MultiDataset md ( &conf );
-		classNames = new ClassNames ( md.getClassNames("train"), "*" );
-		testMaxClassNo = md.getClassNames("test").getMaxClassno();
-		trainMaxClassNo = md.getClassNames("train").getMaxClassno();
+      classNames = new ClassNames();
+
+      vector<string> classes_sub;
+      StringTools::split( string( classes ), ',', classes_sub );
+
+      for ( vector<string>::const_iterator i = classes_sub.begin();
+            i != classes_sub.end(); i++ )
+      {
+        vector<string> desc;
+        StringTools::split( *i, ':', desc );
+
+        if ( desc.size() != 2 )
+          break;
+
+        int classno = StringTools::convert<int> ( desc[0] );
+
+        classNames->addClass( classno, desc[1], desc[1] );
+      }
+
+      trainMaxClassNo = classNames->getMaxClassno();
+
+      testMaxClassNo = trainMaxClassNo;
+
+      classNames->store( cout );
     }
-        
-    LabeledSetVector train;
-    if ( classifier_cache_in.size() <= 0 )
+    else if ( classesconf.size() > 0 ) {
+      classNames = new ClassNames();
+      Config cConf( classesconf );
+      classNames->readFromConfig( cConf, "*" );
+      trainMaxClassNo = classNames->getMaxClassno();
+      testMaxClassNo = trainMaxClassNo;
+    }
+    else
+    {
+      MultiDataset md( &conf );
+      classNames = new ClassNames( md.getClassNames( "train" ), "*" );
+      testMaxClassNo = md.getClassNames( "test" ).getMaxClassno();
+      trainMaxClassNo = md.getClassNames( "train" ).getMaxClassno();
+    }
+
+  LabeledSetVector train;
+
+  if ( classifier_cache_in.size() <= 0 )
+  {
+    fprintf( stderr, "testClassifier: Reading training dataset from %s\n", trainfn.c_str() );
+    train.read( trainfn, format );
+    train.printInformation();
+  } else {
+    fprintf( stderr, "testClassifier: skipping training set %s\n", trainfn.c_str() );
+  }
+
+  LabeledSetVector test;
+
+  fprintf( stderr, "testClassifier: Reading test dataset from %s\n", testfn.c_str() );
+  test.read( testfn, format );
+
+  GMM *gmm = NULL;
+  int nbgmm = conf.gI( "main", "gmm", 0 );
+
+  if ( nbgmm > 0 )
+  {
+    gmm = new GMM( &conf, nbgmm );
+    VVector vset;
+    Vector l;
+    train.getFlatRepresentation( vset, l );
+    gmm->computeMixture( vset );
+
+    map<int, vector<NICE::Vector *> >::iterator iter;
+
+    for ( iter = train.begin(); iter != train.end(); ++iter )
+    {
+      for ( uint i = 0; i < iter->second.size(); ++i )
+      {
+        gmm->getProbs( *( iter->second[i] ), *( iter->second[i] ) );
+      }
+    }
+
+    for ( iter = test.begin(); iter != test.end(); ++iter )
+    {
+      for ( uint i = 0; i < iter->second.size(); ++i )
+      {
+        gmm->getProbs( *( iter->second[i] ), *( iter->second[i] ) );
+      }
+    }
+  }
+
+  ClassificationResults cresults;
+
+
+  for ( int runs = 0 ; runs < numRuns ; runs++ ) {
+    VecClassifier *vec_classifier = NULL;
+
+    if ( conf.gS( "main", "classifier" ) == "random_forest_transfer" )
     {
-		fprintf (stderr, "testClassifier: Reading training dataset from %s\n", trainfn.c_str() );
-		train.read ( trainfn, format );
-		train.printInformation();
+      FeaturePoolClassifier *fpc = new FPCRandomForestTransfer( &conf, classNames );
+      vec_classifier = new VCFeaturePool( &conf, fpc );
     } else {
-		fprintf (stderr, "testClassifier: skipping training set %s\n", trainfn.c_str() );
+      vec_classifier = CSGeneric::selectVecClassifier( &conf, "main" );
     }
 
-    LabeledSetVector test;
-    fprintf (stderr, "testClassifier: Reading test dataset from %s\n", testfn.c_str() );
-    test.read ( testfn, format );
+    NICE::Vector thresholds;
 
-    GMM *gmm = NULL;
-    int nbgmm = conf.gI("main", "gmm", 0);
-    if(nbgmm > 0)
+    if ( classifier_cache_in.size() <= 0 )
     {
-	gmm = new GMM(&conf, nbgmm);
-	VVector vset;
-	Vector l;
-	train.getFlatRepresentation(vset,l);
-	gmm->computeMixture(vset);
-	
-	map<int, vector<NICE::Vector *> >::iterator iter;
-    	for( iter = train.begin(); iter != train.end(); ++iter ) 
-	{
-		for(uint i = 0; i < iter->second.size(); ++i)
-		{
-			gmm->getProbs(*(iter->second[i]),*(iter->second[i]));
-		}
-	}
-	
-	for( iter = test.begin(); iter != test.end(); ++iter ) 
-	{
-		for(uint i = 0; i < iter->second.size(); ++i)
-		{
-			gmm->getProbs(*(iter->second[i]),*(iter->second[i]));
-		}
-	}
+      if ( binarize ) {
+        LabeledSetVector trainbin;
+        NICE::Vector mis;
+        MutualInformation mi;
+        fprintf( stderr, "testClassifier: computing mutual information\n" );
+        mi.computeThresholdsOverall( train, thresholds, mis );
+        fprintf( stderr, "testClassifier: done!\n" );
+        binarizeSet( trainbin, train, thresholds );
+        vec_classifier->teach( trainbin );
+      } else {
+
+        vec_classifier->teach( train );
+
+      }
+
+      vec_classifier->finishTeaching();
+
+      if ( classifier_cache.size() > 0 )
+        vec_classifier->save( classifier_cache );
+    } else {
+      vec_classifier->setMaxClassNo( classNames->getMaxClassno() );
+      vec_classifier->read( classifier_cache_in );
     }
-    
-	ClassificationResults cresults;
-	
-
-    for (int runs = 0 ; runs < numRuns ; runs++ ) {
-		VecClassifier *vec_classifier = NULL;
-
-		if ( conf.gS("main", "classifier") == "random_forest_transfer" ) 
-		{
-			FeaturePoolClassifier *fpc = new FPCRandomForestTransfer ( &conf, classNames );
-			vec_classifier = new VCFeaturePool ( &conf, fpc );
-		} else {
-			vec_classifier = CSGeneric::selectVecClassifier ( &conf, "main" );
-		}
-
-		NICE::Vector thresholds;
-
-		if ( classifier_cache_in.size() <= 0 )
-		{
-			if ( binarize ) {
-				LabeledSetVector trainbin;
-				NICE::Vector mis;
-				MutualInformation mi;
-				fprintf (stderr, "testClassifier: computing mutual information\n");
-				mi.computeThresholdsOverall ( train, thresholds, mis );
-				fprintf (stderr, "testClassifier: done!\n");
-				binarizeSet ( trainbin, train, thresholds );
-				vec_classifier->teach ( trainbin );
-			} else {
-
-				vec_classifier->teach ( train );
-
-			}
-
-			vec_classifier->finishTeaching();
-
-		if ( classifier_cache.size() > 0 )
-				vec_classifier->save ( classifier_cache );
-		} else {
-			vec_classifier->setMaxClassNo ( classNames->getMaxClassno() );
-			vec_classifier->read ( classifier_cache_in );
-		}
-
-		ProgressBar pb ("Classification");
-
-		pb.show();
-
-		std::vector<int> count   ( testMaxClassNo+1, 0 );
-
-		std::vector<int> correct ( testMaxClassNo+1, 0 );
-
-		MatrixT<int> confusionMatrix ( testMaxClassNo+1, trainMaxClassNo+1, 0 );
-
-		int n = test.count();
-		LOOP_ALL(test)
-		{
-			EACH(classno,v);
-			pb.update ( n );
-
-			fprintf (stderr, "\tclassification\n" );
-			ClassificationResult r;
-
-			if ( binarize ) 
-			{
-				NICE::Vector vout;
-				binarizeVector ( vout, v, thresholds );
-				r = vec_classifier->classify ( vout );
- 			} else {
-				r = vec_classifier->classify ( v );
-			}
-			r.classno_groundtruth = classno;
-			r.classname = classNames->text( r.classno );
+
+    ProgressBar pb( "Classification" );
+
+    pb.show();
+
+    std::vector<int> count( testMaxClassNo + 1, 0 );
+
+    std::vector<int> correct( testMaxClassNo + 1, 0 );
+
+    MatrixT<int> confusionMatrix( testMaxClassNo + 1, trainMaxClassNo + 1, 0 );
+
+    int n = test.count();
+    LOOP_ALL( test )
+    {
+      EACH( classno, v );
+      pb.update( n );
+
+      fprintf( stderr, "\tclassification\n" );
+      ClassificationResult r;
+
+      if ( binarize )
+      {
+        NICE::Vector vout;
+        binarizeVector( vout, v, thresholds );
+        r = vec_classifier->classify( vout );
+      } else {
+        r = vec_classifier->classify( v );
+      }
+
+      r.classno_groundtruth = classno;
+
+      r.classname = classNames->text( r.classno );
 
 #ifdef DEBUG
-			if ( r.classno == classno )
-				fprintf (stderr, "+ classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno, 
-					classNames->text(classno).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno]);
-			else
-				fprintf (stderr, "- classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno, 
-					classNames->text(classno).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+
+      if ( r.classno == classno )
+        fprintf( stderr, "+ classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+      else
+        fprintf( stderr, "- classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+
 #endif
 
-			r.scores.store ( cerr );
-			if ( r.classno >= 0 )
-			{
-				if ( classno == r.classno ) correct[classno]++;
-
-				count[classno]++;
-
-				if ( r.ok() ) {
-					confusionMatrix(classno, r.classno)++;
-				}
-				cresults.push_back ( r );
-			}
-		}
-		pb.hide();
-
-		if ( wekafile.size() > 0 )
-		{
-			string wekafile_s = wekafile;
-			if ( numRuns > 1 )
-				wekafile_s = wekafile_s + "." + StringTools::convertToString<int>(runs) + ".txt";
-			cresults.writeWEKA ( wekafile_s, wekaclass );
-		}
-			
-		int count_total = 0;
-		int correct_total = 0;
-		int classes_tested = 0;
-		double avg_recognition = 0.0;
-		for ( size_t classno = 0; classno < correct.size(); classno++ )
-		{
-			if ( count[classno] == 0 ) {
-				fprintf (stdout, "class %d not tested !!\n", (int)classno);
-			} else {
-				fprintf (stdout, "classification result class %d (\"%s\") : %5.2f %%\n",
-					(int)classno, classNames->text(classno).c_str(), correct[classno]*100.0/count[classno] );
-				avg_recognition += correct[classno]/(double)count[classno];
-				classes_tested++;
-			}
-
-			count_total += count[classno];
-			correct_total += correct[classno];
-		}
-		avg_recognition /= classes_tested;
-
-
-		fprintf (stdout, "overall recognition rate : %-5.3f %%\n", correct_total*100.0/count_total );
-		fprintf (stdout, "average recognition rate : %-5.3f %%\n", avg_recognition*100 );
-		fprintf (stdout, "total:%d misclassified:%d\n", count_total, count_total - correct_total );
-
-		int max_count = *(max_element( count.begin(), count.end() ));
-		fprintf (stdout, "no of classes : %d\n", classNames->numClasses() );
-		fprintf (stdout, "lower bound 1 : %f\n", 100.0/(classNames->numClasses()));
-		fprintf (stdout, "lower bound 2 : %f\n", max_count * 100.0 / (double) count_total);
-
-		cout << confusionMatrix << endl;
-		
-		delete vec_classifier;
+      r.scores.store( cerr );
+
+      if ( r.classno >= 0 )
+      {
+        if ( classno == r.classno ) correct[classno]++;
+
+        count[classno]++;
+
+        if ( r.ok() ) {
+          confusionMatrix( classno, r.classno )++;
+        }
+
+        cresults.push_back( r );
+      }
     }
 
-    delete classNames;
+    pb.hide();
+
+    if ( wekafile.size() > 0 )
+    {
+      string wekafile_s = wekafile;
+
+      if ( numRuns > 1 )
+        wekafile_s = wekafile_s + "." + StringTools::convertToString<int>( runs ) + ".txt";
+
+      cresults.writeWEKA( wekafile_s, wekaclass );
+    }
+
+    int count_total = 0;
+
+    int correct_total = 0;
+    int classes_tested = 0;
+    double avg_recognition = 0.0;
+
+    for ( size_t classno = 0; classno < correct.size(); classno++ )
+    {
+      if ( count[classno] == 0 ) {
+        fprintf( stdout, "class %d not tested !!\n", ( int )classno );
+      } else {
+        fprintf( stdout, "classification result class %d (\"%s\") : %5.2f %%\n",
+                 ( int )classno, classNames->text( classno ).c_str(), correct[classno]*100.0 / count[classno] );
+        avg_recognition += correct[classno] / ( double )count[classno];
+        classes_tested++;
+      }
+
+      count_total += count[classno];
+
+      correct_total += correct[classno];
+    }
+
+    avg_recognition /= classes_tested;
+
+
+    fprintf( stdout, "overall recognition rate : %-5.3f %%\n", correct_total*100.0 / count_total );
+    fprintf( stdout, "average recognition rate : %-5.3f %%\n", avg_recognition*100 );
+    fprintf( stdout, "total:%d misclassified:%d\n", count_total, count_total - correct_total );
+
+    int max_count = *( max_element( count.begin(), count.end() ) );
+    fprintf( stdout, "no of classes : %d\n", classNames->numClasses() );
+    fprintf( stdout, "lower bound 1 : %f\n", 100.0 / ( classNames->numClasses() ) );
+    fprintf( stdout, "lower bound 2 : %f\n", max_count * 100.0 / ( double ) count_total );
+
+    cout << confusionMatrix << endl;
+
+    delete vec_classifier;
+  }
+
+  delete classNames;
 
-    return 0;
+  return 0;
 }

+ 32 - 29
progs/testRF.cpp

@@ -12,37 +12,40 @@
 #include "vislearning/baselib/Globals.h"
 
 using namespace OBJREC;
+
 using namespace NICE;
+
 using namespace std;
 
-int main (int argc, char **argv)
+int main( int argc, char **argv )
 {
-	if(argc < 1)
-	{
-		cerr << "Bitte Datei angeben" << endl;
-		return -1;
-	}
-
-	string filename;
-	filename += argv[1];
-	cout << "file: " << filename << endl;
-
-	Config *conf = new Config();
-
-	FeaturePoolClassifier *fpcrfCs = new FPCRandomForests(conf, "CsurkaForest");
-
-	//Vector *vec = new Vector(384);
-	//Example ex(vec);
-
-	fpcrfCs->setMaxClassNo(8);
-	fpcrfCs->read(filename);
-	/*
-	ClassificationResult r;
-
-	if(fpcrfCs != NULL)
-	{
-		r = fpcrfCs->classify ( ex );
-	}
-	*/
-	return 0;
+  if ( argc < 1 )
+  {
+    cerr << "Bitte Datei angeben" << endl;
+    return -1;
+  }
+
+  string filename;
+
+  filename += argv[1];
+  cout << "file: " << filename << endl;
+
+  Config *conf = new Config();
+
+  FeaturePoolClassifier *fpcrfCs = new FPCRandomForests( conf, "CsurkaForest" );
+
+  //Vector *vec = new Vector(384);
+  //Example ex(vec);
+
+  fpcrfCs->setMaxClassNo( 8 );
+  fpcrfCs->read( filename );
+  /*
+  ClassificationResult r;
+
+  if(fpcrfCs != NULL)
+  {
+   r = fpcrfCs->classify ( ex );
+  }
+  */
+  return 0;
 }

+ 222 - 209
progs/testSemanticSegmentation.cpp

@@ -32,268 +32,281 @@ using namespace NICE;
 
 using namespace std;
 
-void updateMatrix(const NICE::Image & img, const NICE::Image & gt,
-																		NICE::Matrix & M, const set<int> & forbidden_classes)
+void updateMatrix( const NICE::Image & img, const NICE::Image & gt,
+                   NICE::Matrix & M, const set<int> & forbidden_classes )
 {
-	double subsamplex = gt.width() / (double)img.width();
-	double subsampley = gt.height() / (double)img.height();
+  double subsamplex = gt.width() / ( double )img.width();
+  double subsampley = gt.height() / ( double )img.height();
 
-	for (int y = 0 ; y < gt.height() ; y++)
-		for (int x = 0 ; x < gt.width() ; x++)
-		{
-			int xx = (int)(x / subsamplex);
-			int yy = (int)(y / subsampley);
+  for ( int y = 0 ; y < gt.height() ; y++ )
+    for ( int x = 0 ; x < gt.width() ; x++ )
+    {
+      int xx = ( int )( x / subsamplex );
+      int yy = ( int )( y / subsampley );
 
-			if (xx < 0) xx = 0;
+      if ( xx < 0 ) xx = 0;
 
-			if (yy < 0) yy = 0;
+      if ( yy < 0 ) yy = 0;
 
-			if (xx > img.width() - 1) xx = img.width() - 1;
+      if ( xx > img.width() - 1 ) xx = img.width() - 1;
 
-			if (yy > img.height() - 1) yy = img.height() - 1;
+      if ( yy > img.height() - 1 ) yy = img.height() - 1;
 
-			int cimg = img.getPixel(xx, yy);
+      int cimg = img.getPixel( xx, yy );
 
-			int gimg = gt.getPixel(x, y);
+      int gimg = gt.getPixel( x, y );
 
-			if (forbidden_classes.find(gimg) == forbidden_classes.end())
-			{
-				M(gimg, cimg)++;
-			}
-		}
+      if ( forbidden_classes.find( gimg ) == forbidden_classes.end() )
+      {
+        M( gimg, cimg )++;
+      }
+    }
 }
 
 /**
  test semantic segmentation routines
 */
-int main(int argc, char **argv)
+int main( int argc, char **argv )
 {
-	std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
 
-	Config conf(argc, argv);
+  Config conf( argc, argv );
 
-	bool show_result = conf.gB("debug", "show_results", false);
+  bool show_result = conf.gB( "debug", "show_results", false );
 
-	bool write_results = conf.gB("debug", "write_results", false);
+  bool write_results = conf.gB( "debug", "write_results", false );
 
-	bool write_results_pascal = conf.gB("debug", "write_results_pascal", false);
+  bool write_results_pascal = conf.gB( "debug", "write_results_pascal", false );
 
-	std::string resultdir = conf.gS("debug", "resultdir", ".");
+  std::string resultdir = conf.gS( "debug", "resultdir", "." );
 
-	if (write_results)
-	{
-		cerr << "Writing Results to " << resultdir << endl;
-	}
+  if ( write_results )
+  {
+    cerr << "Writing Results to " << resultdir << endl;
+  }
 
-	MultiDataset md(&conf);
+  MultiDataset md( &conf );
 
-	const ClassNames & classNames = md.getClassNames("train");
-	
-	string method = conf.gS("main","method","SSCsurka");
+  const ClassNames & classNames = md.getClassNames( "train" );
 
-	SemanticSegmentation *semseg = NULL;
-	if(method == "SSCsurka")
-	{
-		semseg = new SemSegCsurka ( &conf, &md);
-	}
-	else if(method == "SSContext")
-	{
-		semseg = new SemSegContextTree ( &conf, &md);
-	}
-	//SemanticSegmentation *semseg = new SemSegLocal ( &conf, &md );
-	//SemanticSegmentation *semseg = new SemSegSTF ( &conf, &md );
-	//SemanticSegmentation *semseg = new SemSegRegionBased(&conf, &md);
+  string method = conf.gS( "main", "method", "SSCsurka" );
 
-	const LabeledSet *testFiles = md["test"];
-	NICE::Matrix M(classNames.getMaxClassno() + 1, classNames.getMaxClassno() + 1);
-	M.set(0);
+  SemanticSegmentation *semseg = NULL;
 
-	set<int> forbidden_classes;
+  if ( method == "SSCsurka" )
+  {
+    semseg = new SemSegCsurka( &conf, &md );
+  }
+  else if ( method == "SSContext" )
+  {
+    semseg = new SemSegContextTree( &conf, &md );
+  }
 
-	std::string forbidden_classes_s = conf.gS("analysis", "forbidden_classes", "");
+  //SemanticSegmentation *semseg = new SemSegLocal ( &conf, &md );
+  //SemanticSegmentation *semseg = new SemSegSTF ( &conf, &md );
+  //SemanticSegmentation *semseg = new SemSegRegionBased(&conf, &md);
 
-	classNames.getSelection(forbidden_classes_s, forbidden_classes);
+  const LabeledSet *testFiles = md["test"];
 
-	ProgressBar pb("Semantic Segmentation Analysis");
-	pb.show();
+  NICE::Matrix M( classNames.getMaxClassno() + 1, classNames.getMaxClassno() + 1 );
 
-	int fileno = 0;
+  M.set( 0 );
 
-	LOOP_ALL_S(*testFiles)
-	{
-		EACH_INFO(classno, info);
-		std::string file = info.img();
+  set<int> forbidden_classes;
 
-		NICE::Image lm;
-		NICE::MultiChannelImageT<double> probabilities;
-		
-		if (info.hasLocalizationInfo())
-		{
-			const LocalizationResult *l_gt = info.localization();
+  std::string forbidden_classes_s = conf.gS( "analysis", "forbidden_classes", "" );
 
-			lm.resize(l_gt->xsize, l_gt->ysize);
-			lm.set(0);
-			l_gt->calcLabeledImage(lm, classNames.getBackgroundClass());
-		}
+  classNames.getSelection( forbidden_classes_s, forbidden_classes );
 
-		semseg->semanticseg(file, lm, probabilities);
-		fprintf(stderr, "testSemanticSegmentation: Segmentation finished !\n");
+  ProgressBar pb( "Semantic Segmentation Analysis" );
 
-		NICE::Image lm_gt;
+  pb.show();
 
-		if (info.hasLocalizationInfo())
-		{
-			const LocalizationResult *l_gt = info.localization();
+  int fileno = 0;
 
-			lm_gt.resize(l_gt->xsize, l_gt->ysize);
-			lm_gt.set(0);
+  LOOP_ALL_S( *testFiles )
+  {
+    EACH_INFO( classno, info );
+    std::string file = info.img();
 
-			fprintf(stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n");
-			l_gt->calcLabeledImage(lm_gt, classNames.getBackgroundClass());
-		}
+    NICE::Image lm;
+    NICE::MultiChannelImageT<double> probabilities;
 
-		std::string fname = StringTools::baseName(file, false);
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
 
-		if (write_results_pascal)
-		{
+      lm.resize( l_gt->xsize, l_gt->ysize );
+      lm.set( 0 );
+      l_gt->calcLabeledImage( lm, classNames.getBackgroundClass() );
+    }
 
-			NICE::Image pascal_lm(lm.width(), lm.height());
-			int backgroundClass = classNames.getBackgroundClass();
+    semseg->semanticseg( file, lm, probabilities );
 
-			for (int y = 0 ; y < lm.height(); y++)
-				for (int x = 0 ; x < lm.width(); x++)
-				{
-					int v = lm.getPixel(x, y);
+    fprintf( stderr, "testSemanticSegmentation: Segmentation finished !\n" );
 
-					if (v == backgroundClass)
-						pascal_lm.setPixel(x, y, 255);
-					else
-						pascal_lm.setPixel(x, y, 255 - v - 1);
-				}
+    NICE::Image lm_gt;
 
-			char filename[1024];
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
 
-			char *format = (char *)"pgm";
-			sprintf(filename, "%s/%s.%s", resultdir.c_str(), fname.c_str(), format);
+      lm_gt.resize( l_gt->xsize, l_gt->ysize );
+      lm_gt.set( 0 );
 
-			pascal_lm.write(filename);
-		}
+      fprintf( stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n" );
+      l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+    }
 
-		if (show_result || write_results)
-		{
-			NICE::ColorImage orig(file);
-			NICE::ColorImage rgb;
-			NICE::ColorImage rgb_gt;
+    std::string fname = StringTools::baseName( file, false );
 
-			classNames.labelToRGB(lm, rgb);
+    if ( write_results_pascal )
+    {
 
-			classNames.labelToRGB(lm_gt, rgb_gt);
+      NICE::Image pascal_lm( lm.width(), lm.height() );
+      int backgroundClass = classNames.getBackgroundClass();
 
-			if (write_results)
-			{
-				char filename[1024];
-				char *format = (char *)"ppm";
-				sprintf(filename, "%06d.%s", fileno, format);
-				std::string origfilename = resultdir + "/orig_" + string(filename);
-				cerr << "Writing to file " << origfilename << endl;
-				orig.write(origfilename);
-				rgb.write(resultdir + "/result_" + string(filename));
-				rgb_gt.write(resultdir + "/groundtruth_" + string(filename));
-			}
+      for ( int y = 0 ; y < lm.height(); y++ )
+        for ( int x = 0 ; x < lm.width(); x++ )
+        {
+          int v = lm.getPixel( x, y );
 
-			if (show_result)
-			{
+          if ( v == backgroundClass )
+            pascal_lm.setPixel( x, y, 255 );
+          else
+            pascal_lm.setPixel( x, y, 255 - v - 1 );
+        }
+
+      char filename[1024];
+
+      char *format = ( char * )"pgm";
+      sprintf( filename, "%s/%s.%s", resultdir.c_str(), fname.c_str(), format );
+
+      pascal_lm.write( filename );
+    }
+
+    if ( show_result || write_results )
+    {
+      NICE::ColorImage orig( file );
+      NICE::ColorImage rgb;
+      NICE::ColorImage rgb_gt;
+
+      classNames.labelToRGB( lm, rgb );
+
+      classNames.labelToRGB( lm_gt, rgb_gt );
+
+      if ( write_results )
+      {
+        char filename[1024];
+        char *format = ( char * )"ppm";
+        sprintf( filename, "%06d.%s", fileno, format );
+        std::string origfilename = resultdir + "/orig_" + string( filename );
+        cerr << "Writing to file " << origfilename << endl;
+        orig.write( origfilename );
+        rgb.write( resultdir + "/result_" + string( filename ) );
+        rgb_gt.write( resultdir + "/groundtruth_" + string( filename ) );
+      }
+
+      if ( show_result )
+      {
 #ifndef NOVISUAL
-				showImage(rgb, "Result");
-				showImage(rgb_gt, "Groundtruth");
-				showImage(orig, "Input");
+        showImage( rgb, "Result" );
+        showImage( rgb_gt, "Groundtruth" );
+        showImage( orig, "Input" );
 #endif
-			}
-		}
+      }
+    }
 
 //#pragma omp critical
-		updateMatrix(lm, lm_gt, M, forbidden_classes);
-
-		cerr << M << endl;
-		fileno++;
-		pb.update(testFiles->count());
-	}
-
-	pb.hide();
-
-	double overall = 0.0;
-	double sumall = 0.0;
-	for(int r = 0; r < (int)M.rows(); r++)
-	{
-		for(int c = 0; c < (int)M.cols(); c++)
-		{
-			if(r == c)
-				overall += M(r,c);
-			sumall += M(r,c);
-		}
-	}
-	
-	overall /= sumall;
-	
-	// normalizing M using rows
-	for (int r = 0 ; r < (int)M.rows() ; r++)
-	{
-		double sum = 0.0;
-
-		for (int c = 0 ; c < (int)M.cols() ; c++)
-			sum += M(r, c);
-
-		if (fabs(sum) > 1e-4)
-			for (int c = 0 ; c < (int)M.cols() ; c++)
-				M(r, c) /= sum;
-	}
-
-	cerr << M << endl;
-	
-	double avg_perf = 0.0;
-	int classes_trained = 0;
-
-	for (int r = 0 ; r < (int)M.rows() ; r++)
-	{
-		if ((classNames.existsClassno(r)) && (forbidden_classes.find(r) == forbidden_classes.end()))
-		{
-			avg_perf += M(r, r);
-			classes_trained++;
-		}
-	}
-
-	if (write_results)
-	{
-		ofstream fout((resultdir + "/res.txt").c_str(), ios::out);
-		fout <<  "overall: " << overall << endl;
-		fout << "Average Performance " << avg_perf / (classes_trained) << endl;
-		fout << "Lower Bound " << 1.0  / classes_trained << endl;
-
-		for (int r = 0 ; r < (int)M.rows() ; r++)
-		{
-			if ((classNames.existsClassno(r)) && (forbidden_classes.find(r) == forbidden_classes.end()))
-			{
-				std::string classname = classNames.text(r);
-				fout << classname.c_str() << ": " << M(r, r) << endl;
-			}
-		}
-
-		fout.close();
-	}
-
-	fprintf(stderr, "overall: %f\n", overall);
-	fprintf(stderr, "Average Performance %f\n", avg_perf / (classes_trained));
-	//fprintf(stderr, "Lower Bound %f\n", 1.0 / classes_trained);
-	for (int r = 0 ; r < (int)M.rows() ; r++)
-	{
-		if ((classNames.existsClassno(r)) && (forbidden_classes.find(r) == forbidden_classes.end()))
-		{
-			std::string classname = classNames.text(r);
-			fprintf(stderr, "%s: %f\n", classname.c_str(), M(r, r));
-		}
-	}
-
-	delete semseg;
-
-	return 0;
+    updateMatrix( lm, lm_gt, M, forbidden_classes );
+
+    cerr << M << endl;
+
+    fileno++;
+
+    pb.update( testFiles->count() );
+  }
+
+  pb.hide();
+
+  double overall = 0.0;
+  double sumall = 0.0;
+
+  for ( int r = 0; r < ( int )M.rows(); r++ )
+  {
+    for ( int c = 0; c < ( int )M.cols(); c++ )
+    {
+      if ( r == c )
+        overall += M( r, c );
+
+      sumall += M( r, c );
+    }
+  }
+
+  overall /= sumall;
+
+  // normalizing M using rows
+
+  for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+  {
+    double sum = 0.0;
+
+    for ( int c = 0 ; c < ( int )M.cols() ; c++ )
+      sum += M( r, c );
+
+    if ( fabs( sum ) > 1e-4 )
+      for ( int c = 0 ; c < ( int )M.cols() ; c++ )
+        M( r, c ) /= sum;
+  }
+
+  cerr << M << endl;
+
+  double avg_perf = 0.0;
+  int classes_trained = 0;
+
+  for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+  {
+    if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+    {
+      avg_perf += M( r, r );
+      classes_trained++;
+    }
+  }
+
+  if ( write_results )
+  {
+    ofstream fout(( resultdir + "/res.txt" ).c_str(), ios::out );
+    fout <<  "overall: " << overall << endl;
+    fout << "Average Performance " << avg_perf / ( classes_trained ) << endl;
+    fout << "Lower Bound " << 1.0  / classes_trained << endl;
+
+    for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+    {
+      if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+      {
+        std::string classname = classNames.text( r );
+        fout << classname.c_str() << ": " << M( r, r ) << endl;
+      }
+    }
+
+    fout.close();
+  }
+
+  fprintf( stderr, "overall: %f\n", overall );
+
+  fprintf( stderr, "Average Performance %f\n", avg_perf / ( classes_trained ) );
+  //fprintf(stderr, "Lower Bound %f\n", 1.0 / classes_trained);
+
+  for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+  {
+    if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+    {
+      std::string classname = classNames.text( r );
+      fprintf( stderr, "%s: %f\n", classname.c_str(), M( r, r ) );
+    }
+  }
+
+  delete semseg;
+
+  return 0;
 }

+ 109 - 101
semseg/FIShotton.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file FIShotton.cpp
 * @brief feature images
 * @author Erik Rodner
@@ -14,122 +14,130 @@
 using namespace OBJREC;
 
 using namespace std;
+
 using namespace NICE;
 
 
-void FIShotton::buildTextonMap ( CachedExample *ce,
-				FPCRandomForests *fpcrf,
-				map<DecisionNode *, pair<long, int> > index,
-				int subsamplex, 
-				int subsampley,
-				int maxdepthSegmentationForest )
+void FIShotton::buildTextonMap( CachedExample *ce,
+                                FPCRandomForests *fpcrf,
+                                map<DecisionNode *, pair<long, int> > index,
+                                int subsamplex,
+                                int subsampley,
+                                int maxdepthSegmentationForest )
 {
-    vector<DecisionNode *> leafs;
-
-    int xsize, ysize;
-    ce->getImageSize ( xsize, ysize );
-    int xsize_s = xsize / subsamplex;
-    int ysize_s = ysize / subsampley;
-
-    SparseVector *textonIndices = new SparseVector [xsize_s*ysize_s];
-    
-    Example pce ( ce, 0, 0 );
-    long offset = 0;
-    long offset_s = 0;
-    for ( int y = 0 ; y < ysize_s ; y++ )
+  vector<DecisionNode *> leafs;
+
+  int xsize, ysize;
+  ce->getImageSize( xsize, ysize );
+  int xsize_s = xsize / subsamplex;
+  int ysize_s = ysize / subsampley;
+
+  SparseVector *textonIndices = new SparseVector [xsize_s*ysize_s];
+
+  Example pce( ce, 0, 0 );
+  long offset = 0;
+  long offset_s = 0;
+
+  for ( int y = 0 ; y < ysize_s ; y++ )
+  {
+    for ( int x = 0 ; x < xsize_s ; x++, offset_s++ )
     {
-	for ( int x = 0 ; x < xsize_s ; x++, offset_s++ )
-	{
-	    for ( int yi = 0 ; yi < subsampley ; yi++ )
-	    {
-		for ( int xi = 0 ; xi < subsamplex ; xi++, offset++ )
-		{
-		    leafs.clear();
-		    pce.x = x*subsamplex + xi; pce.y = y*subsampley + yi; 
-		    fpcrf->getLeafNodes ( pce, leafs, maxdepthSegmentationForest );
-		    SparseVector v;
-		    for ( vector<DecisionNode *>::const_iterator i = leafs.begin();
-								i != leafs.end();
-								i++ )
-			v.insert ( pair<int, double> ( index[*i].first, 1.0 ) );
-
-		    textonIndices[offset_s].add(v);
-		}
-	    }
-	}
+      for ( int yi = 0 ; yi < subsampley ; yi++ )
+      {
+        for ( int xi = 0 ; xi < subsamplex ; xi++, offset++ )
+        {
+          leafs.clear();
+          pce.x = x * subsamplex + xi;
+          pce.y = y * subsampley + yi;
+          fpcrf->getLeafNodes( pce, leafs, maxdepthSegmentationForest );
+          SparseVector v;
+
+          for ( vector<DecisionNode *>::const_iterator i = leafs.begin();
+                i != leafs.end();
+                i++ )
+            v.insert( pair<int, double> ( index[*i].first, 1.0 ) );
+
+          textonIndices[offset_s].add( v );
+        }
+      }
     }
-    fprintf (stderr, "Building Texton Integral NICE::Image !!\n");
+  }
+
+  fprintf( stderr, "Building Texton Integral NICE::Image !!\n" );
 
-    ce->buildIntegralSV ( CachedExample::SVTEXTON, textonIndices, xsize_s, ysize_s );
+  ce->buildIntegralSV( CachedExample::SVTEXTON, textonIndices, xsize_s, ysize_s );
 }
 
 
 
 
-void FIShotton::buildSemanticMap ( CachedExample *ce,
-				FPCRandomForests *fpcrf,
-				int subsamplex, int subsampley,
-				int numClasses )
+void FIShotton::buildSemanticMap( CachedExample *ce,
+                                  FPCRandomForests *fpcrf,
+                                  int subsamplex, int subsampley,
+                                  int numClasses )
 {
-    int xsize, ysize;
-    ce->getImageSize ( xsize, ysize );
-    int xsize_s = xsize / subsamplex;
-    int ysize_s = ysize / subsampley;
-
-    NICE::MultiChannelImageT<double> & priorMap = ce->getDChannel ( CachedExample::D_INTEGRALPRIOR );
-    priorMap.reInit ( xsize_s, ysize_s, numClasses, true );
-    priorMap.setAll ( 0.0 );
-    
-    vector<DecisionNode *> leafs;
-   
-    Example pce ( ce, 0, 0 );
-    long offset = 0;
-    long offset_s = 0;
-    for ( int y = 0 ; y < ysize_s ; y++ )
+  int xsize, ysize;
+  ce->getImageSize( xsize, ysize );
+  int xsize_s = xsize / subsamplex;
+  int ysize_s = ysize / subsampley;
+
+  NICE::MultiChannelImageT<double> & priorMap = ce->getDChannel( CachedExample::D_INTEGRALPRIOR );
+  priorMap.reInit( xsize_s, ysize_s, numClasses, true );
+  priorMap.setAll( 0.0 );
+
+  vector<DecisionNode *> leafs;
+
+  Example pce( ce, 0, 0 );
+  long offset = 0;
+  long offset_s = 0;
+
+  for ( int y = 0 ; y < ysize_s ; y++ )
+  {
+    for ( int x = 0 ; x < xsize_s ; x++, offset_s++ )
     {
-	for ( int x = 0 ; x < xsize_s ; x++, offset_s++ )
-	{
-	    for ( int yi = 0 ; yi < subsampley ; yi++ )
-	    {
-		for ( int xi = 0 ; xi < subsamplex ; xi++ )
-		{
-		    leafs.clear();
-		    pce.x = x*subsamplex + xi; pce.y = y*subsampley + yi; 
-		    fpcrf->getLeafNodes ( pce, leafs );
-		    
-		    for ( vector<DecisionNode *>::const_iterator i = leafs.begin();
-								i != leafs.end();
-								i++ )
-		    {
-			const FullVector & sv = (*i)->distribution;
-
-			for ( int i = 0 ; i < sv.size(); i++  )
-			{
-			    priorMap.data[i][offset_s] += sv[i];
-			}
-		    }
-		}
-	    }
-
-	    double sum = 0.0;
-	    for ( uint i = 0 ; i < priorMap.numChannels; i++ )
-		sum += priorMap.data[i][offset_s];
-	
-	    if ( sum < 10e-13 )
-	    {
-		fprintf (stderr, "x*subsamplex %d y*subsampley %d xsize %d ysize %d\n",
-		    x*subsamplex, y*subsampley, xsize, ysize );
-		exit(-1);
-	    } else {
-		for ( uint i = 0 ; i < priorMap.numChannels; i++ )
-		    priorMap.data[i][offset_s] /= sum;
-	    }
-
-	}
+      for ( int yi = 0 ; yi < subsampley ; yi++ )
+      {
+        for ( int xi = 0 ; xi < subsamplex ; xi++ )
+        {
+          leafs.clear();
+          pce.x = x * subsamplex + xi;
+          pce.y = y * subsampley + yi;
+          fpcrf->getLeafNodes( pce, leafs );
+
+          for ( vector<DecisionNode *>::const_iterator i = leafs.begin();
+                i != leafs.end();
+                i++ )
+          {
+            const FullVector & sv = ( *i )->distribution;
+
+            for ( int i = 0 ; i < sv.size(); i++ )
+            {
+              priorMap.data[i][offset_s] += sv[i];
+            }
+          }
+        }
+      }
+
+      double sum = 0.0;
+
+      for ( uint i = 0 ; i < priorMap.numChannels; i++ )
+        sum += priorMap.data[i][offset_s];
+
+      if ( sum < 10e-13 )
+      {
+        fprintf( stderr, "x*subsamplex %d y*subsampley %d xsize %d ysize %d\n",
+                 x*subsamplex, y*subsampley, xsize, ysize );
+        exit( -1 );
+      } else {
+        for ( uint i = 0 ; i < priorMap.numChannels; i++ )
+          priorMap.data[i][offset_s] /= sum;
+      }
+
     }
+  }
 
-    for ( uint i = 0 ; i < priorMap.numChannels ; i++ )
-	GenericImageTools::calcIntegralImage ( priorMap.data[i], priorMap.data[i], priorMap.xsize, priorMap.ysize );
+  for ( uint i = 0 ; i < priorMap.numChannels ; i++ )
+    GenericImageTools::calcIntegralImage( priorMap.data[i], priorMap.data[i], priorMap.xsize, priorMap.ysize );
 
 }
 

+ 18 - 17
semseg/FIShotton.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file FIShotton.h
 * @brief feature images for preprocessing according to shotton
 * @author Erik Rodner
@@ -7,7 +7,7 @@
 */
 #ifndef FIShottonINCLUDE
 #define FIShottonINCLUDE
- 
+
 #include "core/vector/SparseVector.h"
 #include "vislearning/cbaselib/CachedExample.h"
 #include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
@@ -15,24 +15,25 @@
 namespace OBJREC {
 
 /** feature images */
+
 class FIShotton
 {
 
-    protected:
-
-    public:
-  
-	static void buildSemanticMap ( CachedExample *ce,
-				FPCRandomForests *fpcrf,
-				int subsamplex, int subsampley,
-				int numClasses );
-
-	static void buildTextonMap ( CachedExample *ce,
-				FPCRandomForests *fpcrf,
-				std::map<DecisionNode *, std::pair<long, int> > index,
-				int subsamplex, 
-				int subsampley,
-				int maxdepthSegmentationForest );
+protected:
+
+public:
+
+  static void buildSemanticMap( CachedExample *ce,
+                                FPCRandomForests *fpcrf,
+                                int subsamplex, int subsampley,
+                                int numClasses );
+
+  static void buildTextonMap( CachedExample *ce,
+                              FPCRandomForests *fpcrf,
+                              std::map<DecisionNode *, std::pair<long, int> > index,
+                              int subsamplex,
+                              int subsampley,
+                              int maxdepthSegmentationForest );
 };
 
 

+ 1591 - 1376
semseg/SemSegContextTree.cpp

@@ -19,391 +19,501 @@
 //#define LOCALFEATS
 
 using namespace OBJREC;
+
 using namespace std;
+
 using namespace NICE;
 
 
 
-class MCImageAccess:public ValueAccess
+class MCImageAccess: public ValueAccess
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y, const int &channel)
-	{
-		return feats.feats->get(x,y,channel);
-	}
-	
-	virtual string writeInfos()
-	{
-		return "raw";
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y, const int &channel )
+  {
+    return feats.feats->get( x, y, channel );
+  }
+
+  virtual string writeInfos()
+  {
+    return "raw";
+  }
 };
 
-class ClassificationResultAcess:public ValueAccess
+class ClassificationResultAcess: public ValueAccess
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y, const int &channel)
-	{
-		return (*feats.tree)[feats.cfeats->get(x,y,feats.cTree)].dist[channel];
-	}
-	
-	virtual string writeInfos()
-	{
-		return "context";
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y, const int &channel )
+  {
+    return ( *feats.tree )[feats.cfeats->get( x,y,feats.cTree )].dist[channel];
+  }
+
+  virtual string writeInfos()
+  {
+    return "context";
+  }
 };
 
-class Minus:public Operation
+class Minus: public Operation
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		double v1 = values->getVal(feats, BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),channel1);
-		double v2 = values->getVal(feats, BOUND(x+x2,0,xsize-1),BOUND(y+y2,0,ysize-1),channel2);
-		return v1-v2;
-	}
-	
-	virtual Operation* clone()
-	{
-		return new Minus();
-	}
-	
-	virtual string writeInfos()
-	{
-		string out = "Minus";
-		if(values !=NULL)
-			out+=values->writeInfos();
-		return out;
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return MINUS;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
+    double v2 = values->getVal( feats, BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel2 );
+    return v1 -v2;
+  }
+
+  virtual Operation* clone()
+  {
+    return new Minus();
+  }
+
+  virtual string writeInfos()
+  {
+    string out = "Minus";
+
+    if ( values != NULL )
+      out += values->writeInfos();
+
+    return out;
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return MINUS;
+  }
 };
 
-class MinusAbs:public Operation
+class MinusAbs: public Operation
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		double v1 = values->getVal(feats, BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),channel1);
-		double v2 = values->getVal(feats, BOUND(x+x2,0,xsize-1),BOUND(y+y2,0,ysize-1),channel2);
-		return abs(v1-v2);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new MinusAbs();
-	};
-	
-	virtual string writeInfos()
-	{
-		string out = "MinusAbs";
-		if(values !=NULL)
-			out+=values->writeInfos();
-		return out;
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return MINUSABS;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
+    double v2 = values->getVal( feats, BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel2 );
+    return abs( v1 -v2 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new MinusAbs();
+  };
+
+  virtual string writeInfos()
+  {
+    string out = "MinusAbs";
+
+    if ( values != NULL )
+      out += values->writeInfos();
+
+    return out;
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return MINUSABS;
+  }
 };
 
-class Addition:public Operation
+class Addition: public Operation
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		double v1 = values->getVal(feats, BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),channel1);
-		double v2 = values->getVal(feats, BOUND(x+x2,0,xsize-1),BOUND(y+y2,0,ysize-1),channel2);
-		return v1+v2;
-	}
-	
-	virtual Operation* clone()
-	{
-		return new Addition();
-	}
-	
-	virtual string writeInfos()
-	{
-		string out = "Addition";
-		if(values !=NULL)
-			out+=values->writeInfos();
-		return out;
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return ADDITION;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
+    double v2 = values->getVal( feats, BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel2 );
+    return v1 + v2;
+  }
+
+  virtual Operation* clone()
+  {
+    return new Addition();
+  }
+
+  virtual string writeInfos()
+  {
+    string out = "Addition";
+
+    if ( values != NULL )
+      out += values->writeInfos();
+
+    return out;
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return ADDITION;
+  }
 };
 
-class Only1:public Operation
+class Only1: public Operation
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		double v1 = values->getVal(feats, BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),channel1);
-		return v1;
-	}
-	
-	virtual Operation* clone()
-	{
-		return new Only1();
-	}
-	
-	virtual string writeInfos()
-	{
-		string out = "Only1";
-		if(values !=NULL)
-			out+=values->writeInfos();
-		return out;
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return ONLY1;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
+    return v1;
+  }
+
+  virtual Operation* clone()
+  {
+    return new Only1();
+  }
+
+  virtual string writeInfos()
+  {
+    string out = "Only1";
+
+    if ( values != NULL )
+      out += values->writeInfos();
+
+    return out;
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return ONLY1;
+  }
+};
+
+class RelativeXPosition: public Operation
+{
+
+public:
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    return ( double )x / ( double )xsize;
+  }
+
+  virtual Operation* clone()
+  {
+    return new RelativeXPosition();
+  }
+
+  virtual string writeInfos()
+  {
+    return "RelativeXPosition";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return RELATIVEXPOSITION;
+  }
+};
+
+class RelativeYPosition: public Operation
+{
+
+public:
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    return ( double )x / ( double )xsize;
+  }
+
+  virtual Operation* clone()
+  {
+    return new RelativeYPosition();
+  }
+
+  virtual string writeInfos()
+  {
+    return "RelativeYPosition";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return RELATIVEYPOSITION;
+  }
 };
 
 // uses mean of classification in window given by (x1,y1) (x2,y2)
-class IntegralOps:public Operation
+
+class IntegralOps: public Operation
 {
+
 public:
-	virtual void set(int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values)
-	{
-		x1 = min(_x1,_x2);
-		y1 = min(_y1,_y2);
-		x2 = max(_x1,_x2);
-		y2 = max(_y1,_y2);
-		channel1 = _channel1;
-		channel2 = _channel2;
-		values = _values;
-	}
-	
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		return computeMean(*feats.integralImg,BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),BOUND(x+x2,0,xsize-1),BOUND(y+y2,0,ysize-1),channel1);
-	}
-	
-	inline double computeMean(const NICE::MultiChannelImageT<double> &intImg, const int &uLx, const int &uLy, const int &lRx, const int &lRy, const int &chan)
-	{
-		double val1 = intImg.get(uLx,uLy, chan);
-		double val2 = intImg.get(lRx,uLy, chan);
-		double val3 = intImg.get(uLx,lRy, chan);
-		double val4 = intImg.get(lRx,lRy, chan);
-		double area = (lRx-uLx)*(lRy-uLy);
-		if(area == 0)
-			return 0.0;
-		return (val1+val4-val2-val3)/area;
-	}
-	
-	virtual Operation* clone()
-	{
-		return new IntegralOps();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "IntegralOps";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return INTEGRAL;
-	}
+  virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+  {
+    x1 = min( _x1, _x2 );
+    y1 = min( _y1, _y2 );
+    x2 = max( _x1, _x2 );
+    y2 = max( _y1, _y2 );
+    channel1 = _channel1;
+    channel2 = _channel2;
+    values = _values;
+  }
+
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    return computeMean( *feats.integralImg, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel1 );
+  }
+
+  inline double computeMean( const NICE::MultiChannelImageT<double> &intImg, const int &uLx, const int &uLy, const int &lRx, const int &lRy, const int &chan )
+  {
+    double val1 = intImg.get( uLx, uLy, chan );
+    double val2 = intImg.get( lRx, uLy, chan );
+    double val3 = intImg.get( uLx, lRy, chan );
+    double val4 = intImg.get( lRx, lRy, chan );
+    double area = ( lRx - uLx ) * ( lRy - uLy );
+
+    if ( area == 0 )
+      return 0.0;
+
+    return ( val1 + val4 - val2 - val3 ) / area;
+  }
+
+  virtual Operation* clone()
+  {
+    return new IntegralOps();
+  }
+
+  virtual string writeInfos()
+  {
+    return "IntegralOps";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return INTEGRAL;
+  }
+};
+
+//like a global bag of words to model the current appearance of classes in an image without local context
+
+class GlobalFeats: public IntegralOps
+{
+
+public:
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    return computeMean( *feats.integralImg, 0, 0, xsize - 1, ysize - 1, channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new GlobalFeats();
+  }
+
+  virtual string writeInfos()
+  {
+    return "GlobalFeats";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return GLOBALFEATS;
+  }
 };
 
 //uses mean of Integral image given by x1, y1 with current pixel as center
-class IntegralCenteredOps:public IntegralOps
+
+class IntegralCenteredOps: public IntegralOps
 {
+
 public:
-	virtual void set(int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values)
-	{
-		x1 = abs(_x1);
-		y1 = abs(_y1);
-		x2 = abs(_x2);
-		y2 = abs(_y2);
-		channel1 = _channel1;
-		channel2 = _channel2;
-		values = _values;
-	}
-	
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		return computeMean(*feats.integralImg,BOUND(x-x1,0,xsize-1),BOUND(y-y1,0,ysize-1),BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new IntegralCenteredOps();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "IntegralCenteredOps";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return INTEGRALCENT;
-	}
+  virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+  {
+    x1 = abs( _x1 );
+    y1 = abs( _y1 );
+    x2 = abs( _x2 );
+    y2 = abs( _y2 );
+    channel1 = _channel1;
+    channel2 = _channel2;
+    values = _values;
+  }
+
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    return computeMean( *feats.integralImg, BOUND( x - x1, 0, xsize - 1 ), BOUND( y - y1, 0, ysize - 1 ), BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new IntegralCenteredOps();
+  }
+
+  virtual string writeInfos()
+  {
+    return "IntegralCenteredOps";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return INTEGRALCENT;
+  }
 };
 
 //uses different of mean of Integral image given by two windows, where (x1,y1) is the width and height of window1 and (x2,y2) of window 2
-class BiIntegralCenteredOps:public IntegralCenteredOps
+
+class BiIntegralCenteredOps: public IntegralCenteredOps
 {
+
 public:
-		virtual void set(int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values)
-	{
-		x1 = min(abs(_x1),abs(_x2));
-		y1 = min(abs(_y1),abs(_y2));
-		x2 = max(abs(_x1),abs(_x2));
-		y2 = max(abs(_y1),abs(_y2));
-		channel1 = _channel1;
-		channel2 = _channel2;
-		values = _values;
-	}
-	
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		return computeMean(*feats.integralImg,BOUND(x-x1,0,xsize-1),BOUND(y-y1,0,ysize-1),BOUND(x+x1,0,xsize-1),BOUND(y+y1,0,ysize-1),channel1) - computeMean(*feats.integralImg,BOUND(x-x2,0,xsize-1),BOUND(y-y2,0,ysize-1),BOUND(x+x2,0,xsize-1),BOUND(y+y2,0,ysize-1),channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new BiIntegralCenteredOps();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "BiIntegralCenteredOps";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return BIINTEGRALCENT;
-	}
+  virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+  {
+    x1 = min( abs( _x1 ), abs( _x2 ) );
+    y1 = min( abs( _y1 ), abs( _y2 ) );
+    x2 = max( abs( _x1 ), abs( _x2 ) );
+    y2 = max( abs( _y1 ), abs( _y2 ) );
+    channel1 = _channel1;
+    channel2 = _channel2;
+    values = _values;
+  }
+
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+    return computeMean( *feats.integralImg, BOUND( x - x1, 0, xsize - 1 ), BOUND( y - y1, 0, ysize - 1 ), BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 ) - computeMean( *feats.integralImg, BOUND( x - x2, 0, xsize - 1 ), BOUND( y - y2, 0, ysize - 1 ), BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new BiIntegralCenteredOps();
+  }
+
+  virtual string writeInfos()
+  {
+    return "BiIntegralCenteredOps";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return BIINTEGRALCENT;
+  }
 };
 
 /** horizontal Haar features
  * ++
  * --
  */
-class HaarHorizontal:public IntegralCenteredOps
+
+class HaarHorizontal: public IntegralCenteredOps
 {
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		
-		int tlx = BOUND(x-x1,0,xsize-1);
-		int tly = BOUND(y-y1,0,ysize-1);
-		int lrx = BOUND(x+x1,0,xsize-1);
-		int lry = BOUND(y+y1,0,ysize-1);
-		
-		return computeMean(*feats.integralImg,tlx,tly,lrx, y,channel1)-computeMean(*feats.integralImg,tlx,y,lrx, lry,channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new HaarHorizontal();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "HaarHorizontal";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return HAARHORIZ;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+
+    int tlx = BOUND( x - x1, 0, xsize - 1 );
+    int tly = BOUND( y - y1, 0, ysize - 1 );
+    int lrx = BOUND( x + x1, 0, xsize - 1 );
+    int lry = BOUND( y + y1, 0, ysize - 1 );
+
+    return computeMean( *feats.integralImg, tlx, tly, lrx, y, channel1 ) - computeMean( *feats.integralImg, tlx, y, lrx, lry, channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new HaarHorizontal();
+  }
+
+  virtual string writeInfos()
+  {
+    return "HaarHorizontal";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return HAARHORIZ;
+  }
 };
 
 /** vertical Haar features
  * +-
  * +-
  */
-class HaarVertical:public IntegralCenteredOps
+
+class HaarVertical: public IntegralCenteredOps
 {
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		
-		int tlx = BOUND(x-x1,0,xsize-1);
-		int tly = BOUND(y-y1,0,ysize-1);
-		int lrx = BOUND(x+x1,0,xsize-1);
-		int lry = BOUND(y+y1,0,ysize-1);
-		
-		return computeMean(*feats.integralImg,tlx,tly,x, lry,channel1)-computeMean(*feats.integralImg,x,tly,lrx, lry,channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new HaarVertical();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "HaarVertical";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return HAARVERT;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+
+    int tlx = BOUND( x - x1, 0, xsize - 1 );
+    int tly = BOUND( y - y1, 0, ysize - 1 );
+    int lrx = BOUND( x + x1, 0, xsize - 1 );
+    int lry = BOUND( y + y1, 0, ysize - 1 );
+
+    return computeMean( *feats.integralImg, tlx, tly, x, lry, channel1 ) - computeMean( *feats.integralImg, x, tly, lrx, lry, channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new HaarVertical();
+  }
+
+  virtual string writeInfos()
+  {
+    return "HaarVertical";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return HAARVERT;
+  }
 };
 
 /** vertical Haar features
  * +-
  * -+
  */
-class HaarDiag:public IntegralCenteredOps
+
+class HaarDiag: public IntegralCenteredOps
 {
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		
-		int tlx = BOUND(x-x1,0,xsize-1);
-		int tly = BOUND(y-y1,0,ysize-1);
-		int lrx = BOUND(x+x1,0,xsize-1);
-		int lry = BOUND(y+y1,0,ysize-1);
-		
-		return computeMean(*feats.integralImg,tlx,tly,x, y,channel1)+computeMean(*feats.integralImg,x,y,lrx, lry,channel1) - computeMean(*feats.integralImg,tlx,y,x, lry,channel1)-computeMean(*feats.integralImg,x,tly,lrx, y,channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new HaarDiag();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "HaarDiag";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return HAARDIAG;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+
+    int tlx = BOUND( x - x1, 0, xsize - 1 );
+    int tly = BOUND( y - y1, 0, ysize - 1 );
+    int lrx = BOUND( x + x1, 0, xsize - 1 );
+    int lry = BOUND( y + y1, 0, ysize - 1 );
+
+    return computeMean( *feats.integralImg, tlx, tly, x, y, channel1 ) + computeMean( *feats.integralImg, x, y, lrx, lry, channel1 ) - computeMean( *feats.integralImg, tlx, y, x, lry, channel1 ) - computeMean( *feats.integralImg, x, tly, lrx, y, channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new HaarDiag();
+  }
+
+  virtual string writeInfos()
+  {
+    return "HaarDiag";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return HAARDIAG;
+  }
 };
 
 /** horizontal Haar features
@@ -411,37 +521,38 @@ class HaarDiag:public IntegralCenteredOps
  * ---
  * +++
  */
-class Haar3Horiz:public BiIntegralCenteredOps
+
+class Haar3Horiz: public BiIntegralCenteredOps
 {
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		
-		int tlx = BOUND(x-x2,0,xsize-1);
-		int tly = BOUND(y-y2,0,ysize-1);
-		int mtly = BOUND(y-y1,0,ysize-1);
-		int mlry = BOUND(y+y1,0,ysize-1);
-		int lrx = BOUND(x+x2,0,xsize-1);
-		int lry = BOUND(y+y2,0,ysize-1);
-		
-		return computeMean(*feats.integralImg,tlx,tly,lrx, mtly,channel1) -computeMean(*feats.integralImg,tlx,mtly,lrx, mlry,channel1) + computeMean(*feats.integralImg,tlx,mlry,lrx, lry,channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new Haar3Horiz();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "Haar3Horiz";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return HAAR3HORIZ;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+
+    int tlx = BOUND( x - x2, 0, xsize - 1 );
+    int tly = BOUND( y - y2, 0, ysize - 1 );
+    int mtly = BOUND( y - y1, 0, ysize - 1 );
+    int mlry = BOUND( y + y1, 0, ysize - 1 );
+    int lrx = BOUND( x + x2, 0, xsize - 1 );
+    int lry = BOUND( y + y2, 0, ysize - 1 );
+
+    return computeMean( *feats.integralImg, tlx, tly, lrx, mtly, channel1 ) - computeMean( *feats.integralImg, tlx, mtly, lrx, mlry, channel1 ) + computeMean( *feats.integralImg, tlx, mlry, lrx, lry, channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new Haar3Horiz();
+  }
+
+  virtual string writeInfos()
+  {
+    return "Haar3Horiz";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return HAAR3HORIZ;
+  }
 };
 
 /** vertical Haar features
@@ -449,1073 +560,1177 @@ class Haar3Horiz:public BiIntegralCenteredOps
  * +-+
  * +-+
  */
-class Haar3Vert:public BiIntegralCenteredOps
+
+class Haar3Vert: public BiIntegralCenteredOps
 {
-	virtual double getVal(const Features &feats, const int &x, const int &y)
-	{
-		int xsize, ysize;
-		getXY(feats, xsize, ysize);
-		
-		int tlx = BOUND(x-x2,0,xsize-1);
-		int tly = BOUND(y-y2,0,ysize-1);
-		int mtlx = BOUND(x-x1,0,xsize-1);
-		int mlrx = BOUND(x+x1,0,xsize-1);
-		int lrx = BOUND(x+x2,0,xsize-1);
-		int lry = BOUND(y+y2,0,ysize-1);
-		
-		return computeMean(*feats.integralImg,tlx,tly,mtlx, lry,channel1) -computeMean(*feats.integralImg,mtlx,tly,mlrx, lry,channel1) + computeMean(*feats.integralImg,mlrx,tly,lrx, lry,channel1);
-	}
-	
-	virtual Operation* clone()
-	{
-		return new Haar3Vert();
-	}
-	
-	virtual string writeInfos()
-	{
-		return "Haar3Vert";
-	}
-	
-	virtual OperationTypes getOps()
-	{
-		return HAAR3VERT;
-	}
+  virtual double getVal( const Features &feats, const int &x, const int &y )
+  {
+    int xsize, ysize;
+    getXY( feats, xsize, ysize );
+
+    int tlx = BOUND( x - x2, 0, xsize - 1 );
+    int tly = BOUND( y - y2, 0, ysize - 1 );
+    int mtlx = BOUND( x - x1, 0, xsize - 1 );
+    int mlrx = BOUND( x + x1, 0, xsize - 1 );
+    int lrx = BOUND( x + x2, 0, xsize - 1 );
+    int lry = BOUND( y + y2, 0, ysize - 1 );
+
+    return computeMean( *feats.integralImg, tlx, tly, mtlx, lry, channel1 ) - computeMean( *feats.integralImg, mtlx, tly, mlrx, lry, channel1 ) + computeMean( *feats.integralImg, mlrx, tly, lrx, lry, channel1 );
+  }
+
+  virtual Operation* clone()
+  {
+    return new Haar3Vert();
+  }
+
+  virtual string writeInfos()
+  {
+    return "Haar3Vert";
+  }
+
+  virtual OperationTypes getOps()
+  {
+    return HAAR3VERT;
+  }
 };
 
 SemSegContextTree::SemSegContextTree( const Config *conf, const MultiDataset *md )
-    : SemanticSegmentation ( conf, &(md->getClassNames("train")) )
+    : SemanticSegmentation( conf, &( md->getClassNames( "train" ) ) )
 {
-	this->conf = conf;
-	string section = "SSContextTree";
-	lfcw = new LFColorWeijer(conf);
-	
-	grid = conf->gI(section, "grid", 10 );
-	
-	maxSamples = conf->gI(section, "max_samples", 2000);
-	
-	minFeats = conf->gI(section, "min_feats", 50 );
-	
-	maxDepth = conf->gI(section, "max_depth", 10 );
-	
-	windowSize = conf->gI(section, "window_size", 16);
-	
-	featsPerSplit = conf->gI(section, "feats_per_split", 200);
-	
-	useShannonEntropy = conf->gB(section, "use_shannon_entropy", true);
-	
-	nbTrees = conf->gI(section, "amount_trees", 1);
-	
-	string segmentationtype = conf->gS(section, "segmentation_type", "meanshift");
-	
-	useGaussian = conf->gB(section, "use_gaussian", true);
-
-	if(useGaussian)
-		throw("there something wrong with using gaussian! first fix it!");
-	
-	pixelWiseLabeling = false;
-	
-	if(segmentationtype == "meanshift")
-		segmentation = new RSMeanShift(conf);
-	else if (segmentationtype == "none")
-	{
-		segmentation = NULL;
-		pixelWiseLabeling = true;
-	}
-	else if (segmentationtype == "felzenszwalb")
-		segmentation = new RSGraphBased(conf);
-	else
-		throw("no valid segmenation_type\n please choose between none, meanshift and felzenszwalb\n");
-	
-	
-	ftypes = conf->gI(section, "features", 2);;
-	
-	ops.push_back(new Minus());
-	ops.push_back(new MinusAbs());
-	ops.push_back(new Addition());
-	ops.push_back(new Only1());
-	
-	cops.push_back(new BiIntegralCenteredOps());
-	cops.push_back(new IntegralCenteredOps());
-	cops.push_back(new IntegralOps());
-	cops.push_back(new HaarHorizontal());
-	cops.push_back(new HaarVertical());
-	cops.push_back(new HaarDiag());
-	cops.push_back(new Haar3Horiz());
-	cops.push_back(new Haar3Vert());
-	
-	opOverview = vector<int>(NBOPERATIONS, 0);
-	
-	calcVal.push_back(new MCImageAccess());
-	calcVal.push_back(new ClassificationResultAcess());
-	
-	classnames = md->getClassNames ( "train" );
-	
-	///////////////////////////////////
-	// Train Segmentation Context Trees
-	///////////////////////////////////
-
-	train ( md );
+  this->conf = conf;
+  string section = "SSContextTree";
+  lfcw = new LFColorWeijer( conf );
+
+  grid = conf->gI( section, "grid", 10 );
+
+  maxSamples = conf->gI( section, "max_samples", 2000 );
+
+  minFeats = conf->gI( section, "min_feats", 50 );
+
+  maxDepth = conf->gI( section, "max_depth", 10 );
+
+  windowSize = conf->gI( section, "window_size", 16 );
+
+  featsPerSplit = conf->gI( section, "feats_per_split", 200 );
+
+  useShannonEntropy = conf->gB( section, "use_shannon_entropy", true );
+
+  nbTrees = conf->gI( section, "amount_trees", 1 );
+
+  string segmentationtype = conf->gS( section, "segmentation_type", "meanshift" );
+
+  useGaussian = conf->gB( section, "use_gaussian", true );
+
+  if ( useGaussian )
+    throw( "there something wrong with using gaussian! first fix it!" );
+
+  pixelWiseLabeling = false;
+
+  if ( segmentationtype == "meanshift" )
+    segmentation = new RSMeanShift( conf );
+  else if ( segmentationtype == "none" )
+  {
+    segmentation = NULL;
+    pixelWiseLabeling = true;
+  }
+  else if ( segmentationtype == "felzenszwalb" )
+    segmentation = new RSGraphBased( conf );
+  else
+    throw( "no valid segmenation_type\n please choose between none, meanshift and felzenszwalb\n" );
+
+  ftypes = conf->gI( section, "features", 2 );;
+
+  ops.push_back( new Minus() );
+  ops.push_back( new MinusAbs() );
+  ops.push_back( new Addition() );
+  ops.push_back( new Only1() );
+  ops.push_back( new RelativeXPosition() );
+  ops.push_back( new RelativeYPosition() );
+
+  cops.push_back( new BiIntegralCenteredOps() );
+  cops.push_back( new IntegralCenteredOps() );
+  cops.push_back( new IntegralOps() );
+  cops.push_back( new HaarHorizontal() );
+  cops.push_back( new HaarVertical() );
+  cops.push_back( new HaarDiag() );
+  cops.push_back( new Haar3Horiz() );
+  cops.push_back( new Haar3Vert() );
+  cops.push_back( new GlobalFeats() );
+
+  opOverview = vector<int>( NBOPERATIONS, 0 );
+
+  calcVal.push_back( new MCImageAccess() );
+  calcVal.push_back( new ClassificationResultAcess() );
+
+  classnames = md->getClassNames( "train" );
+
+  ///////////////////////////////////
+  // Train Segmentation Context Trees
+  ///////////////////////////////////
+
+  train( md );
 }
 
 SemSegContextTree::~SemSegContextTree()
 {
 }
 
-double SemSegContextTree::getBestSplit(std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<int> > &currentfeats, std::vector<NICE::MultiChannelImageT<double> > &integralImgs, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree)
+double SemSegContextTree::getBestSplit( std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<int> > &currentfeats, std::vector<NICE::MultiChannelImageT<double> > &integralImgs, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree )
 {
-	int imgCount = 0, featdim = 0;
-	try
-	{
-		imgCount = (int)feats.size();
-		featdim = feats[0].channels();
-	}
-	catch(Exception)
-	{
-		cerr << "no features computed?" << endl;
-	}
-	
-	double bestig = -numeric_limits< double >::max();
-	splitop = NULL;
-	splitval = -1.0;
-	
-	set<vector<int> >selFeats;
-	map<int,int> e;
-	int featcounter = 0;
-	
-	for(int iCounter = 0; iCounter < imgCount; iCounter++)
-	{
-		int xsize = (int)currentfeats[iCounter].width();
-		int ysize = (int)currentfeats[iCounter].height();
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++)
-			{
-				if(currentfeats[iCounter].get(x,y,tree) == node)
-				{
-					featcounter++;
-				}
-			}
-		}
-	}
-	
-	if(featcounter < minFeats)
-	{
-		cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
-		return 0.0;
-	}
-	
-	vector<double> fraction(a.size(),0.0);
-	for(uint i = 0; i < fraction.size(); i++)
-	{
-		if ( forbidden_classes.find ( labelmapback[i] ) != forbidden_classes.end() )
-			fraction[i] = 0;
-		else
-			fraction[i] = ((double)maxSamples)/((double)featcounter*a[i]*a.size());
-		//cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
-	}
-	//cout << "a.size(): " << a.size() << endl;
-	//getchar();
-	featcounter = 0;
-	
-	for(int iCounter = 0; iCounter < imgCount; iCounter++)
-	{
-		int xsize = (int)currentfeats[iCounter].width();
-		int ysize = (int)currentfeats[iCounter].height();
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++)
-			{
-				if(currentfeats[iCounter].get(x,y,tree) == node)
-				{
-					int cn = labels[iCounter](x,y);
-					double randD = (double)rand()/(double)RAND_MAX;
-					if(randD < fraction[labelmap[cn]])
-					{
-						vector<int> tmp(3,0);
-						tmp[0] = iCounter;
-						tmp[1] = x;
-						tmp[2] = y;
-						featcounter++;
-						selFeats.insert(tmp);
-						e[cn]++;
-					}
-				}
-			}
-		}
-	}
-	//cout << "size: " << selFeats.size() << endl;
-	//getchar();
-	
-	map<int,int>::iterator mapit;
-	double globent = 0.0;
-	for ( mapit=e.begin() ; mapit != e.end(); mapit++ )
-	{
-	  //cout << "class: " << mapit->first << ": " << mapit->second << endl;
-		double p = (double)(*mapit).second/(double)featcounter;
-		globent += p*log2(p);
-	}
-	globent = -globent;
-	
-	if(globent < 0.5)
-	{
-		cout << "globent to small: " << globent << endl;
-		return 0.0;
-	}
-	
-	int classes = (int)forest[tree][0].dist.size();
-	featsel.clear();
-	for(int i = 0; i < featsPerSplit; i++)
-	{
-		int x1, x2, y1, y2;
-		int ft = (int)((double)rand()/(double)RAND_MAX*(double)ftypes);
-		
-		int tmpws = windowSize;
-		
-		if(integralImgs[0].width() == 0)
-			ft = 0;
-		
-		if(ft > 0)
-		{
-			tmpws *= 4;
-		}
-		
-		if(useGaussian)
-		{
-			double sigma = (double)tmpws/2.0;
-			x1 = randGaussDouble(sigma)*(double)tmpws;
-			x2 = randGaussDouble(sigma)*(double)tmpws;
-			y1 = randGaussDouble(sigma)*(double)tmpws;
-			y2 = randGaussDouble(sigma)*(double)tmpws;
-		}
-		else
-		{
-			x1 = (int)((double)rand()/(double)RAND_MAX*(double)tmpws)-tmpws/2;
-			x2 = (int)((double)rand()/(double)RAND_MAX*(double)tmpws)-tmpws/2;
-			y1 = (int)((double)rand()/(double)RAND_MAX*(double)tmpws)-tmpws/2;
-			y2 = (int)((double)rand()/(double)RAND_MAX*(double)tmpws)-tmpws/2;
-		}
-				
-		if(ft == 0)
-		{
-			int f1 = (int)((double)rand()/(double)RAND_MAX*(double)featdim);
-			int f2 = (int)((double)rand()/(double)RAND_MAX*(double)featdim);
-			int o = (int)((double)rand()/(double)RAND_MAX*(double)ops.size());
-			Operation *op = ops[o]->clone();
-			op->set(x1,y1,x2,y2,f1,f2, calcVal[ft]);
-			featsel.push_back(op);
-		}
-		else if(ft == 1)
-		{
-
-			int opssize = (int)ops.size();
-			//opssize = 0;
-			int o = (int)((double)rand()/(double)RAND_MAX*(((double)cops.size())+(double)opssize));
-			
-			Operation *op;
-			if(o < opssize)
-			{
-				int chans = (int)forest[0][0].dist.size();
-				int f1 = (int)((double)rand()/(double)RAND_MAX*(double)chans);
-				int f2 = (int)((double)rand()/(double)RAND_MAX*(double)chans);
-				op = ops[o]->clone();
-				op->set(x1,y1,x2,y2,f1,f2, calcVal[ft]);
-			}
-			else
-			{
-				int chans = integralImgs[0].channels();
-				int f1 = (int)((double)rand()/(double)RAND_MAX*(double)chans);
-				int f2 = (int)((double)rand()/(double)RAND_MAX*(double)chans);
-				o -= opssize;
-				op = cops[o]->clone();
-				op->set(x1,y1,x2,y2,f1,f2, calcVal[ft]);
-			}
-			featsel.push_back(op);
-		}
-	}
+  int imgCount = 0, featdim = 0;
+
+  try
+  {
+    imgCount = ( int )feats.size();
+    featdim = feats[0].channels();
+  }
+  catch ( Exception )
+  {
+    cerr << "no features computed?" << endl;
+  }
+
+  double bestig = -numeric_limits< double >::max();
+
+  splitop = NULL;
+  splitval = -1.0;
+
+  set<vector<int> >selFeats;
+  map<int, int> e;
+  int featcounter = 0;
+
+  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
+  {
+    int xsize = ( int )currentfeats[iCounter].width();
+    int ysize = ( int )currentfeats[iCounter].height();
+
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++ )
+      {
+        if ( currentfeats[iCounter].get( x, y, tree ) == node )
+        {
+          featcounter++;
+        }
+      }
+    }
+  }
+
+  if ( featcounter < minFeats )
+  {
+    cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
+    return 0.0;
+  }
+
+  vector<double> fraction( a.size(), 0.0 );
+
+  for ( uint i = 0; i < fraction.size(); i++ )
+  {
+    if ( forbidden_classes.find( labelmapback[i] ) != forbidden_classes.end() )
+      fraction[i] = 0;
+    else
+      fraction[i] = (( double )maxSamples ) / (( double )featcounter * a[i] * a.size() );
+
+    //cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
+  }
+
+  //cout << "a.size(): " << a.size() << endl;
+  //getchar();
+  featcounter = 0;
+
+  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
+  {
+    int xsize = ( int )currentfeats[iCounter].width();
+    int ysize = ( int )currentfeats[iCounter].height();
+
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++ )
+      {
+        if ( currentfeats[iCounter].get( x, y, tree ) == node )
+        {
+          int cn = labels[iCounter]( x, y );
+          double randD = ( double )rand() / ( double )RAND_MAX;
+
+          if ( randD < fraction[labelmap[cn]] )
+          {
+            vector<int> tmp( 3, 0 );
+            tmp[0] = iCounter;
+            tmp[1] = x;
+            tmp[2] = y;
+            featcounter++;
+            selFeats.insert( tmp );
+            e[cn]++;
+          }
+        }
+      }
+    }
+  }
+
+  //cout << "size: " << selFeats.size() << endl;
+  //getchar();
+
+  map<int, int>::iterator mapit;
+
+  double globent = 0.0;
+
+  for ( mapit = e.begin() ; mapit != e.end(); mapit++ )
+  {
+    //cout << "class: " << mapit->first << ": " << mapit->second << endl;
+    double p = ( double )( *mapit ).second / ( double )featcounter;
+    globent += p * log2( p );
+  }
+
+  globent = -globent;
+
+  if ( globent < 0.5 )
+  {
+    cout << "globent to small: " << globent << endl;
+    return 0.0;
+  }
+
+  int classes = ( int )forest[tree][0].dist.size();
+
+  featsel.clear();
+
+  for ( int i = 0; i < featsPerSplit; i++ )
+  {
+    int x1, x2, y1, y2;
+    int ft = ( int )(( double )rand() / ( double )RAND_MAX * ( double )ftypes );
+
+    int tmpws = windowSize;
+
+    if ( integralImgs[0].width() == 0 )
+      ft = 0;
+
+    if ( ft > 0 )
+    {
+      tmpws *= 4;
+    }
+
+    if ( useGaussian )
+    {
+      double sigma = ( double )tmpws / 2.0;
+      x1 = randGaussDouble( sigma ) * ( double )tmpws;
+      x2 = randGaussDouble( sigma ) * ( double )tmpws;
+      y1 = randGaussDouble( sigma ) * ( double )tmpws;
+      y2 = randGaussDouble( sigma ) * ( double )tmpws;
+    }
+    else
+    {
+      x1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
+      x2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
+      y1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
+      y2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
+    }
+
+    if ( ft == 0 )
+    {
+      int f1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )featdim );
+      int f2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )featdim );
+      int o = ( int )(( double )rand() / ( double )RAND_MAX * ( double )ops.size() );
+      Operation *op = ops[o]->clone();
+      op->set( x1, y1, x2, y2, f1, f2, calcVal[ft] );
+      featsel.push_back( op );
+    }
+    else if ( ft == 1 )
+    {
+
+      int opssize = ( int )ops.size();
+      //opssize = 0;
+      int o = ( int )(( double )rand() / ( double )RAND_MAX * ((( double )cops.size() ) + ( double )opssize ) );
+
+      Operation *op;
+
+      if ( o < opssize )
+      {
+        int chans = ( int )forest[0][0].dist.size();
+        int f1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
+        int f2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
+        op = ops[o]->clone();
+        op->set( x1, y1, x2, y2, f1, f2, calcVal[ft] );
+      }
+      else
+      {
+        int chans = integralImgs[0].channels();
+        int f1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
+        int f2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
+        o -= opssize;
+        op = cops[o]->clone();
+        op->set( x1, y1, x2, y2, f1, f2, calcVal[ft] );
+      }
+
+      featsel.push_back( op );
+    }
+  }
+
 #pragma omp parallel for private(mapit)
-	for(int f = 0; f < featsPerSplit; f++)
-	{
-		double l_bestig = -numeric_limits< double >::max();
-		double l_splitval = -1.0;
-		set<vector<int> >::iterator it;
-		vector<double> vals;
-		
-		for ( it=selFeats.begin() ; it != selFeats.end(); it++ )
-		{
-			Features feat;
-			feat.feats = &feats[(*it)[0]];
-			feat.cfeats = &currentfeats[(*it)[0]];
-			feat.cTree = tree;
-			feat.tree = &forest[tree];
-			feat.integralImg = &integralImgs[(*it)[0]];
-			vals.push_back(featsel[f]->getVal(feat, (*it)[1], (*it)[2]));
-		}
-		
-		int counter = 0;
-		for ( it=selFeats.begin() ; it != selFeats.end(); it++ , counter++)
-		{
-			set<vector<int> >::iterator it2;
-			double val = vals[counter];
-			
-			map<int,int> eL, eR;
-			int counterL = 0, counterR = 0;
-			int counter2 = 0;
-			for ( it2=selFeats.begin() ; it2 != selFeats.end(); it2++, counter2++ )
-			{
-				int cn = labels[(*it2)[0]]((*it2)[1], (*it2)[2]);
-				//cout << "vals[counter2] " << vals[counter2] << " val: " <<  val << endl;
-				if(vals[counter2] < val)
-				{
-					//left entropie:
-					eL[cn] = eL[cn]+1;
-					counterL++;
-				}
-				else
-				{
-					//right entropie:
-					eR[cn] = eR[cn]+1;
-					counterR++;
-				}
-			}
-			
-			double leftent = 0.0;
-			for ( mapit=eL.begin() ; mapit != eL.end(); mapit++ )
-			{
-				double p = (double)(*mapit).second/(double)counterL;
-				leftent -= p*log2(p);
-			}
-			
-			double rightent = 0.0;
-			for ( mapit=eR.begin() ; mapit != eR.end(); mapit++ )
-			{
-				double p = (double)(*mapit).second/(double)counterR;
-				rightent -= p*log2(p);
-			}
-			//cout << "rightent: " << rightent << " leftent: " << leftent << endl;
-			
-			double pl = (double)counterL/(double)(counterL+counterR);
-			double ig = globent - (1.0-pl) * rightent - pl*leftent;
-			//double ig = globent - rightent - leftent;
-			
-			if(useShannonEntropy)
-			{
-				double esplit = - ( pl*log(pl) + (1-pl)*log(1-pl) );
-				ig = 2*ig / ( globent + esplit );
-			}
-			
-			if(ig > l_bestig)
-			{
-				l_bestig = ig;
-				l_splitval = val;
-			}
-		}
-		
+  for ( int f = 0; f < featsPerSplit; f++ )
+  {
+    double l_bestig = -numeric_limits< double >::max();
+    double l_splitval = -1.0;
+    set<vector<int> >::iterator it;
+    vector<double> vals;
+
+    for ( it = selFeats.begin() ; it != selFeats.end(); it++ )
+    {
+      Features feat;
+      feat.feats = &feats[( *it )[0]];
+      feat.cfeats = &currentfeats[( *it )[0]];
+      feat.cTree = tree;
+      feat.tree = &forest[tree];
+      feat.integralImg = &integralImgs[( *it )[0]];
+      vals.push_back( featsel[f]->getVal( feat, ( *it )[1], ( *it )[2] ) );
+    }
+
+    int counter = 0;
+
+    for ( it = selFeats.begin() ; it != selFeats.end(); it++ , counter++ )
+    {
+      set<vector<int> >::iterator it2;
+      double val = vals[counter];
+
+      map<int, int> eL, eR;
+      int counterL = 0, counterR = 0;
+      int counter2 = 0;
+
+      for ( it2 = selFeats.begin() ; it2 != selFeats.end(); it2++, counter2++ )
+      {
+        int cn = labels[( *it2 )[0]](( *it2 )[1], ( *it2 )[2] );
+        //cout << "vals[counter2] " << vals[counter2] << " val: " <<  val << endl;
+
+        if ( vals[counter2] < val )
+        {
+          //left entropie:
+          eL[cn] = eL[cn] + 1;
+          counterL++;
+        }
+        else
+        {
+          //right entropie:
+          eR[cn] = eR[cn] + 1;
+          counterR++;
+        }
+      }
+
+      double leftent = 0.0;
+
+      for ( mapit = eL.begin() ; mapit != eL.end(); mapit++ )
+      {
+        double p = ( double )( *mapit ).second / ( double )counterL;
+        leftent -= p * log2( p );
+      }
+
+      double rightent = 0.0;
+
+      for ( mapit = eR.begin() ; mapit != eR.end(); mapit++ )
+      {
+        double p = ( double )( *mapit ).second / ( double )counterR;
+        rightent -= p * log2( p );
+      }
+
+      //cout << "rightent: " << rightent << " leftent: " << leftent << endl;
+
+      double pl = ( double )counterL / ( double )( counterL + counterR );
+
+      double ig = globent - ( 1.0 - pl ) * rightent - pl * leftent;
+
+      //double ig = globent - rightent - leftent;
+
+      if ( useShannonEntropy )
+      {
+        double esplit = - ( pl * log( pl ) + ( 1 - pl ) * log( 1 - pl ) );
+        ig = 2 * ig / ( globent + esplit );
+      }
+
+      if ( ig > l_bestig )
+      {
+        l_bestig = ig;
+        l_splitval = val;
+      }
+    }
+
 #pragma omp critical
-{
-		//cout << "globent: " << globent <<  " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
-		//cout << "globent: " << globent <<  " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
-		//cout << "p: " << featsubset[f] << endl;
-		if(l_bestig > bestig)
-		{
-			bestig = l_bestig;
-			splitop = featsel[f];
-			splitval = l_splitval;
-		}
-}
-	}
-		//splitop->writeInfos();
-		//cout<< "ig: " << bestig << endl;
-	
-	/*for(int i = 0; i < featsPerSplit; i++)
-	{
-		if(featsel[i] != splitop)
-			delete featsel[i];
-	}*/
+    {
+      //cout << "globent: " << globent <<  " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
+      //cout << "globent: " << globent <<  " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
+      //cout << "p: " << featsubset[f] << endl;
+
+      if ( l_bestig > bestig )
+      {
+        bestig = l_bestig;
+        splitop = featsel[f];
+        splitval = l_splitval;
+      }
+    }
+  }
+
+  //splitop->writeInfos();
+  //cout<< "ig: " << bestig << endl;
+  //FIXME: delete all features!
+  /*for(int i = 0; i < featsPerSplit; i++)
+  {
+   if(featsel[i] != splitop)
+    delete featsel[i];
+  }*/
 #ifdef debug
-	cout << "globent: " << globent <<  " bestig " << bestig << " splitval: " << splitval << endl;
+  cout << "globent: " << globent <<  " bestig " << bestig << " splitval: " << splitval << endl;
+
 #endif
-	return bestig;
+  return bestig;
 }
 
-inline double SemSegContextTree::getMeanProb(const int &x,const int &y,const int &channel, const MultiChannelImageT<int> &currentfeats)
+inline double SemSegContextTree::getMeanProb( const int &x, const int &y, const int &channel, const MultiChannelImageT<int> &currentfeats )
 {
-	double val = 0.0;
-	for(int tree = 0; tree < nbTrees; tree++)
-	{
-		val += forest[tree][currentfeats.get(x,y,tree)].dist[channel];
-	}
-	
-	return val / (double)nbTrees;
+  double val = 0.0;
+
+  for ( int tree = 0; tree < nbTrees; tree++ )
+  {
+    val += forest[tree][currentfeats.get( x,y,tree )].dist[channel];
+  }
+
+  return val / ( double )nbTrees;
 }
 
-void SemSegContextTree::computeIntegralImage(const NICE::MultiChannelImageT<int> &currentfeats, const NICE::MultiChannelImageT<double> &lfeats, NICE::MultiChannelImageT<double> &integralImage)
+void SemSegContextTree::computeIntegralImage( const NICE::MultiChannelImageT<int> &currentfeats, const NICE::MultiChannelImageT<double> &lfeats, NICE::MultiChannelImageT<double> &integralImage )
 {
-	int xsize = currentfeats.width();
-	int ysize = currentfeats.height();
-	
-	int channels = (int)forest[0][0].dist.size();
+  int xsize = currentfeats.width();
+  int ysize = currentfeats.height();
+
+  int channels = ( int )forest[0][0].dist.size();
 
 #pragma omp parallel for
-	for(int c = 0; c < channels; c++)
-	{
-		integralImage.set(0,0,getMeanProb(0,0,c, currentfeats), c);
-		
-		//first column
-		for(int y = 1; y < ysize; y++)
-		{
-			integralImage.set(0,y,getMeanProb(0,y,c, currentfeats)+integralImage.get(0,y,c), c);
-		}
-		
-		//first row
-		for(int x = 1; x < xsize; x++)
-		{
-			integralImage.set(x,0,getMeanProb(x,0,c, currentfeats)+integralImage.get(x,0,c), c);
-		}
-		
-		//rest
-		for(int y = 1; y < ysize; y++)
-		{
-			for(int x = 1; x < xsize; x++)
-			{
-				double val = getMeanProb(x,y,c,currentfeats)+integralImage.get(x,y-1,c)+integralImage.get(x-1,y,c)-integralImage.get(x-1,y-1,c);
-				integralImage.set(x, y, val, c);
-			}
-		}
-	}
-	
-	int channels2 = (int)lfeats.channels();
-	
-	xsize = lfeats.width();
-	ysize = lfeats.height();
-	if(integralImage.get(xsize-1,ysize-1,channels) == 0.0)
-	{
+
+  for ( int c = 0; c < channels; c++ )
+  {
+    integralImage.set( 0, 0, getMeanProb( 0, 0, c, currentfeats ), c );
+
+    //first column
+
+    for ( int y = 1; y < ysize; y++ )
+    {
+      integralImage.set( 0, y, getMeanProb( 0, y, c, currentfeats ) + integralImage.get( 0, y, c ), c );
+    }
+
+    //first row
+    for ( int x = 1; x < xsize; x++ )
+    {
+      integralImage.set( x, 0, getMeanProb( x, 0, c, currentfeats ) + integralImage.get( x, 0, c ), c );
+    }
+
+    //rest
+    for ( int y = 1; y < ysize; y++ )
+    {
+      for ( int x = 1; x < xsize; x++ )
+      {
+        double val = getMeanProb( x, y, c, currentfeats ) + integralImage.get( x, y - 1, c ) + integralImage.get( x - 1, y, c ) - integralImage.get( x - 1, y - 1, c );
+        integralImage.set( x, y, val, c );
+      }
+    }
+  }
+
+  int channels2 = ( int )lfeats.channels();
+
+  xsize = lfeats.width();
+  ysize = lfeats.height();
+
+  if ( integralImage.get( xsize - 1, ysize - 1, channels ) == 0.0 )
+  {
 #pragma omp parallel for
-		for(int c1 = 0; c1 < channels2; c1++)
-		{
-			int c = channels+c1; 
-			integralImage.set(0,0,lfeats.get(0,0,c1), c);
-			
-			//first column
-			for(int y = 1; y < ysize; y++)
-			{
-				integralImage.set(0,y,lfeats.get(0,y,c1)+integralImage.get(0,y,c), c);
-			}
-			
-			//first row
-			for(int x = 1; x < xsize; x++)
-			{
-				integralImage.set(x,0,lfeats.get(x,0,c1)+integralImage.get(x,0,c), c);
-			}
-			
-			//rest
-			for(int y = 1; y < ysize; y++)
-			{
-				for(int x = 1; x < xsize; x++)
-				{
-					double val = lfeats.get(x,y,c1)+integralImage.get(x,y-1,c)+integralImage.get(x-1,y,c)-integralImage.get(x-1,y-1,c);
-					integralImage.set(x, y, val, c);
-				}
-			}
-		}
-	}
+
+    for ( int c1 = 0; c1 < channels2; c1++ )
+    {
+      int c = channels + c1;
+      integralImage.set( 0, 0, lfeats.get( 0, 0, c1 ), c );
+
+      //first column
+
+      for ( int y = 1; y < ysize; y++ )
+      {
+        integralImage.set( 0, y, lfeats.get( 0, y, c1 ) + integralImage.get( 0, y, c ), c );
+      }
+
+      //first row
+      for ( int x = 1; x < xsize; x++ )
+      {
+        integralImage.set( x, 0, lfeats.get( x, 0, c1 ) + integralImage.get( x, 0, c ), c );
+      }
+
+      //rest
+      for ( int y = 1; y < ysize; y++ )
+      {
+        for ( int x = 1; x < xsize; x++ )
+        {
+          double val = lfeats.get( x, y, c1 ) + integralImage.get( x, y - 1, c ) + integralImage.get( x - 1, y, c ) - integralImage.get( x - 1, y - 1, c );
+          integralImage.set( x, y, val, c );
+        }
+      }
+    }
+  }
 }
 
-void SemSegContextTree::train ( const MultiDataset *md )
+void SemSegContextTree::train( const MultiDataset *md )
 {
-	const LabeledSet train = * ( *md ) ["train"];
-	const LabeledSet *trainp = &train;
-	
-	ProgressBar pb ( "compute feats" );
-	pb.show();
-	
-	//TODO: Speichefresser!, lohnt sich sparse?
-	vector<MultiChannelImageT<double> > allfeats;
-	vector<MultiChannelImageT<int> > currentfeats;
-	vector<MatrixT<int> > labels;
-
-	std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
-	if ( forbidden_classes_s == "" )
-	{
-		forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
-	}
-	
-	classnames.getSelection ( forbidden_classes_s, forbidden_classes );
-	
-	int imgcounter = 0;
-	
-	/*MultiChannelImageT<int> ttmp2(0,0,0);
-	MultiChannelImageT<double> ttmp1(100,100,1);
-	MultiChannelImageT<double> tint(100,100,1);
-	ttmp1.setAll(1.0);
-	tint.setAll(0.0);
-	computeIntegralImage(ttmp2,ttmp1,tint);
-
-	
-	for(int i = 0; i < cops.size(); i++)
-	{
-		Features feats;
-		feats.feats = &tint;
-		feats.cfeats = &ttmp2;
-		feats.cTree = 0;
-		feats.tree = new vector<TreeNode>;
-		feats.integralImg = &tint;
-		cops[i]->set(-10, -6, 8, 9, 0, 0, new MCImageAccess());
-		cout << "for: " << cops[i]->writeInfos() << endl;
-		int y = 50;
-		for(int x = 40; x < 44; x++)
-		{
-			cout << "x: " << x << " val: " << cops[i]->getVal(feats, x, y) << endl;
-		}
-	}
-	
-	getchar();*/
-	
-	LOOP_ALL_S ( *trainp )
-	{
-		EACH_INFO ( classno,info );
-
-		NICE::ColorImage img;
-
-		std::string currentFile = info.img();
-
-		CachedExample *ce = new CachedExample ( currentFile );
-
-		const LocalizationResult *locResult = info.localization();
-		if ( locResult->size() <= 0 )
-		{
-			fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
-			          currentFile.c_str() );
-			continue;
-		}
-
-		fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );
-
-		int xsize, ysize;
-		ce->getImageSize ( xsize, ysize );
-		
-		MatrixT<int> tmpMat(xsize,ysize);
-		
-		currentfeats.push_back(MultiChannelImageT<int>(xsize,ysize,nbTrees));
-		currentfeats[imgcounter].setAll(0);
-
-		labels.push_back(tmpMat);
-
-		try {
-			img = ColorImage(currentFile);
-		} catch (Exception) {
-			cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
-			continue;
-		}
-
-		Globals::setCurrentImgFN ( currentFile );
-
-		//TODO: resize image?!
-		MultiChannelImageT<double> feats;
-		allfeats.push_back(feats);
+  const LabeledSet train = * ( *md )["train"];
+  const LabeledSet *trainp = &train;
+
+  ProgressBar pb( "compute feats" );
+  pb.show();
+
+  //TODO: Speichefresser!, lohnt sich sparse?
+  vector<MultiChannelImageT<double> > allfeats;
+  vector<MultiChannelImageT<int> > currentfeats;
+  vector<MatrixT<int> > labels;
+
+  std::string forbidden_classes_s = conf->gS( "analysis", "donttrain", "" );
+
+  if ( forbidden_classes_s == "" )
+  {
+    forbidden_classes_s = conf->gS( "analysis", "forbidden_classes", "" );
+  }
+
+  classnames.getSelection( forbidden_classes_s, forbidden_classes );
+
+  int imgcounter = 0;
+
+  /*MultiChannelImageT<int> ttmp2(0,0,0);
+  MultiChannelImageT<double> ttmp1(100,100,1);
+  MultiChannelImageT<double> tint(100,100,1);
+  ttmp1.setAll(1.0);
+  tint.setAll(0.0);
+  computeIntegralImage(ttmp2,ttmp1,tint);
+
+
+  for(int i = 0; i < cops.size(); i++)
+  {
+   Features feats;
+   feats.feats = &tint;
+   feats.cfeats = &ttmp2;
+   feats.cTree = 0;
+   feats.tree = new vector<TreeNode>;
+   feats.integralImg = &tint;
+   cops[i]->set(-10, -6, 8, 9, 0, 0, new MCImageAccess());
+   cout << "for: " << cops[i]->writeInfos() << endl;
+   int y = 50;
+   for(int x = 40; x < 44; x++)
+   {
+    cout << "x: " << x << " val: " << cops[i]->getVal(feats, x, y) << endl;
+   }
+  }
+
+  getchar();*/
+
+  LOOP_ALL_S( *trainp )
+  {
+    EACH_INFO( classno, info );
+
+    NICE::ColorImage img;
+
+    std::string currentFile = info.img();
+
+    CachedExample *ce = new CachedExample( currentFile );
+
+    const LocalizationResult *locResult = info.localization();
+
+    if ( locResult->size() <= 0 )
+    {
+      fprintf( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+               currentFile.c_str() );
+      continue;
+    }
+
+    fprintf( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );
+
+    int xsize, ysize;
+    ce->getImageSize( xsize, ysize );
+
+    MatrixT<int> tmpMat( xsize, ysize );
+
+    currentfeats.push_back( MultiChannelImageT<int>( xsize, ysize, nbTrees ) );
+    currentfeats[imgcounter].setAll( 0 );
+
+    labels.push_back( tmpMat );
+
+    try {
+      img = ColorImage( currentFile );
+    } catch ( Exception ) {
+      cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
+      continue;
+    }
+
+    Globals::setCurrentImgFN( currentFile );
+
+    //TODO: resize image?!
+    MultiChannelImageT<double> feats;
+    allfeats.push_back( feats );
 #ifdef LOCALFEATS
-		lfcw->getFeats(img, allfeats[imgcounter]);
+    lfcw->getFeats( img, allfeats[imgcounter] );
 #else
-		allfeats[imgcounter].reInit(xsize, ysize, 3, true);
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++)
-			{
-				for(int r = 0; r < 3; r++)
-				{
-					allfeats[imgcounter].set(x,y,img.getPixel(x,y,r),r);
-				}
-			}
-		}
+    allfeats[imgcounter].reInit( xsize, ysize, 3, true );
+
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++ )
+      {
+        for ( int r = 0; r < 3; r++ )
+        {
+          allfeats[imgcounter].set( x, y, img.getPixel( x, y, r ), r );
+        }
+      }
+    }
+
 #endif
-		
-		// getting groundtruth
-		NICE::Image pixelLabels (xsize, ysize);
-		pixelLabels.set(0);
-		locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++)
-			{
-				classno = pixelLabels.getPixel(x, y);
-				labels[imgcounter](x,y) = classno;
-				if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
-					continue;
-				labelcounter[classno]++;
-				
-			}
-		}
-		
-		imgcounter++;
-		pb.update ( trainp->count());
-		delete ce;
-	}
-	pb.hide();
-	
-	map<int,int>::iterator mapit;
-	int classes = 0;
-	for(mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++)
-	{
-		labelmap[mapit->first] = classes;
-		
-		labelmapback[classes] = mapit->first;
-		classes++;
-	}
-	
-	//balancing
-	int featcounter = 0;
-	a = vector<double>(classes,0.0);
-	for(int iCounter = 0; iCounter < imgcounter; iCounter++)
-	{
-		int xsize = (int)currentfeats[iCounter].width();
-		int ysize = (int)currentfeats[iCounter].height();
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++)
-			{
-				featcounter++;
-				int cn = labels[iCounter](x,y);
-				a[labelmap[cn]] ++;
-			}
-		}
-	}
-	
-	for(int i = 0; i < (int)a.size(); i++)
-	{
-		a[i] /= (double)featcounter;
-	}
-	
+
+    // getting groundtruth
+    NICE::Image pixelLabels( xsize, ysize );
+
+    pixelLabels.set( 0 );
+
+    locResult->calcLabeledImage( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++ )
+      {
+        classno = pixelLabels.getPixel( x, y );
+        labels[imgcounter]( x, y ) = classno;
+
+        if ( forbidden_classes.find( classno ) != forbidden_classes.end() )
+          continue;
+
+        labelcounter[classno]++;
+
+      }
+    }
+
+    imgcounter++;
+
+    pb.update( trainp->count() );
+    delete ce;
+  }
+
+  pb.hide();
+
+  map<int, int>::iterator mapit;
+  int classes = 0;
+
+  for ( mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++ )
+  {
+    labelmap[mapit->first] = classes;
+
+    labelmapback[classes] = mapit->first;
+    classes++;
+  }
+
+  //balancing
+  int featcounter = 0;
+
+  a = vector<double>( classes, 0.0 );
+
+  for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
+  {
+    int xsize = ( int )currentfeats[iCounter].width();
+    int ysize = ( int )currentfeats[iCounter].height();
+
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++ )
+      {
+        featcounter++;
+        int cn = labels[iCounter]( x, y );
+        a[labelmap[cn]] ++;
+      }
+    }
+  }
+
+  for ( int i = 0; i < ( int )a.size(); i++ )
+  {
+    a[i] /= ( double )featcounter;
+  }
+
 #ifdef DEBUG
-	for(int i = 0; i < (int)a.size(); i++)
-	{
-		cout << "a["<<i<<"]: " << a[i] << endl;
-	}
-	cout << "a.size: " << a.size() << endl;
+  for ( int i = 0; i < ( int )a.size(); i++ )
+  {
+    cout << "a[" << i << "]: " << a[i] << endl;
+  }
+
+  cout << "a.size: " << a.size() << endl;
+
 #endif
-	
-	int depth = 0;
-	for(int t = 0; t < nbTrees; t++)
-	{
-		vector<TreeNode> tree;
-		tree.push_back(TreeNode());
-		tree[0].dist = vector<double>(classes,0.0);
-		tree[0].depth = depth;
-		forest.push_back(tree);
-	}
-	
-	vector<int> startnode(nbTrees,0);
-	bool allleaf = false;
-	//int baseFeatSize = allfeats[0].size();
-	
-	vector<MultiChannelImageT<double> > integralImgs(imgcounter,MultiChannelImageT<double>());
-	
-	while(!allleaf && depth < maxDepth)
-	{
-		allleaf = true;
-		vector<MultiChannelImageT<int> > lastfeats = currentfeats;
-		
+
+  int depth = 0;
+
+  for ( int t = 0; t < nbTrees; t++ )
+  {
+    vector<TreeNode> tree;
+    tree.push_back( TreeNode() );
+    tree[0].dist = vector<double>( classes, 0.0 );
+    tree[0].depth = depth;
+    forest.push_back( tree );
+  }
+
+  vector<int> startnode( nbTrees, 0 );
+
+  bool allleaf = false;
+  //int baseFeatSize = allfeats[0].size();
+
+  vector<MultiChannelImageT<double> > integralImgs( imgcounter, MultiChannelImageT<double>() );
+
+  while ( !allleaf && depth < maxDepth )
+  {
+    allleaf = true;
+    vector<MultiChannelImageT<int> > lastfeats = currentfeats;
+
 #if 1
-		Timer timer;
-		timer.start();
+    Timer timer;
+    timer.start();
 #endif
-		
-		for(int tree = 0; tree < nbTrees; tree++)
-		{
-			int t = (int) forest[tree].size();
-			int s = startnode[tree];
-			startnode[tree] = t;
-	//TODO vielleicht parallel wenn nächste schleife trotzdem noch parallelsiert würde, die hat mehr gewicht		
-	//#pragma omp parallel for
-			for(int i = s; i < t; i++)
-			{
-				if(!forest[tree][i].isleaf && forest[tree][i].left < 0)
-				{  
-					Operation *splitfeat = NULL;
-					double splitval;
-					double bestig = getBestSplit(allfeats, lastfeats, integralImgs, labels, i, splitfeat, splitval, tree);
-					
-					forest[tree][i].feat = splitfeat;
-					forest[tree][i].decision = splitval;
-					
-					if(splitfeat != NULL)
-					{
-						allleaf = false;
-						int left = forest[tree].size();
-						forest[tree].push_back(TreeNode());
-						forest[tree].push_back(TreeNode());
-						int right = left+1;
-						forest[tree][i].left = left;
-						forest[tree][i].right = right;
-						forest[tree][left].dist = vector<double>(classes, 0.0);
-						forest[tree][right].dist = vector<double>(classes, 0.0);
-						forest[tree][left].depth = depth+1;
-						forest[tree][right].depth = depth+1;
-						
-	#pragma omp parallel for
-						for(int iCounter = 0; iCounter < imgcounter; iCounter++)
-						{
-							int xsize = currentfeats[iCounter].width();
-							int ysize = currentfeats[iCounter].height();
-							for(int x = 0; x < xsize; x++)
-							{
-								for(int y = 0; y < ysize; y++)
-								{
-									if(currentfeats[iCounter].get(x, y, tree) == i)
-									{
-										Features feat;
-										feat.feats = &allfeats[iCounter];
-										feat.cfeats = &lastfeats[iCounter];
-										feat.cTree = tree;
-										feat.tree = &forest[tree];
-										feat.integralImg = &integralImgs[iCounter];
-										double val = splitfeat->getVal(feat,x,y);
-										if(val < splitval)
-										{ 
-											currentfeats[iCounter].set(x,y,left,tree);
-											forest[tree][left].dist[labelmap[labels[iCounter](x,y)]]++;
-										}
-										else
-										{  
-											currentfeats[iCounter].set(x,y,right,tree);
-											forest[tree][right].dist[labelmap[labels[iCounter](x,y)]]++;
-										}
-									}
-								}
-							}
-						}
-						
-						double lcounter = 0.0, rcounter = 0.0;
-						for(uint d = 0; d < forest[tree][left].dist.size(); d++)
-						{
-							if ( forbidden_classes.find ( labelmapback[d] ) != forbidden_classes.end() )
-							{
-								forest[tree][left].dist[d] = 0;
-								forest[tree][right].dist[d] = 0;
-							}
-							else
-							{
-								forest[tree][left].dist[d]/=a[d];
-								lcounter +=forest[tree][left].dist[d];
-								forest[tree][right].dist[d]/=a[d];
-								rcounter +=forest[tree][right].dist[d];
-							}
-						}
-						
-						if(lcounter <= 0 || rcounter <= 0)
-						{
-							cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
-							cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
-							cout << "bestig: " << bestig << endl;
-							for(int iCounter = 0; iCounter < imgcounter; iCounter++)
-							{
-								int xsize = currentfeats[iCounter].width();
-								int ysize = currentfeats[iCounter].height();
-								int counter = 0;
-								for(int x = 0; x < xsize; x++)
-								{
-									for(int y = 0; y < ysize; y++)
-									{
-										if(lastfeats[iCounter].get(x,y,tree) == i)
-										{
-											if(++counter > 30)
-												break;
-											Features feat;
-											feat.feats = &allfeats[iCounter];
-											feat.cfeats = &lastfeats[iCounter];
-											feat.cTree = tree;
-											feat.tree = &forest[tree];
-											feat.integralImg = &integralImgs[iCounter];
-											double val = splitfeat->getVal(feat,x,y);
-											cout << "splitval: " << splitval << " val: " << val << endl;
-										}
-									}
-								}
-							}
-							assert(lcounter > 0 && rcounter > 0);
-						}
-						
-						for(uint d = 0; d < forest[tree][left].dist.size(); d++)
-						{
-							forest[tree][left].dist[d]/=lcounter;
-							forest[tree][right].dist[d]/=rcounter;
-						}
-					}
-					else
-					{
-						forest[tree][i].isleaf = true;
-					}
-				}
-			}
-		}
-		//TODO: features neu berechnen!
-			
-		//compute integral image
-		int channels = classes+allfeats[0].channels();
-			
-		if(integralImgs[0].width() == 0)
-		{
-			for(int i = 0; i < imgcounter; i++)
-			{
-				int xsize = allfeats[i].width();
-				int ysize = allfeats[i].height();
-				integralImgs[i].reInit(xsize, ysize, channels);
-				integralImgs[i].setAll(0.0);
-			}
-		}
-			
-		for(int i = 0; i < imgcounter; i++)
-		{
-			computeIntegralImage(currentfeats[i],allfeats[i], integralImgs[i]);
-		}
+
+    for ( int tree = 0; tree < nbTrees; tree++ )
+    {
+      int t = ( int ) forest[tree].size();
+      int s = startnode[tree];
+      startnode[tree] = t;
+      //TODO vielleicht parallel wenn nächste schleife trotzdem noch parallelsiert würde, die hat mehr gewicht
+      //#pragma omp parallel for
+
+      for ( int i = s; i < t; i++ )
+      {
+        if ( !forest[tree][i].isleaf && forest[tree][i].left < 0 )
+        {
+          Operation *splitfeat = NULL;
+          double splitval;
+          double bestig = getBestSplit( allfeats, lastfeats, integralImgs, labels, i, splitfeat, splitval, tree );
+
+          forest[tree][i].feat = splitfeat;
+          forest[tree][i].decision = splitval;
+
+          if ( splitfeat != NULL )
+          {
+            allleaf = false;
+            int left = forest[tree].size();
+            forest[tree].push_back( TreeNode() );
+            forest[tree].push_back( TreeNode() );
+            int right = left + 1;
+            forest[tree][i].left = left;
+            forest[tree][i].right = right;
+            forest[tree][left].dist = vector<double>( classes, 0.0 );
+            forest[tree][right].dist = vector<double>( classes, 0.0 );
+            forest[tree][left].depth = depth + 1;
+            forest[tree][right].depth = depth + 1;
+
+#pragma omp parallel for
+
+            for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
+            {
+              int xsize = currentfeats[iCounter].width();
+              int ysize = currentfeats[iCounter].height();
+
+              for ( int x = 0; x < xsize; x++ )
+              {
+                for ( int y = 0; y < ysize; y++ )
+                {
+                  if ( currentfeats[iCounter].get( x, y, tree ) == i )
+                  {
+                    Features feat;
+                    feat.feats = &allfeats[iCounter];
+                    feat.cfeats = &lastfeats[iCounter];
+                    feat.cTree = tree;
+                    feat.tree = &forest[tree];
+                    feat.integralImg = &integralImgs[iCounter];
+                    double val = splitfeat->getVal( feat, x, y );
+
+                    if ( val < splitval )
+                    {
+                      currentfeats[iCounter].set( x, y, left, tree );
+                      forest[tree][left].dist[labelmap[labels[iCounter]( x, y )]]++;
+                    }
+                    else
+                    {
+                      currentfeats[iCounter].set( x, y, right, tree );
+                      forest[tree][right].dist[labelmap[labels[iCounter]( x, y )]]++;
+                    }
+                  }
+                }
+              }
+            }
+
+            double lcounter = 0.0, rcounter = 0.0;
+
+            for ( uint d = 0; d < forest[tree][left].dist.size(); d++ )
+            {
+              if ( forbidden_classes.find( labelmapback[d] ) != forbidden_classes.end() )
+              {
+                forest[tree][left].dist[d] = 0;
+                forest[tree][right].dist[d] = 0;
+              }
+              else
+              {
+                forest[tree][left].dist[d] /= a[d];
+                lcounter += forest[tree][left].dist[d];
+                forest[tree][right].dist[d] /= a[d];
+                rcounter += forest[tree][right].dist[d];
+              }
+            }
+
+            if ( lcounter <= 0 || rcounter <= 0 )
+            {
+              cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
+              cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
+              cout << "bestig: " << bestig << endl;
+
+              for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
+              {
+                int xsize = currentfeats[iCounter].width();
+                int ysize = currentfeats[iCounter].height();
+                int counter = 0;
+
+                for ( int x = 0; x < xsize; x++ )
+                {
+                  for ( int y = 0; y < ysize; y++ )
+                  {
+                    if ( lastfeats[iCounter].get( x, y, tree ) == i )
+                    {
+                      if ( ++counter > 30 )
+                        break;
+
+                      Features feat;
+
+                      feat.feats = &allfeats[iCounter];
+
+                      feat.cfeats = &lastfeats[iCounter];
+
+                      feat.cTree = tree;
+
+                      feat.tree = &forest[tree];
+
+                      feat.integralImg = &integralImgs[iCounter];
+
+                      double val = splitfeat->getVal( feat, x, y );
+
+                      cout << "splitval: " << splitval << " val: " << val << endl;
+                    }
+                  }
+                }
+              }
+
+              assert( lcounter > 0 && rcounter > 0 );
+            }
+
+            for ( uint d = 0; d < forest[tree][left].dist.size(); d++ )
+            {
+              forest[tree][left].dist[d] /= lcounter;
+              forest[tree][right].dist[d] /= rcounter;
+            }
+          }
+          else
+          {
+            forest[tree][i].isleaf = true;
+          }
+        }
+      }
+    }
+
+    //TODO: features neu berechnen!
+
+    //compute integral image
+    int channels = classes + allfeats[0].channels();
+
+    if ( integralImgs[0].width() == 0 )
+    {
+      for ( int i = 0; i < imgcounter; i++ )
+      {
+        int xsize = allfeats[i].width();
+        int ysize = allfeats[i].height();
+        integralImgs[i].reInit( xsize, ysize, channels );
+        integralImgs[i].setAll( 0.0 );
+      }
+    }
+
+    for ( int i = 0; i < imgcounter; i++ )
+    {
+      computeIntegralImage( currentfeats[i], allfeats[i], integralImgs[i] );
+    }
+
 #if 1
-		timer.stop();
-		cout << "time for depth " << depth << ": " << timer.getLast() << endl;
+    timer.stop();
+
+    cout << "time for depth " << depth << ": " << timer.getLast() << endl;
+
 #endif
-		depth++;
-	#ifdef DEBUG
-			cout << "depth: " << depth << endl;
-	#endif
-	}
-	
+    depth++;
+
 #ifdef DEBUG
-	for(int tree = 0; tree < nbTrees; tree++)
-	{
-		int t = (int) forest[tree].size();
-		for(int i = 0; i < t; i++)
-		{
-			printf("tree[%i]: left: %i, right: %i", i, forest[tree][i].left, forest[tree][i].right);
-			if(!forest[tree][i].isleaf && forest[tree][i].left != -1)
-			{
-				cout <<  ", feat: " << forest[tree][i].feat->writeInfos() << " ";
-				opOverview[forest[tree][i].feat->getOps()]++;
-			}
-			for(int d = 0; d < (int)forest[tree][i].dist.size(); d++)
-			{
-				cout << " " << forest[tree][i].dist[d];
-			}
-			cout << endl;
-		}
-	}
-	
-	for(uint c = 0; c < ops.size(); c++)
-	{
-		cout << ops[c]->writeInfos() << ": " << opOverview[ops[c]->getOps()] << endl;
-	}
-	
-	for(uint c = 0; c < cops.size(); c++)
-	{
-		cout << cops[c]->writeInfos() << ": " << opOverview[cops[c]->getOps()] << endl;
-	}
+    cout << "depth: " << depth << endl;
+
+#endif
+  }
+
+#ifdef DEBUG
+  for ( int tree = 0; tree < nbTrees; tree++ )
+  {
+    int t = ( int ) forest[tree].size();
+
+    for ( int i = 0; i < t; i++ )
+    {
+      printf( "tree[%i]: left: %i, right: %i", i, forest[tree][i].left, forest[tree][i].right );
+
+      if ( !forest[tree][i].isleaf && forest[tree][i].left != -1 )
+      {
+        cout <<  ", feat: " << forest[tree][i].feat->writeInfos() << " ";
+        opOverview[forest[tree][i].feat->getOps()]++;
+      }
+
+      for ( int d = 0; d < ( int )forest[tree][i].dist.size(); d++ )
+      {
+        cout << " " << forest[tree][i].dist[d];
+      }
+
+      cout << endl;
+    }
+  }
+
+  for ( uint c = 0; c < ops.size(); c++ )
+  {
+    cout << ops[c]->writeInfos() << ": " << opOverview[ops[c]->getOps()] << endl;
+  }
+
+  for ( uint c = 0; c < cops.size(); c++ )
+  {
+    cout << cops[c]->writeInfos() << ": " << opOverview[cops[c]->getOps()] << endl;
+  }
+
 #endif
 }
 
-void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult,NICE::MultiChannelImageT<double> & probabilities )
+void SemSegContextTree::semanticseg( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
 {
-	int xsize;
-	int ysize;
-	ce->getImageSize ( xsize, ysize );
-	
-	int numClasses = classNames->numClasses();
-	
-	fprintf (stderr, "ContextTree classification !\n");
-
-	probabilities.reInit ( xsize, ysize, numClasses, true );
-	probabilities.setAll ( 0 );
-
-	NICE::ColorImage img;
-
-	std::string currentFile = Globals::getCurrentImgFN();
-	
-	try {
-		img = ColorImage(currentFile);
-	} catch (Exception) {
-		cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
-		return;
-	}
-	
-	//TODO: resize image?!
-		
-		MultiChannelImageT<double> feats;
+  int xsize;
+  int ysize;
+  ce->getImageSize( xsize, ysize );
+
+  int numClasses = classNames->numClasses();
+
+  fprintf( stderr, "ContextTree classification !\n" );
+
+  probabilities.reInit( xsize, ysize, numClasses, true );
+  probabilities.setAll( 0 );
+
+  NICE::ColorImage img;
+
+  std::string currentFile = Globals::getCurrentImgFN();
+
+  try {
+    img = ColorImage( currentFile );
+  } catch ( Exception ) {
+    cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
+    return;
+  }
+
+  //TODO: resize image?!
+
+  MultiChannelImageT<double> feats;
+
 #ifdef LOCALFEATS
-		lfcw->getFeats(img, feats);
+  lfcw->getFeats( img, feats );
+
 #else
-		feats.reInit (xsize, ysize, 3, true);
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++)
-			{
-				for(int r = 0; r < 3; r++)
-				{
-					feats.set(x,y,img.getPixel(x,y,r),r);
-				}
-			}
-		}
+  feats.reInit( xsize, ysize, 3, true );
+
+  for ( int x = 0; x < xsize; x++ )
+  {
+    for ( int y = 0; y < ysize; y++ )
+    {
+      for ( int r = 0; r < 3; r++ )
+      {
+        feats.set( x, y, img.getPixel( x, y, r ), r );
+      }
+    }
+  }
+
 #endif
-	
-	bool allleaf = false;
-	
-	MultiChannelImageT<double> integralImg;
-	
-	MultiChannelImageT<int> currentfeats(xsize, ysize, nbTrees);
-	currentfeats.setAll(0);
-	int depth = 0;
-	while(!allleaf)
-	{
-		allleaf = true;
-		//TODO vielleicht parallel wenn nächste schleife auch noch parallelsiert würde, die hat mehr gewicht
-		//#pragma omp parallel for
-		MultiChannelImageT<int> lastfeats = currentfeats;
-		for(int tree = 0; tree < nbTrees; tree++)
-		{
-			for(int x = 0; x < xsize; x++)
-			{
-				for(int y = 0; y < ysize; y++)
-				{
-					int t = currentfeats.get(x,y,tree);
-					if(forest[tree][t].left > 0)
-					{
-						allleaf = false;
-						Features feat;
-						feat.feats = &feats;
-						feat.cfeats = &lastfeats;
-						feat.cTree = tree;
-						feat.tree = &forest[tree];
-						feat.integralImg = &integralImg;
-						
-						double val = forest[tree][t].feat->getVal(feat,x,y);
-						
-						if(val < forest[tree][t].decision)
-						{
-							currentfeats.set(x, y, forest[tree][t].left, tree);
-						}
-						else
-						{
-							currentfeats.set(x, y, forest[tree][t].right, tree);
-						}
-					}
-				}
-			}
-			
-			//compute integral image
-			int channels = (int)labelmap.size()+feats.channels();
-			
-			if(integralImg.width() == 0)
-			{
-				int xsize = feats.width();
-				int ysize = feats.height();
-				
-				integralImg.reInit(xsize, ysize, channels);
-			}
-		}
-		computeIntegralImage(currentfeats,feats, integralImg);
-				
-		depth++;
-	}
-	
-	if(pixelWiseLabeling)
-	{
-		//finales labeln:
-		long int offset = 0;
-		for(int x = 0; x < xsize; x++)
-		{
-			for(int y = 0; y < ysize; y++,offset++)
-			{
-				double maxvalue = - numeric_limits<double>::max(); //TODO: das muss nur pro knoten gemacht werden, nicht pro pixel
-				int maxindex = 0;
-				uint s = forest[0][0].dist.size();
-				for(uint i = 0; i < s; i++)
-				{
-					probabilities.data[labelmapback[i]][offset] = getMeanProb(x,y,i,currentfeats);
-					if(probabilities.data[labelmapback[i]][offset] > maxvalue)
-					{
-						maxvalue = probabilities.data[labelmapback[i]][offset];
-						maxindex = labelmapback[i];
-					}
-					segresult.setPixel(x,y,maxindex);
-				}
-				if(maxvalue > 1)
-					cout << "maxvalue: " << maxvalue << endl;
-			}
-		}
-	}
-	else
-	{
-		//final labeling using segmentation
-		//TODO: segmentation
-		Matrix regions;
-		int regionNumber = segmentation->segRegions(img,regions);
-		cout << "regions: " << regionNumber << endl;
-		int dSize = forest[0][0].dist.size();
-		vector<vector<double> > regionProbs(regionNumber, vector<double>(dSize,0.0));
-		vector<int> bestlabels(regionNumber, 0);
-		
-		/*
-		for(int r = 0; r < regionNumber; r++)
-		{
-			Image over(img.width(), img.height());
-			for(int y = 0; y < img.height(); y++)
-			{
-				for(int x = 0; x < img.width(); x++)
-				{
-					if(((int)regions(x,y)) == r)
-						over.setPixel(x,y,1);
-					else
-						over.setPixel(x,y,0);
-				}
-			}
-			cout << "r: " << r << endl;
-			showImageOverlay(img, over);
-		}
-		*/
-		
-		for(int y = 0; y < img.height(); y++)
-		{
-			for(int x = 0; x < img.width(); x++)
-			{
-				int cregion = regions(x,y);
-				for(int d = 0; d < dSize; d++)
-				{
-					regionProbs[cregion][d]+=getMeanProb(x,y,d,currentfeats);
-				}
-			}
-		}
-		
-		int roi = 38;
-		
-		for(int r = 0; r < regionNumber; r++)
-		{
-			double maxval = regionProbs[r][0];
-			bestlabels[r] = 0;
-			if(roi == r)
-			{
-				cout << "r: " << r << endl;
-				cout << "0: " << regionProbs[r][0] << endl;
-			}
-			for(int d = 1; d < dSize; d++)
-			{
-				if(maxval < regionProbs[r][d])
-				{
-					maxval = regionProbs[r][d];
-					bestlabels[r] = d;
-				}
-				if(roi == r)
-				{
-					cout << d << ": " << regionProbs[r][d] << endl;
-				}
-			}
-			if(roi == r)
-			{
-				cout << "bestlabel: " << bestlabels[r] << " danach: " << labelmapback[bestlabels[r]] << endl;
-			}
-			bestlabels[r] = labelmapback[bestlabels[r]];
-		}
-		
-		for(int y = 0; y < img.height(); y++)
-		{
-			for(int x = 0; x < img.width(); x++)
-			{
-				
-				segresult.setPixel(x,y,bestlabels[regions(x,y)]);
-			}
-		}
-	}
+
+  bool allleaf = false;
+
+  MultiChannelImageT<double> integralImg;
+
+  MultiChannelImageT<int> currentfeats( xsize, ysize, nbTrees );
+
+  currentfeats.setAll( 0 );
+
+  int depth = 0;
+
+  while ( !allleaf )
+  {
+    allleaf = true;
+    //TODO vielleicht parallel wenn nächste schleife auch noch parallelsiert würde, die hat mehr gewicht
+    //#pragma omp parallel for
+    MultiChannelImageT<int> lastfeats = currentfeats;
+
+    for ( int tree = 0; tree < nbTrees; tree++ )
+    {
+      for ( int x = 0; x < xsize; x++ )
+      {
+        for ( int y = 0; y < ysize; y++ )
+        {
+          int t = currentfeats.get( x, y, tree );
+
+          if ( forest[tree][t].left > 0 )
+          {
+            allleaf = false;
+            Features feat;
+            feat.feats = &feats;
+            feat.cfeats = &lastfeats;
+            feat.cTree = tree;
+            feat.tree = &forest[tree];
+            feat.integralImg = &integralImg;
+
+            double val = forest[tree][t].feat->getVal( feat, x, y );
+
+            if ( val < forest[tree][t].decision )
+            {
+              currentfeats.set( x, y, forest[tree][t].left, tree );
+            }
+            else
+            {
+              currentfeats.set( x, y, forest[tree][t].right, tree );
+            }
+          }
+        }
+      }
+
+      //compute integral image
+      int channels = ( int )labelmap.size() + feats.channels();
+
+      if ( integralImg.width() == 0 )
+      {
+        int xsize = feats.width();
+        int ysize = feats.height();
+
+        integralImg.reInit( xsize, ysize, channels );
+      }
+    }
+
+    computeIntegralImage( currentfeats, feats, integralImg );
+
+    depth++;
+  }
+
+  if ( pixelWiseLabeling )
+  {
+    //finales labeln:
+    long int offset = 0;
+
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++, offset++ )
+      {
+        double maxvalue = - numeric_limits<double>::max(); //TODO: das muss nur pro knoten gemacht werden, nicht pro pixel
+        int maxindex = 0;
+        uint s = forest[0][0].dist.size();
+
+        for ( uint i = 0; i < s; i++ )
+        {
+          probabilities.data[labelmapback[i]][offset] = getMeanProb( x, y, i, currentfeats );
+
+          if ( probabilities.data[labelmapback[i]][offset] > maxvalue )
+          {
+            maxvalue = probabilities.data[labelmapback[i]][offset];
+            maxindex = labelmapback[i];
+          }
+
+          segresult.setPixel( x, y, maxindex );
+        }
+
+        if ( maxvalue > 1 )
+          cout << "maxvalue: " << maxvalue << endl;
+      }
+    }
+  }
+  else
+  {
+    //final labeling using segmentation
+    //TODO: segmentation
+    Matrix regions;
+    int regionNumber = segmentation->segRegions( img, regions );
+    cout << "regions: " << regionNumber << endl;
+    int dSize = forest[0][0].dist.size();
+    vector<vector<double> > regionProbs( regionNumber, vector<double>( dSize, 0.0 ) );
+    vector<int> bestlabels( regionNumber, 0 );
+
+    /*
+    for(int r = 0; r < regionNumber; r++)
+    {
+     Image over(img.width(), img.height());
+     for(int y = 0; y < img.height(); y++)
+     {
+      for(int x = 0; x < img.width(); x++)
+      {
+       if(((int)regions(x,y)) == r)
+        over.setPixel(x,y,1);
+       else
+        over.setPixel(x,y,0);
+      }
+     }
+     cout << "r: " << r << endl;
+     showImageOverlay(img, over);
+    }
+    */
+
+    for ( int y = 0; y < img.height(); y++ )
+    {
+      for ( int x = 0; x < img.width(); x++ )
+      {
+        int cregion = regions( x, y );
+
+        for ( int d = 0; d < dSize; d++ )
+        {
+          regionProbs[cregion][d] += getMeanProb( x, y, d, currentfeats );
+        }
+      }
+    }
+
+    int roi = 38;
+
+    for ( int r = 0; r < regionNumber; r++ )
+    {
+      double maxval = regionProbs[r][0];
+      bestlabels[r] = 0;
+
+      if ( roi == r )
+      {
+        cout << "r: " << r << endl;
+        cout << "0: " << regionProbs[r][0] << endl;
+      }
+
+      for ( int d = 1; d < dSize; d++ )
+      {
+        if ( maxval < regionProbs[r][d] )
+        {
+          maxval = regionProbs[r][d];
+          bestlabels[r] = d;
+        }
+
+        if ( roi == r )
+        {
+          cout << d << ": " << regionProbs[r][d] << endl;
+        }
+      }
+
+      if ( roi == r )
+      {
+        cout << "bestlabel: " << bestlabels[r] << " danach: " << labelmapback[bestlabels[r]] << endl;
+      }
+
+      bestlabels[r] = labelmapback[bestlabels[r]];
+    }
+
+    for ( int y = 0; y < img.height(); y++ )
+    {
+      for ( int x = 0; x < img.width(); x++ )
+      {
+
+        segresult.setPixel( x, y, bestlabels[regions( x,y )] );
+      }
+    }
+  }
 }

+ 231 - 222
semseg/SemSegContextTree.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file SemSegContextTree.h
 * @brief Context Trees -> Combination of decision tree and context information
 * @author Björn Fröhlich
@@ -14,250 +14,259 @@
 #include "objrec/segmentation/RegionSegmentationMethod.h"
 
 namespace OBJREC {
-	
+
 class Operation;
 
 class TreeNode
 {
+
 public:
-	/** probabilities for each class */
-	std::vector<double> probs;
-	
-	/** left child node */
-	int left;
-	
-	/** right child node */
-	int right;
-	
-	/** position of feat for decision */
-	Operation *feat;
-	
-	/** decision stamp */
-	double decision;
-	
-	/** is the node a leaf or not */
-	bool isleaf;
-	
-	/** distribution in current node */
-	std::vector<double> dist;
-	
-	/** depth of the node in the tree */
-	int depth;
-	
-	/** simple constructor */
-	TreeNode():left(-1),right(-1),feat(NULL), decision(-1.0), isleaf(false){}
-	
-	/** standard constructor */
-	TreeNode(int _left, int _right, Operation *_feat, double _decision):left(_left),right(_right),feat(_feat), decision(_decision),isleaf(false){}
+  /** probabilities for each class */
+  std::vector<double> probs;
+
+  /** left child node */
+  int left;
+
+  /** right child node */
+  int right;
+
+  /** position of feat for decision */
+  Operation *feat;
+
+  /** decision stamp */
+  double decision;
+
+  /** is the node a leaf or not */
+  bool isleaf;
+
+  /** distribution in current node */
+  std::vector<double> dist;
+
+  /** depth of the node in the tree */
+  int depth;
+
+  /** simple constructor */
+  TreeNode(): left( -1 ), right( -1 ), feat( NULL ), decision( -1.0 ), isleaf( false ) {}
+
+  /** standard constructor */
+  TreeNode( int _left, int _right, Operation *_feat, double _decision ): left( _left ), right( _right ), feat( _feat ), decision( _decision ), isleaf( false ) {}
 };
-	
-struct Features{
-	NICE::MultiChannelImageT<double> *feats;
-	MultiChannelImageT<int> *cfeats;
-	int cTree;
-	std::vector<TreeNode> *tree;
-	NICE::MultiChannelImageT<double> *integralImg;
+
+struct Features {
+  NICE::MultiChannelImageT<double> *feats;
+  MultiChannelImageT<int> *cfeats;
+  int cTree;
+  std::vector<TreeNode> *tree;
+  NICE::MultiChannelImageT<double> *integralImg;
 };
 
 class ValueAccess
 {
+
 public:
-	virtual double getVal(const Features &feats, const int &x, const int &y, const int &channel) = 0;
-	virtual std::string writeInfos() = 0;
+  virtual double getVal( const Features &feats, const int &x, const int &y, const int &channel ) = 0;
+  virtual std::string writeInfos() = 0;
 };
 
-enum OperationTypes { 
-	MINUS, 
-	MINUSABS, 
-	ADDITION,
-	ONLY1,
-	INTEGRAL,
-	INTEGRALCENT,
-	BIINTEGRALCENT,
-	HAARHORIZ,
-	HAARVERT,
-	HAARDIAG,
-	HAAR3HORIZ,
-	HAAR3VERT,
-	NBOPERATIONS
+enum OperationTypes {
+  MINUS,
+  MINUSABS,
+  ADDITION,
+  ONLY1,
+  INTEGRAL,
+  INTEGRALCENT,
+  BIINTEGRALCENT,
+  HAARHORIZ,
+  HAARVERT,
+  HAARDIAG,
+  HAAR3HORIZ,
+  HAAR3VERT,
+  RELATIVEXPOSITION,
+  RELATIVEYPOSITION,
+  GLOBALFEATS,
+  NBOPERATIONS
 };
 
 class Operation
 {
+
 protected:
-	int x1, y1, x2, y2, channel1, channel2;
-	ValueAccess *values;
+  int x1, y1, x2, y2, channel1, channel2;
+  ValueAccess *values;
+
 public:
-	
-	Operation()
-	{
-		values = NULL;
-	}
-	
-	virtual void set(int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values)
-	{
-		x1 = _x1;
-		y1 = _y1;
-		x2 = _x2;
-		y2 = _y2;
-		channel1 = _channel1;
-		channel2 = _channel2;
-		values = _values;
-	}
-	/**
-	 * @brief abstract interface for feature computation
-	 * @param feats features
-	 * @param cfeats number of tree node for each pixel
-	 * @param tree current tree
-	 * @param x current x position
-	 * @param y current y position
-	 * @return double distance
-	 **/
-	virtual double getVal(const Features &feats, const int &x, const int &y) = 0;
-	virtual Operation* clone() = 0;
-	virtual std::string writeInfos() = 0;
-	
-	inline void getXY(const Features &feats, int &xsize, int &ysize)
-	{
-		xsize = feats.feats->width();
-		ysize = feats.feats->height();
-	}
-	
-	virtual OperationTypes getOps() = 0;
+
+  Operation()
+  {
+    values = NULL;
+  }
+
+  virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+  {
+    x1 = _x1;
+    y1 = _y1;
+    x2 = _x2;
+    y2 = _y2;
+    channel1 = _channel1;
+    channel2 = _channel2;
+    values = _values;
+  }
+
+  /**
+   * @brief abstract interface for feature computation
+   * @param feats features
+   * @param cfeats number of tree node for each pixel
+   * @param tree current tree
+   * @param x current x position
+   * @param y current y position
+   * @return double distance
+   **/
+  virtual double getVal( const Features &feats, const int &x, const int &y ) = 0;
+  virtual Operation* clone() = 0;
+  virtual std::string writeInfos() = 0;
+
+  inline void getXY( const Features &feats, int &xsize, int &ysize )
+  {
+    xsize = feats.feats->width();
+    ysize = feats.feats->height();
+  }
+
+  virtual OperationTypes getOps() = 0;
 };
-  
+
 /** Localization system */
+
 class SemSegContextTree : public SemanticSegmentation
 {
-	/** Segmentation Method */
-	RegionSegmentationMethod *segmentation;
-			
-	/** tree -> saved as vector of nodes */
-	std::vector<std::vector<TreeNode> > forest;
-	
-	/** local features */
-	LFColorWeijer *lfcw;
-	
-	/** number of featuretype -> currently: local and context features = 2 */
-	int ftypes;
-	
-	/** distance between features */
-	int grid;
-	
-	/** maximum samples for tree  */
-	int maxSamples;
-	
-	/** size for neighbourhood */
-	int windowSize;
-	
-	/** how many feats should be considered for a split */
-	int featsPerSplit;
-	
-	/** count samples per label */
-	std::map<int,int> labelcounter;
-	
-	/** map of labels */
-	std::map<int,int> labelmap;
-	
-	/** map of labels inverse*/
-	std::map<int,int> labelmapback;
-	
-	/** scalefactor for balancing for each class */
-	std::vector<double> a;
-	
-	/** counter for used operations */
-	std::vector<int> opOverview;
-	
-	/** the minimum number of features allowed in a leaf */
-	int minFeats;
-	
-	/** maximal depth of tree */
-	int maxDepth;
-	
-	/** operations for pairwise features */
-	std::vector<Operation*> ops;
-	
-	/** operations for pairwise context features */
-	std::vector<Operation*> cops;
-	
-	std::vector<ValueAccess*> calcVal;
-	
-	/** vector of all possible features */
-	std::vector<Operation*> featsel;
-	
-	/** use alternative calculation for information gain */
-	bool useShannonEntropy;
-	
-	/** Classnames */
-	ClassNames classnames;
-
-	/** train selection */
-	std::set<int> forbidden_classes;
-	
-	/** Configfile */
-	const Config *conf;
-	
-	/** use pixelwise labeling or regionlabeling with additional segmenation */
-	bool pixelWiseLabeling;
-	
-	/** use Gaussian distributed features based on the feature position */
-	bool useGaussian;
-	
-	/** Number of trees used for the forest */
-	int nbTrees;
-	
-    public:
-	/** simple constructor */
-	SemSegContextTree( const NICE::Config *conf, const MultiDataset *md );
-      
-	/** simple destructor */
-	virtual ~SemSegContextTree();
-
-	/**
-	 * test a single image
-	 * @param ce input data
-	 * @param segresult segmentation results
-	 * @param probabilities probabilities for each pixel
-	 */
-	void semanticseg ( CachedExample *ce,   NICE::Image & segresult,  NICE::MultiChannelImageT<double> & probabilities );
-	
-	/**
-	 * the main training method
-	 * @param md training data 
-	 */
-	void train ( const MultiDataset *md );
-	
-	
-	/**
-	 * @brief computes integral image of given feats
-	 *
-	 * @param currentfeats input features
-	 * @param integralImage output image (must be initilized)
-	 * @return void
-	 **/
-	void computeIntegralImage(const NICE::MultiChannelImageT<int> &currentfeats, const NICE::MultiChannelImageT<double> &lfeats, NICE::MultiChannelImageT<double> &integralImage);
-	
-	/**
-	 * compute best split for current settings
-	 * @param feats features
-	 * @param currentfeats matrix with current node for each feature
-	 * @param labels labels for each feature
-	 * @param node current node
-	 * @param splitfeat output feature position
-	 * @param splitval 
-	 * @return best information gain
-	 */
-	double getBestSplit(std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<int> > &currentfeats, std::vector<NICE::MultiChannelImageT<double> > &integralImgs, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree);
-	
-	/**
-	 * @brief computes the mean probability for a given class over all trees
-	 * @param x x position
-	 * @param y y position
-	 * @param channel current class
-	 * @param currentfeats information about the nodes
-	 * @return double mean value
-	 **/
-	inline double getMeanProb(const int &x,const int &y,const int &channel, const MultiChannelImageT<int> &currentfeats);
+  /** Segmentation Method */
+  RegionSegmentationMethod *segmentation;
+
+  /** tree -> saved as vector of nodes */
+  std::vector<std::vector<TreeNode> > forest;
+
+  /** local features */
+  LFColorWeijer *lfcw;
+
+  /** number of feature types -> currently: local and context features = 2 */
+  int ftypes;
+
+  /** distance between features */
+  int grid;
+
+  /** maximum samples for tree  */
+  int maxSamples;
+
+  /** size for neighbourhood */
+  int windowSize;
+
+  /** how many feats should be considered for a split */
+  int featsPerSplit;
+
+  /** count samples per label */
+  std::map<int, int> labelcounter;
+
+  /** map of labels */
+  std::map<int, int> labelmap;
+
+  /** inverse map of labels */
+  std::map<int, int> labelmapback;
+
+  /** scalefactor for balancing for each class */
+  std::vector<double> a;
+
+  /** counter for used operations */
+  std::vector<int> opOverview;
+
+  /** the minimum number of features allowed in a leaf */
+  int minFeats;
+
+  /** maximal depth of tree */
+  int maxDepth;
+
+  /** operations for pairwise features */
+  std::vector<Operation*> ops;
+
+  /** operations for pairwise context features */
+  std::vector<Operation*> cops;
+
+  std::vector<ValueAccess*> calcVal;
+
+  /** vector of all possible features */
+  std::vector<Operation*> featsel;
+
+  /** use alternative calculation for information gain */
+  bool useShannonEntropy;
+
+  /** Classnames */
+  ClassNames classnames;
+
+  /** train selection */
+  std::set<int> forbidden_classes;
+
+  /** Configfile */
+  const Config *conf;
+
+  /** use pixelwise labeling or regionlabeling with additional segmenation */
+  bool pixelWiseLabeling;
+
+  /** use Gaussian distributed features based on the feature position */
+  bool useGaussian;
+
+  /** Number of trees used for the forest */
+  int nbTrees;
+
+public:
+  /** simple constructor */
+  SemSegContextTree( const NICE::Config *conf, const MultiDataset *md );
+
+  /** simple destructor */
+  virtual ~SemSegContextTree();
+
+  /**
+   * test a single image
+   * @param ce input data
+   * @param segresult segmentation results
+   * @param probabilities probabilities for each pixel
+   */
+  void semanticseg( CachedExample *ce,   NICE::Image & segresult,  NICE::MultiChannelImageT<double> & probabilities );
+
+  /**
+   * the main training method
+   * @param md training data
+   */
+  void train( const MultiDataset *md );
+
+
+  /**
+   * @brief computes integral image of given feats
+   *
+   * @param currentfeats input features
+   * @param integralImage output image (must be initialized)
+   * @return void
+   **/
+  void computeIntegralImage( const NICE::MultiChannelImageT<int> &currentfeats, const NICE::MultiChannelImageT<double> &lfeats, NICE::MultiChannelImageT<double> &integralImage );
+
+  /**
+   * compute best split for current settings
+   * @param feats features
+   * @param currentfeats matrix with current node for each feature
+   * @param labels labels for each feature
+   * @param node current node
+   * @param splitfeat output feature position
+   * @param splitval output optimal split threshold value
+   * @return best information gain
+   */
+  double getBestSplit( std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<int> > &currentfeats, std::vector<NICE::MultiChannelImageT<double> > &integralImgs, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree );
+
+  /**
+   * @brief computes the mean probability for a given class over all trees
+   * @param x x position
+   * @param y y position
+   * @param channel current class
+   * @param currentfeats information about the nodes
+   * @return double mean value
+   **/
+  inline double getMeanProb( const int &x, const int &y, const int &channel, const MultiChannelImageT<int> &currentfeats );
 
 };