
move Semseg to objrec-froehlichexp

Bjoern Froehlich 13 years ago
parent
commit
55706c66d0
42 changed files with 9854 additions and 18 deletions
  1. progs/getRelevantClasses.cpp  (+2 -9)
  2. progs/testSemanticSegmentation.cpp  (+13 -9)
  3. semseg/FIShotton.cpp  (+141 -0)
  4. semseg/FIShotton.h  (+51 -0)
  5. semseg/Makefile  (+8 -0)
  6. semseg/Makefile.inc  (+103 -0)
  7. semseg/SemSegContextTree.cpp  (+553 -0)
  8. semseg/SemSegContextTree.h  (+129 -0)
  9. semseg/SemSegCsurka.cpp  (+1854 -0)
  10. semseg/SemSegCsurka.h  (+247 -0)
  11. semseg/SemSegCsurka2.cpp  (+1720 -0)
  12. semseg/SemSegCsurka2.h  (+235 -0)
  13. semseg/SemSegLocal.cpp  (+115 -0)
  14. semseg/SemSegLocal.h  (+56 -0)
  15. semseg/SemSegRegionBased.cpp  (+1440 -0)
  16. semseg/SemSegRegionBased.h  (+190 -0)
  17. semseg/SemSegSTF.cpp  (+233 -0)
  18. semseg/SemSegSTF.h  (+98 -0)
  19. semseg/SemSegTools.cpp  (+115 -0)
  20. semseg/SemSegTools.h  (+55 -0)
  21. semseg/SemanticSegmentation.cpp  (+184 -0)
  22. semseg/SemanticSegmentation.h  (+82 -0)
  23. semseg/libdepend.inc  (+7 -0)
  24. semseg/postsegmentation/Makefile  (+8 -0)
  25. semseg/postsegmentation/Makefile.inc  (+103 -0)
  26. semseg/postsegmentation/PPGraphCut.cpp  (+200 -0)
  27. semseg/postsegmentation/PPGraphCut.h  (+127 -0)
  28. semseg/postsegmentation/PPSuperregion.cpp  (+275 -0)
  29. semseg/postsegmentation/PPSuperregion.h  (+117 -0)
  30. semseg/postsegmentation/PSSBackgroundModel.cpp  (+75 -0)
  31. semseg/postsegmentation/PSSBackgroundModel.h  (+56 -0)
  32. semseg/postsegmentation/PSSImageLevelPrior.cpp  (+124 -0)
  33. semseg/postsegmentation/PSSImageLevelPrior.h  (+58 -0)
  34. semseg/postsegmentation/PSSLocalizationPrior.cpp  (+168 -0)
  35. semseg/postsegmentation/PSSLocalizationPrior.h  (+62 -0)
  36. semseg/postsegmentation/PSSQueue.cpp  (+34 -0)
  37. semseg/postsegmentation/PSSQueue.h  (+39 -0)
  38. semseg/postsegmentation/PostSemSeg.cpp  (+34 -0)
  39. semseg/postsegmentation/PostSemSeg.h  (+45 -0)
  40. semseg/postsegmentation/RelativeLocationPrior.cpp  (+559 -0)
  41. semseg/postsegmentation/RelativeLocationPrior.h  (+136 -0)
  42. semseg/postsegmentation/libdepend.inc  (+3 -0)

+ 2 - 9
progs/getRelevantClasses.cpp

@@ -15,12 +15,8 @@
 #include <objrec/baselib/StringTools.h>
 #include <objrec/baselib/ICETools.h>
 
-#include <objrec/semanticsegmentation/SemanticSegmentation.h>
-#include <objrec/semanticsegmentation/SemSegLocal.h>
-#include <objrec/semanticsegmentation/SemSegSTF.h>
-#include <objrec/semanticsegmentation/SemSegCsurka.h>
-#include <objrec/semanticsegmentation/SemSegCsurka2.h>
-#include <objrec/semanticsegmentation/SemSegRegionBased.h>
+#include "objrec/cbaselib/MultiDataset.h"
+#include "objrec/image/GenericImage.h"
 
 #include <fstream>
 
@@ -66,9 +62,6 @@ int main(int argc, char **argv)
 			lm.set(0);
 			l_gt->calcLabeledImage(lm, classNames.getBackgroundClass());
 		}
-
-		//semseg->semanticseg(file, lm, probabilities);
-
 		NICE::Image lm_gt;
 
 		if (info.hasLocalizationInfo())

+ 13 - 9
progs/testSemanticSegmentation.cpp

@@ -15,15 +15,18 @@
 #include <objrec/baselib/StringTools.h>
 #include <objrec/baselib/ICETools.h>
 
-#include <objrec/semanticsegmentation/SemanticSegmentation.h>
-#include <objrec/semanticsegmentation/SemSegLocal.h>
-#include <objrec/semanticsegmentation/SemSegSTF.h>
-#include <objrec/semanticsegmentation/SemSegCsurka.h>
-#include <objrec/semanticsegmentation/SemSegCsurka2.h>
-#include <objrec/semanticsegmentation/SemSegRegionBased.h>
+#include <objrec-froehlichexp/semseg/SemanticSegmentation.h>
+#include <objrec-froehlichexp/semseg/SemSegLocal.h>
+#include <objrec-froehlichexp/semseg/SemSegSTF.h>
+#include <objrec-froehlichexp/semseg/SemSegCsurka.h>
+#include <objrec-froehlichexp/semseg/SemSegCsurka2.h>
+#include <objrec-froehlichexp/semseg/SemSegRegionBased.h>
+#include <objrec-froehlichexp/semseg/SemSegContextTree.h>
 
 #include <fstream>
 
+
+
 using namespace OBJREC;
 
 using namespace NICE;
@@ -89,7 +92,8 @@ int main(int argc, char **argv)
 
 	//SemanticSegmentation *semseg = new SemSegLocal ( &conf, &md );
 	//SemanticSegmentation *semseg = new SemSegSTF ( &conf, &md );
-	SemanticSegmentation *semseg = new SemSegCsurka ( &conf, &md);
+	//SemanticSegmentation *semseg = new SemSegCsurka ( &conf, &md);
+	SemanticSegmentation *semseg = new SemSegContextTree ( &conf, &md);
 
 	//SemanticSegmentation *semseg = new SemSegRegionBased(&conf, &md);
 
@@ -183,7 +187,7 @@ int main(int argc, char **argv)
 			if (write_results)
 			{
 				char filename[1024];
-				char *format = "ppm";
+				char *format = (char *)"ppm";
 				sprintf(filename, "%06d.%s", fileno, format);
 				std::string origfilename = resultdir + "/orig_" + string(filename);
 				cerr << "Writing to file " << origfilename << endl;
@@ -271,7 +275,7 @@ int main(int argc, char **argv)
 		fout.close();
 	}
 
-	cerr <<  "overall: " << overall << endl;
+	fprintf(stderr, "overall: %f\n", overall);
 	fprintf(stderr, "Average Performance %f\n", avg_perf / (classes_trained));
 	//fprintf(stderr, "Lower Bound %f\n", 1.0 / classes_trained);
 	for (int r = 0 ; r < (int)M.rows() ; r++)
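
The hunk above retargets the includes to the relocated objrec-froehlichexp/semseg headers and switches the active segmenter to SemSegContextTree. For orientation only, a minimal driver built against these headers could look like the sketch below; the Config/MultiDataset construction shown here is an assumption based on the calls visible in this diff, not code from the commit.

// Hypothetical minimal driver (sketch only); config handling is assumed.
// (The Config and MultiDataset headers from the objrec core are omitted here.)
#include <objrec-froehlichexp/semseg/SemanticSegmentation.h>
#include <objrec-froehlichexp/semseg/SemSegContextTree.h>

using namespace OBJREC;
using namespace NICE;

int main ( int argc, char **argv )
{
	Config conf ( argc, argv );        // assumed constructor: read settings from the command line
	MultiDataset md ( &conf );         // assumed: provides the "train"/"test" LabeledSets used above
	SemanticSegmentation *semseg = new SemSegContextTree ( &conf, &md );
	// ... iterate over the test set and call
	// semseg->semanticseg ( ce, segresult, probabilities ) per image ...
	delete semseg;
	return 0;
}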

+ 141 - 0
semseg/FIShotton.cpp

@@ -0,0 +1,141 @@
+/** 
+* @file FIShotton.cpp
+* @brief feature images
+* @author Erik Rodner
+* @date 05/30/2008
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+
+#include "FIShotton.h"
+#include "objrec/baselib/FastFilter.h"
+#include "objrec/image/GenericImageTools.h"
+
+using namespace OBJREC;
+
+using namespace std;
+using namespace NICE;
+
+
+void FIShotton::buildTextonMap ( CachedExample *ce,
+				FPCRandomForests *fpcrf,
+				map<DecisionNode *, pair<long, int> > index,
+				int subsamplex, 
+				int subsampley,
+				int maxdepthSegmentationForest )
+{
+    vector<DecisionNode *> leafs;
+
+    int xsize, ysize;
+    ce->getImageSize ( xsize, ysize );
+    int xsize_s = xsize / subsamplex;
+    int ysize_s = ysize / subsampley;
+
+    SparseVector *textonIndices = new SparseVector [xsize_s*ysize_s];
+    
+    Example pce ( ce, 0, 0 );
+    long offset = 0;
+    long offset_s = 0;
+    for ( int y = 0 ; y < ysize_s ; y++ )
+    {
+	for ( int x = 0 ; x < xsize_s ; x++, offset_s++ )
+	{
+	    for ( int yi = 0 ; yi < subsampley ; yi++ )
+	    {
+		for ( int xi = 0 ; xi < subsamplex ; xi++, offset++ )
+		{
+		    leafs.clear();
+		    pce.x = x*subsamplex + xi; pce.y = y*subsampley + yi; 
+		    fpcrf->getLeafNodes ( pce, leafs, maxdepthSegmentationForest );
+		    SparseVector v;
+		    for ( vector<DecisionNode *>::const_iterator i = leafs.begin();
+								i != leafs.end();
+								i++ )
+			v.insert ( pair<int, double> ( index[*i].first, 1.0 ) );
+
+		    textonIndices[offset_s].add(v);
+		}
+	    }
+	}
+    }
+    fprintf (stderr, "Building Texton Integral NICE::Image !!\n");
+
+    ce->buildIntegralSV ( CachedExample::SVTEXTON, textonIndices, xsize_s, ysize_s );
+}
+
+
+
+
+void FIShotton::buildSemanticMap ( CachedExample *ce,
+				FPCRandomForests *fpcrf,
+				int subsamplex, int subsampley,
+				int numClasses )
+{
+    int xsize, ysize;
+    ce->getImageSize ( xsize, ysize );
+    int xsize_s = xsize / subsamplex;
+    int ysize_s = ysize / subsampley;
+
+    GenericImage<double> & priorMap = ce->getDChannel ( CachedExample::D_INTEGRALPRIOR );
+    priorMap.reInit ( xsize_s, ysize_s, numClasses, true );
+    priorMap.setAll ( 0.0 );
+    
+    vector<DecisionNode *> leafs;
+   
+    Example pce ( ce, 0, 0 );
+    long offset = 0;
+    long offset_s = 0;
+    for ( int y = 0 ; y < ysize_s ; y++ )
+    {
+	for ( int x = 0 ; x < xsize_s ; x++, offset_s++ )
+	{
+	    for ( int yi = 0 ; yi < subsampley ; yi++ )
+	    {
+		for ( int xi = 0 ; xi < subsamplex ; xi++ )
+		{
+		    leafs.clear();
+		    pce.x = x*subsamplex + xi; pce.y = y*subsampley + yi; 
+		    fpcrf->getLeafNodes ( pce, leafs );
+		    
+		    for ( vector<DecisionNode *>::const_iterator i = leafs.begin();
+								i != leafs.end();
+								i++ )
+		    {
+			const FullVector & sv = (*i)->distribution;
+
+			for ( int i = 0 ; i < sv.size(); i++  )
+			{
+			    priorMap.data[i][offset_s] += sv[i];
+			}
+		    }
+		}
+	    }
+
+	    double sum = 0.0;
+	    for ( uint i = 0 ; i < priorMap.numChannels; i++ )
+		sum += priorMap.data[i][offset_s];
+	
+	    if ( sum < 10e-13 )
+	    {
+		fprintf (stderr, "x*subsamplex %d y*subsampley %d xsize %d ysize %d\n",
+		    x*subsamplex, y*subsampley, xsize, ysize );
+		exit(-1);
+	    } else {
+		for ( uint i = 0 ; i < priorMap.numChannels; i++ )
+		    priorMap.data[i][offset_s] /= sum;
+	    }
+
+	}
+    }
+
+    for ( uint i = 0 ; i < priorMap.numChannels ; i++ )
+	GenericImageTools::calcIntegralImage ( priorMap.data[i], priorMap.data[i], priorMap.xsize, priorMap.ysize );
+
+}
+
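
buildSemanticMap above accumulates per-pixel class scores from the random-forest leaves, normalizes them per position, and then converts every channel of the prior map into an integral image via GenericImageTools::calcIntegralImage, so that later stages can pool class priors over arbitrary rectangles in constant time. The following standalone sketch illustrates that summed-area-table idea; it is not the objrec API itself.

// Self-contained illustration of the integral-image (summed-area-table) trick.
#include <vector>

// I[y][x] = sum of img over the rectangle [0..x) x [0..y)
std::vector<std::vector<double> > buildIntegral ( const std::vector<std::vector<double> > &img )
{
	int h = ( int ) img.size(), w = ( int ) img[0].size();
	std::vector<std::vector<double> > I ( h + 1, std::vector<double> ( w + 1, 0.0 ) );
	for ( int y = 0; y < h; y++ )
		for ( int x = 0; x < w; x++ )
			I[y+1][x+1] = img[y][x] + I[y][x+1] + I[y+1][x] - I[y][x];
	return I;
}

// sum of img over [x0..x1) x [y0..y1) in O(1)
double rectSum ( const std::vector<std::vector<double> > &I, int x0, int y0, int x1, int y1 )
{
	return I[y1][x1] - I[y0][x1] - I[y1][x0] + I[y0][x0];
}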

+ 51 - 0
semseg/FIShotton.h

@@ -0,0 +1,51 @@
+/** 
+* @file FIShotton.h
+* @brief feature images for preprocessing according to Shotton
+* @author Erik Rodner
+* @date 05/30/2008
+
+*/
+#ifndef FIShottonINCLUDE
+#define FIShottonINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+ 
+#include "objrec/math/mathbase/SparseVector.h"
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+
+
+namespace OBJREC {
+
+/** feature images */
+class FIShotton
+{
+
+    protected:
+
+    public:
+  
+	static void buildSemanticMap ( CachedExample *ce,
+				FPCRandomForests *fpcrf,
+				int subsamplex, int subsampley,
+				int numClasses );
+
+	static void buildTextonMap ( CachedExample *ce,
+				FPCRandomForests *fpcrf,
+				map<DecisionNode *, pair<long, int> > index,
+				int subsamplex, 
+				int subsampley,
+				int maxdepthSegmentationForest );
+
+
+};
+
+
+} // namespace
+
+#endif

+ 8 - 0
semseg/Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursively going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
semseg/Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusively use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 553 - 0
semseg/SemSegContextTree.cpp

@@ -0,0 +1,553 @@
+#include <objrec/nice.h>
+
+#include <iostream>
+
+#include "SemSegContextTree.h"
+#include "objrec/baselib/Globals.h"
+#include "objrec/baselib/ProgressBar.h"
+#include "objrec/baselib/StringTools.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/cbaselib/PascalResults.h"
+
+#include <omp.h>
+
+using namespace OBJREC;
+using namespace std;
+using namespace NICE;
+
+
+
+SemSegContextTree::SemSegContextTree( const Config *conf, const MultiDataset *md )
+    : SemanticSegmentation ( conf, &(md->getClassNames("train")) )
+{
+	string section = "SSContextTree";
+	lfcw = new LFColorWeijer(conf);
+	
+	grid = conf->gI(section, "grid", 10 );
+	
+	maxSamples = conf->gI(section, "max_samples", 2000 );
+	
+	minFeats = conf->gI(section, "min_feats", 50 );
+	
+	maxDepth = conf->gI(section, "max_depth", 20 );
+	
+	///////////////////////////////////
+	// Train Segmentation Context Trees
+	//////////////////////////////////
+
+	train ( md );
+}
+
+SemSegContextTree::~SemSegContextTree()
+{
+}
+
+void SemSegContextTree::getBestSplit(const vector<vector<vector<vector<double> > > > &feats, vector<vector<vector<int> > > &currentfeats,const vector<vector<vector<int> > > &labels, int node, int &splitfeat, double &splitval)
+{
+	int imgCount, featsize;
+	try
+	{
+		imgCount = (int)feats.size();
+		featsize = feats[0][0][0].size();
+	}
+	catch(Exception)
+	{
+		cerr << "no features computed?" << endl;
+	}
+	
+	double bestig = -numeric_limits< double >::max();
+	splitfeat = -1;
+	splitval = -1.0;
+	
+	set<vector<int> >selFeats;
+	map<int,int> e;
+	int featcounter = 0;
+	vector<double> maximum(featsize, -numeric_limits< double >::max());
+	vector<double> minimum(featsize, numeric_limits< double >::max());
+	
+	for(int iCounter = 0; iCounter < imgCount; iCounter++)
+	{
+		int xsize = (int)currentfeats[iCounter].size();
+		int ysize = (int)currentfeats[iCounter][0].size();
+		for(int x = 0; x < xsize; x++)
+		{
+			for(int y = 0; y < ysize; y++)
+			{
+				if(currentfeats[iCounter][x][y] == node)
+				{
+					featcounter++;
+				}
+			}
+		}
+	}
+	
+	//double fraction = (double)maxSamples/(double)featcounter;
+	
+	vector<double> fraction(a.size(),0.0);
+	for(uint i = 0; i < fraction.size(); i++)
+	{
+		fraction[i] = ((double)maxSamples)/(a[i]*(double)featcounter*8);
+		//cout << "fraction["<<i<<"]: "<< fraction[i] << endl;
+	}
+	featcounter = 0;
+	
+	
+	for(int iCounter = 0; iCounter < imgCount; iCounter++)
+	{
+		int xsize = (int)currentfeats[iCounter].size();
+		int ysize = (int)currentfeats[iCounter][0].size();
+		for(int x = 0; x < xsize; x++)
+		{
+			for(int y = 0; y < ysize; y++)
+			{
+				if(currentfeats[iCounter][x][y] == node)
+				{
+					int cn = labels[iCounter][x][y];
+					double randD = (double)rand()/(double)RAND_MAX;
+					//cout << "class: " << cn << " thres: "<< fraction<<  " rand: " << randD << " scale: " << a[labelmap[cn]] << " newrand: ";
+					//randD *= a[labelmap[cn]];
+					//cout << randD << endl;
+					//getchar();
+					if(randD < fraction[labelmap[cn]])
+					{
+						vector<int> tmp(3,0);
+						tmp[0] = iCounter;
+						tmp[1] = x;
+						tmp[2] = y;
+						featcounter++;
+						selFeats.insert(tmp);
+						
+						e[cn] = e[cn]+1;
+						for(int f= 0; f < featsize; f++)
+						{
+							maximum[f] = std::max(maximum[f], feats[iCounter][x][y][f]);
+							minimum[f] = std::min(minimum[f], feats[iCounter][x][y][f]);
+						}
+					}
+				}
+
+			}
+		}
+	}
+	
+	//cout << "size: " << selFeats.size() << endl;
+	
+	map<int,int>::iterator mapit;
+	double globent = 0.0;
+	for ( mapit=e.begin() ; mapit != e.end(); mapit++ )
+	{
+	  //cout << "class: " << mapit->first << ": " << mapit->second << endl;
+		double p = (double)(*mapit).second/(double)featcounter;
+		globent += p*log2(p);
+	}
+	globent = -globent;
+	
+	if(globent < 0.5)
+	{
+		cout << "globent too small: " << globent << endl;
+		return;
+	}
+	
+	if(featcounter < minFeats)
+	{
+		cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
+		return;
+	}
+
+	//omp_set_num_threads(2);
+#pragma omp parallel for private(mapit)
+	for(int f = 0; f < featsize; f++)
+	{
+		double l_bestig = -numeric_limits< double >::max();
+		double l_splitval = -1.0;
+	
+		set<vector<int> >::iterator it;
+		for ( it=selFeats.begin() ; it != selFeats.end(); it++ )
+		{
+			set<vector<int> >::iterator it2;
+			double val = feats[(*it)[0]][(*it)[1]][(*it)[2]] [f];
+			
+			//cout << "val: " << val << endl;
+			if(val == maximum[f] || val == minimum[f])
+			{
+				continue;
+			}
+			
+			map<int,int> eL, eR;
+			int counterL = 0, counterR = 0;
+			
+			for ( it2=selFeats.begin() ; it2 != selFeats.end(); it2++ )
+			{
+				int cn = labels[(*it2)[0]][(*it2)[1]][(*it2)[2]];
+				if(feats[(*it2)[0]][(*it2)[1]][(*it2)[2]][f] < val)
+				{
+					//left entropy:
+					eL[cn] = eL[cn]+1;
+					counterL++;
+				}
+				else
+				{
+					//right entropy:
+					eR[cn] = eR[cn]+1;
+					counterR++;
+				}
+				
+			}
+			
+			double leftent = 0.0;
+			for ( mapit=eL.begin() ; mapit != eL.end(); mapit++ )
+			{
+				double p = (double)(*mapit).second/(double)counterL;
+				leftent += p*log2(p);
+			}
+			leftent = -leftent;
+			
+			double rightent = 0.0;
+			for ( mapit=eR.begin() ; mapit != eR.end(); mapit++ )
+			{
+				double p = (double)(*mapit).second/(double)counterR;
+				rightent += p*log2(p);
+			}
+			rightent = -rightent;
+			
+			double ig = globent - rightent - leftent;
+			
+			if(ig > l_bestig)
+			{
+				l_bestig = ig;
+				l_splitval = val;
+			}
+		}
+#pragma omp critical
+{
+		if(l_bestig > bestig)
+		{
+			bestig = l_bestig;
+			splitfeat = f;
+			splitval = l_splitval;
+		}
+}
+	}
+	//cout << "globent: " << globent <<  " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
+}
+
+void SemSegContextTree::train ( const MultiDataset *md )
+{
+	const LabeledSet train = * ( *md ) ["train"];
+	const LabeledSet *trainp = &train;
+	
+	ProgressBar pb ( "compute feats" );
+	pb.show();
+	
+	//TODO: memory hog! is a sparse representation worth it?
+	vector<vector<vector<vector<double> > > > allfeats;
+	vector<vector<vector<int> > > currentfeats;
+	vector<vector<vector<int> > > labels;
+	
+	int imgcounter = 0;
+	
+	LOOP_ALL_S ( *trainp )
+	{
+		EACH_INFO ( classno,info );
+
+		NICE::ColorImage img;
+
+		std::string currentFile = info.img();
+
+		CachedExample *ce = new CachedExample ( currentFile );
+
+		const LocalizationResult *locResult = info.localization();
+		if ( locResult->size() <= 0 )
+		{
+			fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+			          currentFile.c_str() );
+			continue;
+		}
+
+		fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );
+
+		int xsize, ysize;
+		ce->getImageSize ( xsize, ysize );
+
+		vector<vector<int> > tmp = vector<vector<int> >(xsize, vector<int>(ysize,0));
+		currentfeats.push_back(tmp);
+		labels.push_back(tmp);
+
+		try {
+			img = ColorImage(currentFile);
+		} catch (Exception) {
+			cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
+			continue;
+		}
+
+		Globals::setCurrentImgFN ( currentFile );
+
+		//TODO: resize image?!
+		
+		vector<vector<vector<double> > > feats;
+#if 1
+		lfcw->getFeats(img, feats);
+#else
+		feats = vector<vector<vector<double> > >(xsize,vector<vector<double> >(ysize,vector<double>(3,0.0)));
+		for(int x = 0; x < xsize; x++)
+		{
+			for(int y = 0; y < ysize; y++)
+			{
+				for(int r = 0; r < 3; r++)
+				{
+					feats[x][y][r] = img.getPixel(x,y,r);
+				}
+			}
+		}
+#endif	
+		allfeats.push_back(feats);
+		
+		// getting groundtruth
+		NICE::Image pixelLabels (xsize, ysize);
+		pixelLabels.set(0);
+		locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+		for(int x = 0; x < xsize; x++)
+		{
+			for(int y = 0; y < ysize; y++)
+			{
+				classno = pixelLabels.getPixel(x, y);
+				labels[imgcounter][x][y] = classno;
+				labelcounter[classno]++;
+				//if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+					//continue;
+			}
+		}
+		imgcounter++;
+		pb.update ( trainp->count());
+		delete ce;
+	}
+	pb.hide();
+	
+	map<int,int>::iterator mapit;
+	int classes = 0;
+	for(mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++)
+	{
+		labelmap[mapit->first] = classes;
+		labelmapback[classes] = mapit->first;
+		classes++;
+	}
+	
+	//balancing
+	int featcounter = 0;
+	a = vector<double>(classes,0.0);
+	for(int iCounter = 0; iCounter < imgcounter; iCounter++)
+	{
+		int xsize = (int)currentfeats[iCounter].size();
+		int ysize = (int)currentfeats[iCounter][0].size();
+		for(int x = 0; x < xsize; x++)
+		{
+			for(int y = 0; y < ysize; y++)
+			{
+				featcounter++;
+				int cn = labels[iCounter][x][y];
+				a[labelmap[cn]] ++;
+			}
+		}
+	}
+	
+	for(int i = 0; i < (int)a.size(); i++)
+	{
+		a[i] /= (double)featcounter;
+		cout << "a["<<i<<"]: " << a[i] << endl;
+	}
+	
+	tree.push_back(Node());
+	tree[0].dist = vector<double>(classes,0.0);
+	int depth = 0;
+	tree[0].depth = depth;
+	
+	bool allleaf = false;
+	while(!allleaf && depth < maxDepth)
+	{
+		allleaf = true;
+		//TODO maybe parallelize this if the next loop gets parallelized as well, it carries more weight
+		//#pragma omp parallel for
+		int t = (int) tree.size();
+		for(int i = 0; i < t; i++)
+		{
+			if(!tree[i].isleaf && tree[i].left < 0)
+			{  
+				int splitfeat;
+				double splitval;
+
+				getBestSplit(allfeats, currentfeats,labels, i, splitfeat, splitval);
+				tree[i].feat = splitfeat;
+				tree[i].decision = splitval;
+				if(splitfeat >= 0)
+				{
+					allleaf = false;
+					int left = tree.size();
+					tree.push_back(Node());
+					tree.push_back(Node());
+					int right = left+1;
+					tree[i].left = left;
+					tree[i].right = right;
+					tree[left].dist = vector<double>(classes, 0.0);
+					tree[right].dist = vector<double>(classes, 0.0);
+					tree[left].depth = depth+1;
+					tree[right].depth = depth+1;
+//#pragma omp parallel for
+					for(int iCounter = 0; iCounter < imgcounter; iCounter++)
+					{
+						int xsize = currentfeats[iCounter].size();
+						int ysize = currentfeats[iCounter][0].size();
+						for(int x = 0; x < xsize; x++)
+						{
+							for(int y = 0; y < ysize; y++)
+							{
+								if(currentfeats[iCounter][x][y] == i)
+								{
+									if(allfeats[iCounter][x][y][splitfeat] < splitval)
+									{ 
+										currentfeats[iCounter][x][y] = left;
+										tree[left].dist[labelmap[labels[iCounter][x][y]]]++;
+									}
+									else
+									{  
+										currentfeats[iCounter][x][y] = right;
+										tree[right].dist[labelmap[labels[iCounter][x][y]]]++;
+									}
+								}
+							}
+						}
+					}
+					double lcounter = 0.0, rcounter = 0.0;
+					for(uint d = 0; d < tree[left].dist.size(); d++)
+					{
+						tree[left].dist[d]/=a[d];
+						lcounter +=tree[left].dist[d];
+						tree[right].dist[d]/=a[d];
+						rcounter +=tree[right].dist[d];
+					}
+					for(uint d = 0; d < tree[left].dist.size(); d++)
+					{
+						tree[left].dist[d]/=lcounter;
+						tree[right].dist[d]/=rcounter;
+					}
+				}
+				else
+				{
+					tree[i].isleaf = true;
+				}
+				//TODO: determine probability
+			}
+		}
+		
+		//TODO: recompute features!
+		depth++;
+		cout << "d: " << depth << endl;
+	}
+
+}
+
+void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult,GenericImage<double> & probabilities )
+{
+	int xsize;
+	int ysize;
+	ce->getImageSize ( xsize, ysize );
+	
+	int numClasses = classNames->numClasses();
+	
+	fprintf (stderr, "ContextTree classification !\n");
+
+	probabilities.reInit ( xsize, ysize, numClasses, true );
+	probabilities.setAll ( 0 );
+
+	NICE::ColorImage img;
+
+	std::string currentFile = Globals::getCurrentImgFN();
+	
+	try {
+		img = ColorImage(currentFile);
+	} catch (Exception) {
+		cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
+		return;
+	}
+	
+	//TODO: resize image?!
+		
+	vector<vector<vector<double> > > feats;
+	
+#if 1
+	lfcw->getFeats(img, feats);
+#else
+	feats = vector<vector<vector<double> > >(xsize,vector<vector<double> >(ysize,vector<double>(3,0.0)));
+	for(int x = 0; x < xsize; x++)
+	{
+		for(int y = 0; y < ysize; y++)
+		{
+			for(int r = 0; r < 3; r++)
+			{
+				feats[x][y][r] = img.getPixel(x,y,r);
+			}
+		}
+	}
+#endif
+	
+	bool allleaf = false;
+	
+	vector<vector<int> > currentfeats = vector<vector<int> >(xsize, vector<int>(ysize,0));
+	int depth = 0;
+	while(!allleaf)
+	{
+		allleaf = true;
+		//TODO maybe parallelize this if the next loop gets parallelized as well, it carries more weight
+		//#pragma omp parallel for
+		int t = (int) tree.size();
+		for(int i = 0; i < t; i++)
+		{
+			for(int x = 0; x < xsize; x++)
+			{
+				for(int y = 0; y < ysize; y++)
+				{
+					int t = currentfeats[x][y];
+					if(tree[t].left > 0)
+					{
+						allleaf = false;
+						if(feats[x][y][tree[t].feat] < tree[t].decision)
+						{
+							currentfeats[x][y] = tree[t].left;
+						}
+						else
+						{
+							currentfeats[x][y] = tree[t].right;
+						}
+					}
+				}
+			}
+		}
+		
+		//TODO: recompute features! analogous to training
+		
+		depth++;
+	}
+	
+	//final labeling:
+	 long int offset = 0;
+	for(int x = 0; x < xsize; x++)
+	{
+		for(int y = 0; y < ysize; y++,offset++)
+		{
+			int t = currentfeats[x][y];
+			double maxvalue = - numeric_limits<double>::max(); //TODO: this only needs to be done per node, not per pixel
+			int maxindex = 0;
+			for(uint i = 0; i < tree[t].dist.size(); i++)
+			{
+				probabilities.data[labelmapback[i]][offset] = tree[t].dist[i];
+				if(tree[t].dist[i] > maxvalue)
+				{
+					maxvalue = tree[t].dist[i];
+					maxindex = labelmapback[i];
+				}
+				segresult.setPixel(x,y,maxindex);
+			}
+		}
+	}
+}
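
getBestSplit above draws a class-balanced random subset of the pixels assigned to a node, computes the entropy of their class histogram, and then, for each feature and candidate threshold, subtracts the entropies of the induced left/right partitions (the committed code subtracts them unweighted; the textbook criterion weights each child by its share of samples). A compact standalone version of that criterion, in the usual weighted form, is sketched below for reference only.

// Standalone illustration of the entropy / information-gain criterion;
// the usual weighted form (the code above uses the unweighted difference).
#include <map>
#include <cmath>

double entropy ( const std::map<int,int> &counts, int total )
{
	if ( total <= 0 ) return 0.0;
	double h = 0.0;
	for ( std::map<int,int>::const_iterator it = counts.begin(); it != counts.end(); ++it )
	{
		double p = ( double ) it->second / ( double ) total;
		if ( p > 0.0 )
			h -= p * ( std::log ( p ) / std::log ( 2.0 ) );   // log2(p)
	}
	return h;
}

// information gain of splitting "parent" into "left" and "right"
double informationGain ( const std::map<int,int> &parent, int nParent,
                         const std::map<int,int> &left, int nLeft,
                         const std::map<int,int> &right, int nRight )
{
	return entropy ( parent, nParent )
	       - ( ( double ) nLeft / nParent ) * entropy ( left, nLeft )
	       - ( ( double ) nRight / nParent ) * entropy ( right, nRight );
}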

+ 129 - 0
semseg/SemSegContextTree.h

@@ -0,0 +1,129 @@
+/** 
+* @file SemSegContextTree.h
+* @brief Context Trees -> Combination of decision tree and context information
+* @author Björn Fröhlich
+* @date 29.11.2011
+
+*/
+#ifndef SemSegContextTreeINCLUDE
+#define SemSegContextTreeINCLUDE
+
+#include "SemanticSegmentation.h"
+#include <objrec/math/mathbase/VVector.h>
+#include "objrec/features/localfeatures/LFColorWeijer.h"
+
+namespace OBJREC {
+
+/** Localization system */
+class SemSegContextTree : public SemanticSegmentation
+{
+    protected:
+ 
+	class Node
+	{
+	public:
+		/** probabilities for each class */
+		vector<double> probs;
+		
+		/** left child node */
+		int left;
+		
+		/** right child node */
+		int right;
+		
+		/** position of feat for decision */
+		int feat;
+		
+		/** decision stamp */
+		double decision;
+		
+		/** is the node a leaf or not */
+		bool isleaf;
+		
+		/** distribution in current node */
+		vector<double> dist;
+		
+		/** depth of the node in the tree */
+		int depth;
+		
+		/** simple constructor */
+		Node():left(-1),right(-1),feat(-1), decision(-1.0), isleaf(false){}
+		
+		/** standard constructor */
+		Node(int _left, int _right, int _feat, double _decision):left(_left),right(_right),feat(_feat), decision(_decision),isleaf(false){}
+	};
+      
+	/** store features */
+	VVector currentfeats;
+	
+	/** store the positions of the features */
+	VVector positions;
+	
+	/** tree -> saved as vector of nodes */
+	vector<Node> tree;
+	
+	/** local features */
+	LFColorWeijer *lfcw;
+	
+	/** distance between features */
+	int grid;
+	
+	/** maximum samples for tree  */
+	int maxSamples;
+	
+	/** count samples per label */
+	map<int,int> labelcounter;
+	
+	/** map of labels */
+	map<int,int> labelmap;
+	
+	/** map of labels inverse*/
+	map<int,int> labelmapback;
+	
+	/** scalefactor for balancing for each class */
+	vector<double> a;
+	
+	/** the minimum number of features allowed in a leaf */
+	int minFeats;
+	
+	/** maximal depth of tree */
+	int maxDepth;
+	
+    public:
+	/** simple constructor */
+	SemSegContextTree( const Config *conf, const MultiDataset *md );
+      
+	/** simple destructor */
+	virtual ~SemSegContextTree();
+
+	/**
+	 * test a single image
+	 * @param ce input data
+	 * @param segresult segmentation results
+	 * @param probabilities probabilities for each pixel
+	 */
+	void semanticseg ( CachedExample *ce,   NICE::Image & segresult,  GenericImage<double> & probabilities );
+	
+	/**
+	 * the main training method
+	 * @param md training data 
+	 */
+	void train ( const MultiDataset *md );
+	
+	/**
+	 * compute best split for current settings
+	 * @param feats features
+	 * @param currentfeats matrix with current node for each feature
+	 * @param labels labels for each feature
+	 * @param node current node
+	 * @param splitfeat output feature position
+	 * @param splitval 
+	 * @param splitval output split value (threshold)
+	void getBestSplit(const vector<vector<vector<vector<double> > > > &feats, vector<vector<vector<int> > > &currentfeats,const vector<vector<vector<int> > > &labels, int node, int &splitfeat, double &splitval);
+
+};
+
+
+} // namespace
+
+#endif
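
The tree is stored flat in the vector<Node> member declared above: left and right hold indices into the same vector (-1 while unassigned), feat selects the feature dimension, decision is the threshold, and dist carries the node's class distribution. Classification in semanticseg repeatedly applies these decisions per pixel; a standalone sketch of descending such a flat tree for a single feature vector (using a stand-in struct with the same fields) might look like this.

// Sketch: descend a flat, index-linked tree for one feature vector.
// FlatNode is a stand-in for the nested Node class declared above.
#include <vector>

struct FlatNode
{
	int left, right, feat;
	double decision;
	bool isleaf;
	std::vector<double> dist;   // per-class distribution at this node
};

int descend ( const std::vector<FlatNode> &tree, const std::vector<double> &feat )
{
	int n = 0;                                   // start at the root, tree[0]
	while ( !tree[n].isleaf && tree[n].left >= 0 )
	{
		if ( feat[ tree[n].feat ] < tree[n].decision )
			n = tree[n].left;
		else
			n = tree[n].right;
	}
	return n;   // leaf index; tree[n].dist is its class distribution
}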

+ 1854 - 0
semseg/SemSegCsurka.cpp

@@ -0,0 +1,1854 @@
+/**
+ * @file SemSegCsurka.cpp
+ * @brief semantic segmentation using the method from Csurka08
+ * @author Björn Fröhlich
+ * @date 04/24/2009
+ */
+#include <iostream>
+
+#include "SemSegCsurka.h"
+
+#include "objrec/baselib/ICETools.h"
+
+#include "core/image/Filter.h"
+
+#include <sstream>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+#undef DEBUG_CSURK
+
+SemSegCsurka::SemSegCsurka ( const Config *conf,
+                             const MultiDataset *md )
+		: SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
+{
+	this->conf = conf;
+
+	opSiftImpl = conf->gS ( "Descriptor", "implementation", "VANDESANDE" );
+	readfeat = conf->gB ( "Descriptor", "read", true );
+	writefeat = conf->gB ( "Descriptor", "write", true );
+#ifdef DEBUG_CSURK
+	clog << "[log] SemSegCsurka::SemSegCsurka: OpponentSIFT implementation: " << opSiftImpl << endl;
+#endif
+
+	save_cache = conf->gB ( "FPCPixel", "save_cache", true );
+	read_cache = conf->gB ( "FPCPixel", "read_cache", false );
+	cache = conf->gS ( "cache", "root", "" );
+	sigmaweight = conf->gD ( "SemSegCsurka", "sigmaweight", 0.6 );
+
+	dim = conf->gI ( "SemSegCsurka", "pcadim", 50 );
+
+	usepca = conf->gB ( "SemSegCsurka", "usepca", true );
+	calcpca = conf->gB ( "SemSegCsurka", "calcpca", false );
+
+	usegmm = conf->gB ( "SemSegCsurka", "usegmm", false );
+	norm = conf->gB ( "SemSegCsurka", "normalize", false );
+	usefisher = conf->gB ( "SemSegCsurka", "usefisher", false );
+	dogmm = conf->gB ( "SemSegCsurka", "dogmm", false );
+	gaussians = conf->gI ( "SemSegCsurka", "gaussians", 50 );
+
+	usekmeans = conf->gB ( "SemSegCsurka", "usekmeans", false );
+	kmeansfeat = conf->gI ( "SemSegCsurka", "kmeansfeat", 50 );
+	kmeanshard = conf->gB ( "SemSegCsurka", "kmeanshard", false );
+
+	cname = conf->gS ( "SemSegCsurka", "classifier", "RandomForests" );
+	anteil = conf->gD ( "SemSegCsurka", "anteil", 1.0 );
+	userellocprior = conf->gB ( "SemSegCsurka", "rellocfeat", false );
+	bool usesrg = conf->gB ( "SemSegCsurka", "usesrg", false );
+
+	useregions = conf->gB ( "SemSegCsurka", "useregions", true );
+	savesteps = conf->gB ( "SemSegCsurka", "savesteps", true );
+	bool usegcopt = conf->gB ( "SemSegCsurka", "usegcopt", false );
+
+	bestclasses = conf->gI ( "SemSegCsurka", "bestclasses", 0 );
+
+	smoothhl = conf->gB ( "SemSegCsurka", "smoothhl", false );
+	smoothfactor = conf->gD ( "SemSegCsurka", "smoothfactor", 1.0 );
+	
+	usecolorfeats = conf->gB("SemSegCsurka", "usecolorfeats", false);
+	
+	string rsMethod = conf->gS("SemSegCsurka", "segmentation", "meanshift");
+	
+	
+	
+	g = NULL;
+	k = NULL;
+	relloc = NULL;
+	srg = NULL;
+	gcopt = NULL;
+
+	if ( !useregions && ( userellocprior || usesrg ) )
+	{
+		cerr << "relative location priors and super region growing are only supported in combination with useregions" << endl;
+		exit ( 1 );
+	}
+
+	if ( usepca )
+		pca = PCA ( dim );
+
+	RegionSegmentationMethod * tmpseg;
+	if(rsMethod == "meanshift")
+	    tmpseg = new RSMeanShift ( conf );
+	else
+	    tmpseg = new RSGraphBased(conf);
+	
+	if(save_cache)
+	  seg = new RSCache ( conf, tmpseg );
+	else
+	  seg = tmpseg;
+
+	if ( userellocprior )
+		relloc = new RelativeLocationPrior ( conf );
+	else
+		relloc = NULL;
+
+	if ( usesrg )
+		srg = new PPSuperregion ( conf );
+	else
+		srg = NULL;
+
+	if ( usegcopt )
+		gcopt = new PPGraphCut ( conf );
+	else
+		gcopt = NULL;
+
+	classifier = NULL;
+	vclassifier = NULL;
+	if ( cname == "RandomForests" )
+		classifier = new FPCRandomForests ( conf, "ClassifierForest" );
+	else if ( cname == "SMLR" )
+		classifier = new FPCSMLR ( conf, "ClassifierSMLR" );
+	else
+		vclassifier = CSGeneric::selectVecClassifier ( conf, "main" );
+	//classifier = new FPCSparseMultinomialLogisticRegression(conf, "ClassifierSMLR");
+
+	if(classifier != NULL)
+		classifier->setMaxClassNo ( classNames->getMaxClassno() );
+	else
+		vclassifier->setMaxClassNo ( classNames->getMaxClassno() );
+
+	cn = md->getClassNames ( "train" );
+
+	if ( read_cache )
+	{
+		fprintf ( stderr, "SemSegCsurka:: Reading classifier data from %s\n", ( cache+"/fpcrf.data" ).c_str() );
+
+		if(classifier != NULL)
+			classifier->read ( cache+"/fpcrf.data" );
+		else
+			vclassifier->read ( cache+"/veccl.data" );
+
+		if ( usepca )
+		{
+			std::string filename = cache + "/pca";
+			pca.read ( filename );
+		}
+
+		if ( usegmm )
+		{
+			g = new GMM ( conf, gaussians );
+
+			if ( !g->loadData ( cache+"/gmm" ) )
+			{
+				cerr << "SemSegCsurka:: no gmm file found" << endl;
+				exit ( -1 );
+			}
+		}
+		else{ g = NULL; }
+
+		if ( usekmeans )
+		{
+			k = new KMeansOnline ( gaussians );
+		}
+
+		fprintf ( stderr, "SemSegCsurka:: successfully read\n" );
+
+		std::string filename = cache + "/rlp";
+
+		FILE *value;
+		value = fopen ( filename.c_str(),"r" );
+
+		if ( value==NULL )
+		{
+			trainpostprocess ( md );
+		}
+		else
+		{
+			if ( userellocprior )
+			{
+				relloc->read ( filename );
+			}
+		}
+
+		filename = cache + "/srg";
+
+		value = fopen ( filename.c_str(),"r" );
+
+		if ( value==NULL )
+		{
+			trainpostprocess ( md );
+		}
+		else
+		{
+			if ( srg != NULL )
+			{
+				srg->read ( filename );
+			}
+		}
+	}
+	else
+	{
+		train ( md );
+	}
+}
+
+SemSegCsurka::~SemSegCsurka()
+{
+	// clean-up
+	if ( classifier != NULL )
+		delete classifier;
+	if( vclassifier !=NULL)
+		delete vclassifier;
+	if ( seg != NULL )
+		delete seg;
+
+	g = NULL;
+	if ( g != NULL )
+		delete g;
+}
+
+void SemSegCsurka::normalize(Examples &ex)
+{
+	assert(ex.size() > 0);
+	if(vecmin.size() == 0)
+	{
+		for(int j = 0; j < (int)ex[0].second.vec->size(); j++)
+		{
+			double maxv = -numeric_limits<int>::max();
+			double minv = numeric_limits<int>::max();
+			for(int i = 0; i < (int)ex.size(); i++)
+			{
+				maxv = std::max(maxv,(*ex[i].second.vec)[j]);
+				minv = std::min(minv,(*ex[i].second.vec)[j]);
+			}
+			vecmin.push_back(minv);
+			vecmax.push_back(maxv);
+		}
+	}
+	for(int i = 0; i < (int)ex.size(); i++)
+	{
+		for(int j = 0; j < (int)ex[i].second.vec->size(); j++)
+		{
+			(*ex[i].second.vec)[j] = ((*ex[i].second.vec)[j]-vecmin[j])/(vecmax[j]-vecmin[j]);
+		}
+	}
+	return;
+}
+
+void SemSegCsurka::convertLowToHigh ( Examples &ex, double reduce )
+{
+	cout << "converting low-level features to high-level features" << endl;
+	
+	if ( reduce >= 1.0 )
+	{
+		for ( int i = 0; i < ( int ) ex.size(); i++ )
+		{
+			SparseVector *f = new SparseVector();
+
+			if ( usekmeans )
+			{
+				k->getDist ( *ex[i].second.vec, *f, kmeansfeat, kmeanshard );
+			}
+			else
+			{
+				if ( usefisher )
+					g->getFisher ( *ex[i].second.vec, *f );
+				else
+					g->getProbs ( *ex[i].second.vec, *f );
+			}
+			delete ex[i].second.vec;
+
+			ex[i].second.vec = NULL;
+			ex[i].second.svec = f;
+		}
+	}
+	else
+	{
+		srand ( time ( NULL ) );
+
+		vector<bool> del(ex.size(), false);
+		cout << "Example size old " << ex.size() << endl;
+
+#pragma omp parallel for
+		for ( int i = 0; i < ( int ) ex.size(); i++ )
+		{
+			double rval = ( double ) rand() / ( double ) RAND_MAX;
+			if ( rval < reduce )
+			{
+				SparseVector *f = new SparseVector();
+
+				if ( usekmeans )
+					k->getDist ( *ex[i].second.vec, *f, kmeansfeat, kmeanshard );
+				else
+				{
+					if ( usefisher )
+						g->getFisher ( *ex[i].second.vec, *f );
+					else
+						g->getProbs ( *ex[i].second.vec, *f );
+				}
+
+				delete ex[i].second.vec;
+				ex[i].second.vec = NULL;
+				ex[i].second.svec = f;
+			}
+			else
+			{
+				del[i] = true;
+			}
+		}
+		for ( int i = ( int ) del.size()-1; i >= 0; i-- )
+		{
+		  if(del[i])
+		  {
+			ex.erase ( ex.begin() +i);
+		  }
+		}
+		cerr << "Example size new " << ex.size() << endl;
+	}
+	cerr << "converting low-level features to high-level features finished" << endl;
+}
+
+void SemSegCsurka::smoothHL ( Examples ex )
+{
+
+	if ( !smoothhl )
+		return;
+	assert ( ex.size() > 1 );
+
+	long long int minx = numeric_limits<long long int>::max();
+	long long int miny = numeric_limits<long long int>::max();
+	long long int maxx = -numeric_limits<long long int>::max();
+	long long int maxy = -numeric_limits<long long int>::max();
+	long long int distx = numeric_limits<long long int>::max();
+	long long int disty = numeric_limits<long long int>::max();
+
+	set<double> scales;
+	for ( int i = 0; i < (int)ex.size(); i++ )
+	{
+		scales.insert ( ex[i].second.scale );
+	}
+
+	map<double, int> scalepos;
+	int it = 0;
+
+	for ( set<double>::const_iterator iter = scales.begin(); iter != scales.end();    ++iter, ++it )
+	{
+		scalepos.insert(make_pair(*iter, it));
+	}
+
+	for ( int i = 0; i < (int)ex.size(); i++ )
+	{
+		if ( minx < numeric_limits<int>::max() && ex[i].second.x - minx > 0 )
+			distx = std::min ( distx, ex[i].second.x - minx );
+		if ( miny < numeric_limits<int>::max() && ex[i].second.y - miny > 0 )
+			disty = std::min ( disty, ex[i].second.y - miny );
+		minx = std::min ( (long long int)ex[i].second.x, minx );
+		maxx = std::max ( (long long int)ex[i].second.x, maxx );
+		miny = std::min ( (long long int)ex[i].second.y, miny );
+		maxy = std::max ( (long long int)ex[i].second.y, maxy );
+	}
+
+	distx = abs ( distx );
+
+	int xsize = ( maxx - minx ) /distx +1;
+	int ysize = ( maxy - miny ) /disty +1;
+	double valx = ( ( double ) xsize-1 ) / ( double ) ( maxx - minx );
+	double valy = ( ( double ) ysize-1 ) / ( double ) ( maxy - miny );
+
+	//double sigma = smoothfactor;
+	double sigma = std::max(xsize,ysize) * smoothfactor;
+	//double sigma = 0.2;
+	cout << "sigma1: " << sigma << endl;
+	
+	vector<NICE::FloatImage> imgv;
+	vector<NICE::FloatImage> gaussImgv;
+	for(int i = 0; i < (int)scalepos.size(); i++)
+	{
+		NICE::FloatImage img( xsize, ysize);
+		NICE::FloatImage gaussImg( xsize, ysize);
+		imgv.push_back(img);
+		gaussImgv.push_back(gaussImg);
+	}
+
+	for ( int d = 0; d < ex[0].second.svec->getDim(); d++ )
+	{
+		//TODO: determine max and min dynamically
+
+		for(int i = 0; i < (int)scalepos.size(); i++)
+		{
+			imgv[i].set(0.0);
+			gaussImgv[i].set(0.0);
+		}
+		
+		for ( int i = 0; i < (int)ex.size(); i++ )
+		{
+			int xpos = ( ex[i].second.x - minx ) *valx;
+			int ypos = ( ex[i].second.y - miny ) *valy;
+			
+			double val = ex[i].second.svec->get ( d );
+			// refactor-nice.pl: check this substitution
+			// old: PutValD ( imgv[scalepos[ex[i].second.scale]],xpos,ypos,val);
+			imgv[scalepos[ex[i].second.scale]].setPixel(xpos,ypos,val);
+		}
+		
+		/*
+		for(int y = 0; y < ysize; y++)
+		{
+			for(int x = 0; x < xsize; x++)
+			{
+				// refactor-nice.pl: check this substitution
+				// old: double val = GetValD(img,x,y);
+				double val = img.getPixel(x,y);
+				double  c = 0.0;
+				if(val == 0.0)
+				{
+					if(x > 0)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x-1,y);
+						val+=img.getPixel(x-1,y);
+						c+=1.0;
+					}
+					if(y > 0)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x,y-1);
+						val+=img.getPixel(x,y-1);
+						c+=1.0;
+					}
+					if(x < xsize-1)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x+1,y);
+						val+=img.getPixel(x+1,y);
+						c+=1.0;
+					}
+					if(y < ysize-1)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x,y+1);
+						val+=img.getPixel(x,y+1);
+						c+=1.0;
+					}
+					// refactor-nice.pl: check this substitution
+					// old: PutValD(img,x,y,val/c);
+					img.setPixel(x,y,val/c);
+				}
+			}
+		}*/
+
+		for(int i = 0; i < (int)imgv.size(); i++)
+			filterGaussSigmaApproximate<float,float,float>( imgv[i], sigma, &gaussImgv[i] );	
+		
+		for ( int i = 0; i < (int)ex.size(); i++ )
+		{
+			int xpos = ( ex[i].second.x - minx ) *valx;
+			int ypos = ( ex[i].second.y - miny ) *valy;
+			// refactor-nice.pl: check this substitution
+			// old: double val = GetValD ( gaussImgv[scalepos[ex[i].second.scale]], xpos, ypos );
+			double val = gaussImgv[scalepos[ex[i].second.scale]].getPixel(xpos,ypos);
+
+			if ( fabs ( val ) < 1e-7 )
+			{
+				if ( ex[i].second.svec->get ( d ) != 0.0 )
+				{
+					ex[i].second.svec->erase ( d );
+				}
+			}
+			else
+			{
+				( *ex[i].second.svec ) [d] = val;
+			}
+		}
+	}
+}
+
+void SemSegCsurka::initializePCA ( Examples &ex )
+{
+#ifdef DEBUG
+	cerr << "start computing pca" << endl;
+#endif
+	std::string filename = cache + "/pca";
+	FILE *value;
+	value = fopen ( filename.c_str(),"r" );
+
+	if ( value==NULL || calcpca )
+	{
+		srand ( time ( NULL ) );
+
+		int featsize = ( int ) ex.size();
+		int maxfeatures = dim*10;
+		int olddim = ex[0].second.vec->size();
+
+		maxfeatures = std::min ( maxfeatures, featsize );
+
+		NICE::Matrix features ( maxfeatures, olddim );
+
+		for ( int i = 0; i < maxfeatures; i++ )
+		{
+			int k = rand() % featsize;
+
+			int vsize = (int)ex[k].second.vec->size();
+			for(int j = 0; j < vsize; j++)
+			{
+				features(i,j) = (*( ex[k].second.vec))[j];
+			}
+		}
+		pca.calculateBasis ( features, dim, 1 );
+
+		if ( save_cache )
+			pca.save ( filename );
+
+	}
+	else
+	{
+		cout << "readpca: " << filename << endl;
+		pca.read ( filename );
+		cout << "end" << endl;
+	}
+#ifdef DEBUG
+	cerr << "finished computing pca" << endl;
+#endif
+}
+
+void SemSegCsurka::doPCA ( Examples &ex )
+{
+	cout << "converting features using pca starts" << endl;
+
+	std::string savedir = cname = conf->gS ( "cache", "root", "/dev/null/" );
+	std::string shortf = ex.filename;
+	if ( string::npos != ex.filename.rfind ( "/" ) )
+		shortf = ex.filename.substr ( ex.filename.rfind ( "/" ) );
+	std::string filename = savedir+"/pcasave/"+shortf;
+	std::string syscall = "mkdir "+savedir+"/pcasave";
+	system ( syscall.c_str() );
+	cout << "filename: " << filename << endl;
+	
+	if ( !FileMgt::fileExists(filename) || calcpca )
+	{
+		ofstream ofStream;
+
+		//Opens the file binary
+		ofStream.open ( filename.c_str(),fstream::out | fstream::binary );
+
+		for ( int k = 0; k < ( int ) ex.size(); k++ )
+		{
+			NICE::Vector tmp = pca.getFeatureVector ( * ( ex[k].second.vec ), true );
+			delete ex[k].second.vec;
+			for ( int d = 0; d < (int)tmp.size(); d++ )
+				ofStream.write ( ( char* ) &tmp[d], sizeof ( double ) );
+			ex[k].second.vec = new NICE::Vector ( tmp );
+		}
+		ofStream.close();
+		cout << endl;
+	}
+	else
+	{
+		ifstream ifStream;
+		ifStream.open ( filename.c_str(),std::fstream::in | std::fstream::binary );
+		for ( int k = 0; k < ( int ) ex.size(); k++ )
+		{
+			NICE::Vector tmp = NICE::Vector ( dim );
+			delete ex[k].second.vec;
+			for ( int d = 0; d < dim; d++ )
+				ifStream.read ( ( char* ) &tmp[d], sizeof ( double ) );
+			ex[k].second.vec = new NICE::Vector ( tmp );
+		}
+
+		ifStream.close();
+	}
+	cout << "converting features using pca finished" << endl;
+}
+
+
+
+
+void SemSegCsurka::train ( const MultiDataset *md )
+{
+
+	/*the individual training steps
+	1. compute SIFT features at the grid points of all training images at every resolution
+	2. apply PCA
+	3. build a GMM from these features
+	4. for each SIFT feature, build a vector whose entry i holds the probability of GMM component i; currently done with the BoV alternative of Moosman06
+	5. train these vectors, together with their class labels, in a discriminative classifier (e.g. SLR or randomized forests)
+	*/
+#ifdef DEBUG
+	cerr << "SemSegCsurka:: training starts" << endl;
+#endif
+
+	Examples examples;
+	examples.filename = "training";
+
+
+	// Which OpponentSIFT implementation should be used?
+	LocalFeatureRepresentation *cSIFT = NULL;
+	LocalFeatureRepresentation *writeFeats = NULL;
+	LocalFeatureRepresentation *readFeats = NULL;
+	LocalFeatureRepresentation *getFeats = NULL;
+	
+	if( opSiftImpl == "NICE" )
+	{
+		cSIFT = new LFonHSG( conf, "HSGtrain" );
+	}
+	else if( opSiftImpl == "VANDESANDE" )
+	{
+		// the used features
+		cSIFT = new LFColorSande ( conf, "LFColorSandeTrain" );
+	}
+	else
+	{
+		fthrow(Exception, "feattype: " << opSiftImpl << " not yet supported" );
+	}
+	
+	getFeats = cSIFT;
+	
+	if(writefeat)
+	{
+		// write the features to a file, if there isn't any to read
+		writeFeats = new LFWriteCache ( conf, cSIFT );
+		getFeats = writeFeats;
+	}
+	
+	if(readfeat)
+	{
+		// read the features from a file
+		if(writefeat)
+		{
+			readFeats = new LFReadCache ( conf, writeFeats,-1 );
+		}
+		else
+		{
+			readFeats = new LFReadCache ( conf, cSIFT,-1 );
+		}
+		getFeats = readFeats;
+	}
+	
+	// additional Colorfeatures
+	LFColorWeijer lcw(conf);
+
+	int lfdimension = -1;
+
+	const LabeledSet train = * ( *md ) ["train"];
+	const LabeledSet *trainp = &train;
+
+	////////////////////////
+	// Compute features //
+	////////////////////////
+
+	set<int> forbidden_classes;
+
+	std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
+	if ( forbidden_classes_s == "" )
+	{
+		forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
+	}
+	cn.getSelection ( forbidden_classes_s, forbidden_classes );
+	cerr << "forbidden: " << forbidden_classes_s << endl;
+
+	ProgressBar pb ( "Local Feature Extraction" );
+	pb.show();
+
+	int imgnb = 0;
+
+	LOOP_ALL_S ( *trainp )
+	{
+		//EACH_S(classno, currentFile);
+		EACH_INFO ( classno,info );
+
+		pb.update ( trainp->count() );
+
+		NICE::ColorImage img;
+
+		std::string currentFile = info.img();
+
+		CachedExample *ce = new CachedExample ( currentFile );
+
+		const LocalizationResult *locResult = info.localization();
+		if ( locResult->size() <= 0 )
+		{
+			fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+			          currentFile.c_str() );
+			continue;
+		}
+
+		fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+		          currentFile.c_str() );
+
+		int xsize, ysize;
+		ce->getImageSize ( xsize, ysize );
+
+		NICE::Image pixelLabels (xsize, ysize);
+		pixelLabels.set(0);
+		locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+		try {
+			img = ColorImage(currentFile);
+		} catch (Exception) {
+			cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+			continue;
+		}
+		
+		Globals::setCurrentImgFN ( currentFile );
+
+		VVector features;
+		VVector cfeatures;
+		VVector positions;
+
+		NICE::ColorImage cimg(currentFile);
+		
+		getFeats->extractFeatures ( img, features, positions );
+
+#ifdef DEBUG_CSURK
+		cout << "[log] SemSegCsurka::train -> " << currentFile << ": extracted " << features.size() << " features at " << positions.size() << " positions, each of dimension " << features[0].size() << endl;
+#endif
+
+		if(usecolorfeats)
+			lcw.getDescriptors(cimg, cfeatures, positions);
+
+		int j = 0;
+		
+		for ( VVector::const_iterator i = features.begin();
+		        i != features.end();
+		        i++,j++ )
+		{
+			const NICE::Vector & x = *i;
+			classno = pixelLabels.getPixel(( int )positions[j][0], ( int )positions[j][1] );
+
+			if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+				continue;
+
+			if ( lfdimension < 0 )
+				lfdimension = ( int ) x.size();
+			else
+				assert ( lfdimension == ( int ) x.size() );
+
+			NICE::Vector *v = new NICE::Vector ( x );
+			
+			if(usecolorfeats && !usepca)
+				v->append(cfeatures[j]);
+
+			Example example ( v );
+			example.position = imgnb;
+			examples.push_back (
+			    pair<int, Example> ( classno, example ) );
+		}
+		features.clear();
+		positions.clear();
+		delete ce;
+		imgnb++;
+	}
+
+	pb.hide();
+
+	//////////////////
+	// Apply PCA //
+	//////////////////
+
+	if ( usepca )
+	{
+		if ( !read_cache )
+		{
+			initializePCA ( examples );
+		}
+		doPCA ( examples );
+		lfdimension = dim;
+	}
+
+	/////////////////////////////////////////////////////
+	// Transform low-level into high-level features //
+	/////////////////////////////////////////////////////
+
+	int hlfdimension = lfdimension;
+
+	if(norm)
+		normalize(examples);
+	
+	if ( usegmm )
+	{
+		if(!usepca && !norm)
+			normalize(examples);
+		g = new GMM ( conf,gaussians );
+
+		if ( dogmm || !g->loadData ( cache+"/gmm" ) )
+		{
+			g->computeMixture ( examples );
+			if ( save_cache )
+				g->saveData ( cache+"/gmm" );
+		}
+
+		hlfdimension = gaussians;
+
+		if ( usefisher )
+			hlfdimension = gaussians*2*dim;
+	}
+
+	if ( usekmeans )
+	{
+		if(!usepca || norm)
+			normalize(examples);
+		k = new KMeansOnline ( gaussians );
+
+		k->cluster ( examples );
+
+		hlfdimension = gaussians;
+	}
+
+	if ( usekmeans || usegmm )
+	{
+		examples.clear();
+		pb.reset("Local Feature Extraction");
+		lfdimension = -1;
+		pb.update ( trainp->count() );
+		LOOP_ALL_S ( *trainp )
+		{
+			EACH_INFO ( classno,info );
+
+			pb.update ( trainp->count() );
+
+			NICE::ColorImage img;
+
+			std::string currentFile = info.img();
+
+			CachedExample *ce = new CachedExample ( currentFile );
+			
+			const LocalizationResult *locResult = info.localization();
+			if ( locResult->size() <= 0 )
+			{
+				fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+				          currentFile.c_str() );
+				continue;
+			}
+
+			fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+			          currentFile.c_str() );
+
+			int xsize, ysize;
+			ce->getImageSize ( xsize, ysize );
+
+			NICE::Image pixelLabels (xsize, ysize);
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			try{
+				img = ColorImage(currentFile);
+			}
+			catch (Exception){
+				cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+				continue;
+			}
+
+			Globals::setCurrentImgFN ( currentFile );
+
+			VVector features;
+			VVector cfeatures;
+			VVector positions;
+
+			NICE::ColorImage cimg(currentFile);
+
+			getFeats->extractFeatures ( img, features, positions );
+
+			if(usecolorfeats)
+				lcw.getDescriptors(cimg, cfeatures, positions);
+
+			int j = 0;
+
+			Examples tmpex;
+
+			for ( VVector::const_iterator i = features.begin();
+			        i != features.end();
+			        i++,j++ )
+			{
+				
+				const NICE::Vector & x = *i;
+
+				classno = pixelLabels.getPixel(( int )positions[j][0], ( int )positions[j][1] );
+
+				if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+					continue;
+				
+				if ( lfdimension < 0 )
+					lfdimension = ( int ) x.size();
+				else
+					assert ( lfdimension == ( int ) x.size() );
+				
+				NICE::Vector *v = new NICE::Vector ( x );
+				if(usecolorfeats)
+					v->append(cfeatures[j]);
+
+				Example example ( v );
+				example.position = imgnb;
+				example.x = ( int ) positions[j][0];
+				example.y = ( int )positions[j][1];
+				example.scale = positions[j][2];
+
+				tmpex.push_back ( pair<int, Example> ( classno, example ) );
+			}
+			tmpex.filename = currentFile;
+			if ( usepca )
+			{
+				doPCA ( tmpex );
+			}
+
+			convertLowToHigh ( tmpex, anteil );
+
+			smoothHL ( tmpex );
+			
+			for ( int i = 0; i < (int)tmpex.size(); i++ )
+			{
+				examples.push_back ( pair<int, Example> ( tmpex[i].first, tmpex[i].second ) );
+			}
+
+			tmpex.clear();
+
+			features.clear();
+			positions.clear();
+			delete ce;
+			imgnb++;
+			
+		}
+
+		pb.hide();
+	}
+	////////////////////////////
+	// Train the classifier //
+	////////////////////////////
+	FeaturePool fp;
+	
+	Feature *f;
+
+	if ( usegmm || usekmeans )
+		f = new SparseVectorFeature ( hlfdimension );
+	else
+		f = new VectorFeature ( hlfdimension );
+	
+	f->explode ( fp );
+	delete f;
+	
+	if(usecolorfeats && !( usekmeans || usegmm ))
+	{
+		int dimension = hlfdimension+11;
+		for ( int i = hlfdimension ; i < dimension ; i++ )
+		{
+			VectorFeature *f = new VectorFeature ( dimension );
+			f->feature_index = i;
+			fp.addFeature(f, 1.0 / dimension);
+		}
+	}
+/*
+cout << "train classifier" << endl;
+fp.store(cout);
+getchar();
+for(int z = 0; z < examples.size(); z++)
+{
+cout << "examples.size() " << examples.size() << endl;
+cout << "class: " << examples[z].first << endl;
+	cout << *examples[z].second.vec << endl;
+	getchar();
+}*/
+	if(classifier != NULL)
+		classifier->train ( fp, examples );
+	else
+	{
+		LabeledSetVector lvec;
+		convertExamplesToLSet(examples, lvec);
+		vclassifier->teach(lvec);
+		if(usegmm)
+			convertLSetToSparseExamples(examples, lvec);
+		else
+			convertLSetToExamples(examples, lvec);
+		vclassifier->finishTeaching();
+	}
+
+	fp.destroy();
+
+	if ( save_cache )
+	{
+		if(classifier != NULL)
+			classifier->save ( cache+"/fpcrf.data" );
+		else
+			vclassifier->save ( cache+"/veccl.data" );
+	}
+
+	////////////
+	//clean up//
+	////////////
+	for ( int i = 0; i < ( int ) examples.size(); i++ )
+	{
+		examples[i].second.clean();
+	}
+	examples.clear();
+
+	if(cSIFT != NULL)
+		delete cSIFT;
+	if(writeFeats != NULL)
+		delete writeFeats;
+	if(readFeats != NULL)
+		delete readFeats;
+	getFeats = NULL;
+
+	trainpostprocess ( md );
+
+	cerr << "SemSeg training finished" << endl;
+}
+
+void SemSegCsurka::trainpostprocess ( const MultiDataset *md )
+{
+	cout<< "start postprocess" << endl;
+	////////////////////////////
+	// Train the postprocessing //
+	////////////////////////////
+	const LabeledSet train = * ( *md ) ["train"];
+	const LabeledSet *trainp = &train;
+
+	if ( userellocprior || srg != NULL || gcopt !=NULL )
+	{
+		clog << "[log] SemSegCsurka::trainpostprocess: if ( userellocprior || srg != NULL || gcopt !=NULL )" << endl;
+		if ( userellocprior )
+			relloc->setClassNo ( cn.numClasses() );
+
+		if ( gcopt !=NULL )
+		{
+			gcopt->setClassNo ( cn.numClasses() );
+		}
+
+		ProgressBar pb ( "learn relative location prior maps" );
+		pb.show();
+		LOOP_ALL_S ( *trainp ) // run the first classification step on all images in order to train the second classifier
+		{
+			EACH_INFO ( classno,info );
+			
+			pb.update ( trainp->count() );
+
+			NICE::ColorImage img;
+
+			std::string currentFile = info.img();
+			Globals::setCurrentImgFN ( currentFile );
+			CachedExample *ce = new CachedExample ( currentFile );
+
+			const LocalizationResult *locResult = info.localization();
+			if ( locResult->size() <= 0 )
+			{
+				fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+				          currentFile.c_str() );
+				continue;
+			}
+
+			fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+			          currentFile.c_str() );
+
+			int xsize, ysize;
+			ce->getImageSize ( xsize, ysize );
+
+			NICE::Image pixelLabels (xsize, ysize);
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			try{
+				img = ColorImage(currentFile);
+			}
+			catch(Exception)
+			{
+				cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+				continue;
+			}
+
+			// determine regions
+			NICE::Matrix mask;
+
+			int regionsize = seg->segRegions ( img, mask );
+#ifdef DEBUG_CSURK
+			Image overlay(img.width(), img.height());
+
+			double maxval = 0.0;
+
+			for(int y = 0; y < img.height(); y++)
+			{
+				for(int x = 0; x < img.width(); x++)
+				{
+					int val = ((int)mask(x,y)+1)%256;
+					overlay.setPixel(x,y,val);
+					maxval = std::max(mask(x,y), maxval);
+				}
+			}
+
+			cout << maxval << " different regions found" << endl;
+
+			NICE::showImageOverlay ( img, overlay, "Segmentation Result" );
+#endif
+
+			Examples regions;
+
+			vector<vector<int> > hists;
+
+			for ( int i = 0; i < regionsize; i++ )
+			{
+				Example tmp;
+				regions.push_back ( pair<int, Example> ( 0, tmp ) );
+				vector<int> hist ( cn.numClasses(), 0 );
+				hists.push_back ( hist );
+			}
+
+			for ( int x = 0; x < xsize; x++ )
+			{
+				for ( int y = 0; y < ysize; y++ )
+				{
+					int numb = mask(x,y);
+					regions[numb].second.x += x;
+					regions[numb].second.y += y;
+					regions[numb].second.weight += 1.0;
+					hists[numb][pixelLabels.getPixel(x,y)]++;
+				}
+			}
+
+			for ( int i = 0; i < regionsize; i++ )
+			{
+				regions[i].second.x /= ( int ) regions[i].second.weight;
+				regions[i].second.y /= ( int ) regions[i].second.weight;
+
+				int maxval = -numeric_limits<int>::max();
+				int maxpos = -1;
+				int secondpos = -1;
+				for ( int k = 0; k < ( int ) hists[i].size(); k++ )
+				{
+					if ( maxval <hists[i][k] )
+					{
+						maxval = hists[i][k];
+						secondpos = maxpos;
+						maxpos = k;
+					}
+				}
+
+				if ( cn.text ( maxpos ) == "various" )
+					regions[i].first = secondpos;
+				else
+					regions[i].first = maxpos;
+
+			}
+			if ( userellocprior )
+				relloc->trainPriorsMaps ( regions, xsize, ysize );
+
+			if ( srg != NULL )
+				srg->trainShape ( regions, mask );
+
+			if ( gcopt !=NULL )
+				gcopt->trainImage ( regions, mask );
+
+			delete ce;
+
+		}
+		pb.hide();
+		if ( userellocprior )
+			relloc->finishPriorsMaps ( cn );
+
+		if ( srg != NULL )
+			srg->finishShape ( cn );
+
+		if ( gcopt != NULL )
+			gcopt->finishPP ( cn );
+	}
+	if ( userellocprior )
+	{
+		clog << "[log] SemSegCsurka::trainpostprocess: if ( userellocprior )" << endl;
+		ProgressBar pb ( "learn relative location classifier" );
+		pb.show();
+
+		int nummer = 0;
+		LOOP_ALL_S ( *trainp ) // run the first classification step on all images in order to train the second classifier
+		{
+			//EACH_S(classno, currentFile);
+			EACH_INFO ( classno,info );
+			nummer++;
+			pb.update ( trainp->count() );
+
+			NICE::Image img;
+			std::string currentFile = info.img();
+
+			CachedExample *ce = new CachedExample ( currentFile );
+
+			const LocalizationResult *locResult = info.localization();
+			if ( locResult->size() <= 0 )
+			{
+				fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+				          currentFile.c_str() );
+				continue;
+			}
+
+			fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+			          currentFile.c_str() );
+
+			int xsize, ysize;
+			ce->getImageSize ( xsize, ysize );
+
+			NICE::Image pixelLabels (xsize, ysize);
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			try{
+				img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+			}
+			catch(Exception)
+			{
+				cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+				continue;
+			}
+			Globals::setCurrentImgFN ( currentFile );
+
+			NICE::Image segresult;
+
+			GenericImage<double> probabilities ( xsize,ysize,classno,true );
+
+			Examples regions;
+
+			NICE::Matrix mask;
+
+			if ( savesteps )
+			{
+				std::ostringstream s1;
+				s1 << cache << "/rlpsave/" << nummer;
+
+				std::string filename = s1.str();
+				s1 << ".probs";
+				
+				std::string fn2 = s1.str();
+
+				FILE *file;
+				file = fopen ( filename.c_str(),"r" );
+
+				if ( file==NULL )
+				{
+					// compute
+					classifyregions ( ce, segresult, probabilities, regions, mask );
+					// write
+					ofstream fout ( filename.c_str(), ios::app );
+					fout << regions.size() << endl;
+					for ( int i = 0; i < ( int ) regions.size(); i++ )
+					{
+						regions[i].second.store ( fout );
+						fout << regions[i].first << endl;
+					}
+					fout.close();
+					probabilities.store ( fn2 );
+				}
+				else
+				{
+					// read
+					ifstream fin ( filename.c_str() );
+					int size;
+					fin >> size;
+
+					for ( int i = 0; i < size; i++ )
+					{
+						Example ex;
+						ex.restore ( fin );
+						int tmp;
+						fin >> tmp;
+						regions.push_back ( pair<int, Example> ( tmp, ex ) );
+					}
+
+					fin.close();
+
+					probabilities.restore ( fn2 );
+				}
+			}
+			else
+			{
+				classifyregions ( ce, segresult, probabilities, regions, mask );
+			}
+
+			relloc->trainClassifier ( regions, probabilities );
+
+			delete ce;
+
+		}
+		relloc->finishClassifier();
+		pb.hide();
+
+		relloc->save ( cache+"/rlp" );
+	}
+	cout << "finished postprocess" << endl;
+}
+
+void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult, GenericImage<double> & probabilities, Examples &Regionen, NICE::Matrix & mask )
+{
+	/* the individual test steps:
+	1.x  compute all SIFT features of the test image at the grid points for all resolutions
+	2.x  for each SIFT feature, build a vector that contains at position i the probability for distribution i of the GMM
+	3.x  classify these vectors so that a probability is stored for each class
+	4.x  for each pixel, average the probabilities over all patches that contain the pixel
+	5.x  segment the original image into homogeneous regions
+	6.x  the homogeneous regions get the averaged probabilities of their pixels
+	7.   (exclude individual classes with a global classifier)
+	8.x  each pixel gets the class of its region
+	*/
+
+	clog << "[log] SemSegCsurka::classifyregions" << endl;
+	int xsize, ysize;
+
+	ce->getImageSize ( xsize, ysize );
+
+	probabilities.reInit ( xsize, ysize, classNames->getMaxClassno() +1, true/*allocMem*/ );
+	clog << "[log] SemSegCsurka::classifyregions: probabilities.numChannels = " << probabilities.numChannels << endl;
+
+	segresult.resize(xsize, ysize);
+
+	Examples pce;
+
+	// Which OpponentSIFT implementation should be used?
+	LocalFeatureRepresentation *cSIFT = NULL;
+	LocalFeatureRepresentation *writeFeats = NULL;
+	LocalFeatureRepresentation *readFeats = NULL;
+	LocalFeatureRepresentation *getFeats = NULL;
+	
+	
+	if( opSiftImpl == "NICE" )
+	{
+		cSIFT = new LFonHSG( conf, "HSGtrain" );
+	}
+	else if( opSiftImpl == "VANDESANDE" )
+	{
+		// the used features
+		cSIFT = new LFColorSande ( conf, "LFColorSandeTrain" );
+	}
+	else
+	{
+		fthrow(Exception, "feattype: " << opSiftImpl << " not yet supported");
+	}
+	
+	getFeats = cSIFT;
+	
+	if(writefeat)
+	{
+		// write the features to a file, if there isn't any to read
+		writeFeats = new LFWriteCache ( conf, cSIFT );
+		getFeats = writeFeats;
+	}
+	
+	if(readfeat)
+	{
+		// read the features from a file
+		if(writefeat)
+		{
+			readFeats = new LFReadCache ( conf, writeFeats,-1 );
+		}
+		else
+		{
+			readFeats = new LFReadCache ( conf, cSIFT,-1 );
+		}
+		getFeats = readFeats;
+	}
+	
+	
+	// additional color features
+	LFColorWeijer lcw(conf);
+	
+	NICE::ColorImage img;
+
+	std::string currentFile = Globals::getCurrentImgFN();
+	
+	try
+	{
+		img = ColorImage(currentFile);
+	}
+	catch(Exception)
+	{
+		cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+	}
+
+	VVector features;
+	VVector cfeatures;
+	VVector positions;
+
+	getFeats->extractFeatures ( img, features, positions ); 
+
+	if(usecolorfeats)
+		lcw.getDescriptors(img, cfeatures, positions);
+
+	set<double> scales;
+
+	int j = 0;
+	int lfdimension = -1;
+	for ( VVector::const_iterator i = features.begin();
+	        i != features.end();
+	        i++,j++ )
+	{
+		const NICE::Vector & x = *i;
+
+		if ( lfdimension < 0 ) lfdimension = ( int ) x.size();
+		else assert ( lfdimension == ( int ) x.size() );
+
+		NICE::Vector *v = new NICE::Vector ( x );
+
+		if(usecolorfeats)
+			v->append(cfeatures[j]);
+		
+		Example tmp = Example ( v );
+		tmp.x = ( int )positions[j][0];
+		tmp.y = ( int ) positions[j][1];
+		tmp.width = ( int ) ( 16.0*positions[j][2] );
+		tmp.height = tmp.width;
+		tmp.scale = positions[j][2];
+		scales.insert ( positions[j][2] );
+		pce.push_back ( pair<int, Example> ( 0, tmp ) );
+	}
+	
+	//////////////////
+	// Apply PCA    //
+	//////////////////
+	pce.filename = currentFile;
+	if ( usepca )
+	{
+		doPCA ( pce );
+		lfdimension = dim;
+	}
+
+	//////////////////
+	// Apply BoV    //
+	//////////////////
+	if(norm)
+		normalize(pce);
+	if ( usegmm || usekmeans )
+	{
+		if(!usepca && !norm)
+			normalize(pce);
+		convertLowToHigh ( pce );
+		smoothHL ( pce );
+		lfdimension = gaussians;
+	}
+
+	/////////////////////////////////////////
+	// Build the probability maps          //
+	/////////////////////////////////////////
+	int klassen = probabilities.numChannels;
+	GenericImage<double> preMap ( xsize,ysize,klassen*scales.size(),true );
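+	// preMap holds one channel per (class, scale) pair: the score of class j at scale
+	// index s is stored in channel j + s*klassen (see the classification loops below).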
+
+	long int offset = 0;
+
+	// initialize
+	for ( int y = 0 ; y < ysize ; y++ )
+		for ( int x = 0 ; x < xsize ; x++,offset++ )
+		{
+			// set everything to background
+			segresult.setPixel(x,y,0);
+			// initialize the probability maps with 0
+			for ( int i = 0 ; i < ( int ) probabilities.numChannels; i++ )
+			{
+				probabilities.data[i][offset] = 0.0;
+			}
+			for ( int j = 0; j < ( int ) preMap.numChannels; j++ )
+			{
+				preMap.data[j][offset]=0.0;
+			}
+		}
+
+	// fill the probability maps with the individual probabilities for each scale
+	int scalesize = scales.size();
+
+	// accumulate global class frequencies
+	FullVector fV ( ( int ) probabilities.numChannels );
+
+	for ( int i = 0; i < fV.size(); i++ )
+		fV[i] = 0.0;
+
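+	// fV accumulates the classifier scores over the whole image; it is used further
+	// below as an image-level prior via pss.setPrior ( fV ).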
+	// read allowed classes
+
+	string cndir = conf->gS("SemSegCsurka", "cndir", "");
+	int classes = (int)probabilities.numChannels;
+	vector<int> useclass(classes,1);
+
+	std::vector< std::string > list;
+	StringTools::split (currentFile, '/', list);
+
+	string orgname = list.back();
+	if(cndir != "")
+	{
+	    useclass = vector<int>(classes,0);
+	    ifstream infile((cndir+"/"+orgname+".dat").c_str());
+	    while(!infile.eof() && infile.good())
+	    {
+		int tmp;
+		infile >> tmp;
+		if(tmp >= 0 && tmp < classes)
+		{
+		  useclass[tmp] = 1;
+		}
+	    }
+	}
+	if(classifier != NULL)
+	{
+		clog << "[log] SemSegCsurka::classifyregions: building probability maps: classifier != NULL" << endl;
+#pragma omp parallel for
+		for ( int s = 0; s < scalesize; s++ )
+		{
+#pragma omp parallel for
+			for ( int i = s; i < ( int ) pce.size(); i+=scalesize )
+			{
+				ClassificationResult r = classifier->classify ( pce[i].second );
+				for ( int j = 0 ; j < r.scores.size(); j++ )
+				{
+					if(useclass[j] == 0)
+					  continue;
+					fV[j] += r.scores[j];
+					preMap.set ( pce[i].second.x,pce[i].second.y,r.scores[j],j+s*klassen );
+				}
+			}
+		}
+	}
+	else
+	{
+//#pragma omp parallel for
+		for ( int s = 0; s < scalesize; s++ )
+		{
+//#pragma omp parallel for
+			for ( int i = s; i < ( int ) pce.size(); i+=scalesize )
+			{
+				ClassificationResult r = vclassifier->classify ( *(pce[i].second.vec) );
+				for ( int j = 0 ; j < ( int ) r.scores.size(); j++ )
+				{
+					if(useclass[j] == 0)
+					  continue;
+					fV[j] += r.scores[j];
+					preMap.set ( pce[i].second.x,pce[i].second.y,r.scores[j],j+s*klassen );
+				}
+			}
+		}
+	}
+	vector<double> scalesVec;
+	for ( set<double>::const_iterator iter = scales.begin();
+	        iter != scales.end();
+	        ++iter )
+	{
+		scalesVec.push_back ( *iter );
+	}
+
+
+	// Gaussian filtering
+	clog << "[log] SemSegCsurka::classifyregions: building probability maps -> Gaussian filtering" << endl;
+	for ( int s = 0; s < scalesize; s++ )
+	{
+		double sigma = sigmaweight*16.0*scalesVec[s];
+		cerr << "sigma: " << sigma << endl;
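+		// the factor 16.0 corresponds to the assumed patch side length of 16*scale
+		// pixels used when the patch geometry was set above (tmp.width = 16.0*scale)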
+#pragma omp parallel for
+		for ( int i = 0; i < klassen; i++ )
+		{
+			int pos = i+s*klassen;
+			
+			double maxval = preMap.data[pos][0];
+			double minval = preMap.data[pos][0];
+
+			for ( int z = 1; z < xsize*ysize; z++ )
+			{
+				maxval = std::max ( maxval, preMap.data[pos][z] );
+				minval = std::min ( minval, preMap.data[pos][z] );
+			}
+
+			NICE::FloatImage dblImg( xsize, ysize);
+			NICE::FloatImage gaussImg( xsize, ysize);
+
+			long int offset2 = 0;
+			for ( int y = 0; y < ysize; y++ )
+			{
+				for ( int x = 0; x < xsize; x++, offset2++ )
+				{
+					dblImg.setPixel(x,y,preMap.data[pos][offset2]);
+				}
+			}
+
+			filterGaussSigmaApproximate<float,float,float>( dblImg, sigma, &gaussImg );
+
+			offset2 = 0;
+			for ( int y = 0; y < ysize; y++ )
+			{
+				for ( int x = 0; x < xsize; x++, offset2++ )
+				{
+					preMap.data[pos][offset2]=gaussImg.getPixel(x,y);
+				}
+			}
+		}
+
+	}
+
+			
+	// combine and evaluate
+	clog << "[log] SemSegCsurka::classifyregions: building probability maps -> combining and evaluating" << endl;
+#pragma omp parallel for
+	for ( int x = 0; x < xsize; x++ )
+	{
+		for ( int y = 0; y < ysize; y++ )
+		{
+			for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+			{
+				double prob = 0.0;
+				for ( int s = 0; s < ( int ) scalesize; s++ )
+				{
+
+					prob+=preMap.get ( x,y,j+s*klassen );
+
+				}
+
+				double val = prob / ( double ) ( scalesize );
+				probabilities.set ( x,y,val, j );
+			}
+		}
+	}
+
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+
+	std::string s;
+	std::stringstream out;
+	std::vector< std::string > list;
+	StringTools::split (Globals::getCurrentImgFN (), '/', list);
+		
+	out << "probmaps/" << list.back() << ".probs";
+		
+	s = out.str();
+		
+	probabilities.store(s);
+
+	for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+	{
+		cout << "class: " << j << endl;//" " << cn.text ( j ) << endl;
+
+		NICE::Matrix tmp ( probabilities.ysize, probabilities.xsize );
+		double maxval = 0.0;
+		for ( int y = 0; y < probabilities.ysize; y++ )
+			for ( int x = 0; x < probabilities.xsize; x++ )
+			{
+				double val = probabilities.get ( x,y,j );
+				tmp(y, x) = val;
+				maxval = std::max ( val, maxval );
+			}
+		NICE::ColorImage imgrgb (probabilities.xsize, probabilities.ysize);
+		ICETools::convertToRGB ( tmp, imgrgb );
+
+		cout << "maxval = " << maxval << " for class " << j << endl; //cn.text ( j ) << endl;
+
+		//Show ( ON, imgrgb, cn.text ( j ) );
+		//showImage(imgrgb, "Ergebnis");
+		
+		std::string s;
+		std::stringstream out;
+		out << "tmp" << j << ".ppm";
+		s = out.str();
+		imgrgb.writePPM( s );
+
+		//getchar();
+	}
+#endif
+	if ( useregions )
+	{
+		if ( bestclasses > 0 )
+		{
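+			// image-level prior: presumably restricts the result to the 'bestclasses'
+			// classes with the highest accumulated scores in fV (see PSSImageLevelPrior)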
+			PSSImageLevelPrior pss ( 0, bestclasses, 0.2 );
+			pss.setPrior ( fV );
+			pss.postprocess ( segresult, probabilities );
+		}
+
+		// determine regions
+
+		int regionsize = seg->segRegions ( img, mask);
+		
+
+
+		Regionen.clear();
+		vector<vector <double> > regionprob;
+
+		// initialize the probabilities for each region
+		for ( int i = 0; i < regionsize; i++ )
+		{
+			vector<double> tmp;
+			for ( int j = 0; j < ( int ) probabilities.numChannels; j++ )
+			{
+				tmp.push_back ( 0.0 );
+			}
+			regionprob.push_back ( tmp );
+			Regionen.push_back ( pair<int, Example> ( 0, Example() ) );
+		}
+
+		// accumulate the probabilities for each region
+		for ( int x = 0; x < xsize; x++ )
+		{
+			for ( int y = 0; y < ysize; y++ )
+			{
+				for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+				{
+					double val = probabilities.get ( x,y,j );
+					int pos = mask(x,y);
+					Regionen[pos].second.weight+=1.0;
+					Regionen[pos].second.x += x;
+					Regionen[pos].second.y += y;
+					regionprob[pos][j] += val;
+				}
+			}
+		}
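+		// Note: weight is incremented once per pixel *and* per channel, so the averages
+		// computed below are additionally scaled by 1/numChannels; this cancels out for
+		// the centroid and does not change the argmax over regionprob.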
+
+
+/*
+cout << "regions: " << regionsize << endl;
+cout << "outfeats: " << endl;
+for(int j = 0; j < regionprob.size(); j++)
+{
+	for(int i = 0; i < regionprob[j].size(); i++)
+	{
+		cout << regionprob[j][i] << " ";
+	}
+	cout << endl;
+}
+cout << endl;
+getchar();*/
+
+		// choose the best probability for each region
+		for ( int i = 0; i < regionsize; i++ )
+		{
+			if(Regionen[i].second.weight > 0)
+			{
+				Regionen[i].second.x /= ( int ) Regionen[i].second.weight;
+				Regionen[i].second.y /= ( int ) Regionen[i].second.weight;
+			}
+			double maxval = 0.0;
+			int maxpos = 0;
+			
+			for ( int j = 0 ; j < ( int ) regionprob[i].size(); j++ )
+			{
+				regionprob[i][j] /= Regionen[i].second.weight;
+				
+				if ( maxval < regionprob[i][j] )
+				{
+					maxval = regionprob[i][j];
+					maxpos = j;
+				}
+				probabilities.set (Regionen[i].second.x,Regionen[i].second.y,regionprob[i][j], j );
+			}
+			
+			Regionen[i].first = maxpos;
+		}
+
+		// label the pixels of each region
+		for ( int y = 0; y < (int)mask.cols(); y++ )
+		{
+			for ( int x = 0; x < (int)mask.rows(); x++ )
+			{
+				int pos = mask(x,y);
+				segresult.setPixel(x,y,Regionen[pos].first);
+			}
+		}
+		#define WRITEREGIONS
+#ifdef WRITEREGIONS
+		RegionGraph rg;
+		seg->getGraphRepresentation(img, mask, rg);
+		for(uint pos = 0; pos < regionprob.size(); pos++)
+		{
+			rg[pos]->setProbs(regionprob[pos]);
+		}
+		
+		std::string s;
+		std::stringstream out;
+		std::vector< std::string > list;
+		StringTools::split (Globals::getCurrentImgFN (), '/', list);
+		
+		out << "rgout/" << list.back() << ".graph";
+		string writefile = out.str();
+		rg.write(writefile);
+#endif
+	}
+	else
+	{
+
+		PSSImageLevelPrior pss ( 1, 4, 0.2 );
+		pss.setPrior ( fV );
+		pss.postprocess ( segresult, probabilities );
+
+	}
+
+	// Clean up:
+	clog << "[log] SemSegCsurka::classifyregions: cleaning up" << endl;
+	for ( int i = 0; i < ( int ) pce.size(); i++ )
+	{
+		pce[i].second.clean();
+	}
+	pce.clear();
+
+	if(cSIFT != NULL)
+		delete cSIFT;
+	if(writeFeats != NULL)
+		delete writeFeats;
+	if(readFeats != NULL)
+		delete readFeats;
+	getFeats = NULL;
+}
+
+void SemSegCsurka::semanticseg ( CachedExample *ce,
+								NICE::Image & segresult,
+								GenericImage<double> & probabilities )
+{
+
+	Examples regions;
+	NICE::Matrix regionmask;
+	classifyregions ( ce, segresult, probabilities, regions, regionmask );
+	if ( userellocprior || srg != NULL || gcopt !=NULL )
+	{
+		if ( userellocprior )
+			relloc->postprocess ( regions, probabilities );
+
+		if ( srg != NULL )
+			srg->optimizeShape ( regions, regionmask, probabilities );
+
+		if ( gcopt != NULL )
+			gcopt->optimizeImage ( regions, regionmask, probabilities );
+
+		// label the pixels of each region
+		for ( int y = 0; y < (int)regionmask.cols(); y++ )
+		{
+			for ( int x = 0; x < (int)regionmask.rows(); x++ )
+			{
+				int pos = regionmask(x,y);
+				segresult.setPixel(x,y,regions[pos].first);
+			}
+		}
+	}
+
+#ifndef NOVISUAL
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+// 	showImage(img);
+	for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+	{
+		cout << "class: " << j << " " << cn.text ( j ) << endl;
+
+		NICE::Matrix tmp ( probabilities.ysize, probabilities.xsize );
+		double maxval = 0.0;
+		for ( int y = 0; y < probabilities.ysize; y++ )
+			for ( int x = 0; x < probabilities.xsize; x++ )
+			{
+				double val = probabilities.get ( x,y,j );
+				tmp(y, x) = val;
+				maxval = std::max ( val, maxval );
+			}
+
+		NICE::ColorImage imgrgb (probabilities.xsize, probabilities.ysize);
+		ICETools::convertToRGB ( tmp, imgrgb );
+
+		cout << "maxval = " << maxval << " for class " << cn.text ( j ) << endl;
+
+		Show ( ON, imgrgb, cn.text ( j ) );
+		imgrgb.Write ( "tmp.ppm" );
+
+		getchar();
+	}
+#endif
+#endif
+
+}

+ 247 - 0
semseg/SemSegCsurka.h

@@ -0,0 +1,247 @@
+/** 
+ * @file SemSegCsurka.h
+ * @brief semantic segmentation using the method from Csurka08
+ * @author Björn Fröhlich
+ * @date 04/24/2009
+ */
+#ifndef SemSegCsurkaINCLUDE
+#define SemSegCsurkaINCLUDE
+
+#include <objrec/nice.h>
+ 
+#include "SemanticSegmentation.h"
+
+#include "objrec/math/ftransform/PCA.h"
+
+#include "objrec/features/localfeatures/GenericLocalFeatureSelection.h"
+#include "objrec/features/localfeatures/LFonHSG.h"
+#include "objrec/features/localfeatures/LFColorSande.h"
+#include "objrec/features/localfeatures/LFColorWeijer.h"
+#include "objrec/features/localfeatures/LFReadCache.h"
+#include "objrec/features/localfeatures/LFWriteCache.h"
+#include "objrec/cbaselib/VectorFeature.h"
+#include "objrec/features/fpfeatures/SparseVectorFeature.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/segmentation/RegionSegmentationMethod.h"
+#include "objrec/segmentation/RSMeanShift.h"
+#include "objrec/segmentation/RSGraphBased.h"
+#include "objrec/segmentation/RSCache.h"
+
+#include "SemSegTools.h" 
+
+#include "objrec/math/cluster/GMM.h"
+#include "objrec/math/cluster/KMeansOnline.h"
+
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "objrec/classifier/fpclassifier/logisticregression/FPCSMLR.h"
+
+#include "objrec-froehlichexp/semseg/postsegmentation/PSSImageLevelPrior.h"
+#include "objrec-froehlichexp/semseg/postsegmentation/RelativeLocationPrior.h"
+#include "objrec-froehlichexp/semseg/postsegmentation/PPSuperregion.h"
+#include "objrec-froehlichexp/semseg/postsegmentation/PPGraphCut.h"
+
+
+#include <objrec/iclassifier/icgeneric/CSGeneric.h>
+
+/** @brief pixelwise labeling systems */
+
+namespace OBJREC {
+
+class SemSegCsurka : public SemanticSegmentation
+{
+
+  protected:
+	  
+	//! for normalization
+	vector<double> vecmin, vecmax;
+	  
+	//! boolean whether to save the cache or not
+	bool save_cache;
+	  
+	//! boolean whether to read the cache or not, if read_cache is false, everything will be trained
+	bool read_cache;
+	
+	//! path to the cache directory
+	std::string cache;
+	
+	//! The PCA
+	PCA pca;
+	
+	//! using normalization
+	bool norm;
+	
+	//! feature Dimension after PCA
+	int dim;
+	
+	//! Classifier
+	FeaturePoolClassifier *classifier;
+	VecClassifier *vclassifier;
+	
+	//! Configuration File
+	const Config *conf;	
+	
+	ClassNames cn;
+	
+	//! whether to use the colorfeats or not
+	bool usecolorfeats;
+	
+	//! low level Segmentation method
+	RegionSegmentationMethod *seg;
+	
+	//! weight (sigma factor) for the Gaussian smoothing of the probability maps
+	double sigmaweight;
+	
+	//! Gaussian Mixture
+	GMM *g;
+	
+	//! KMeans
+	KMeansOnline *k;
+	
+	//! use pca or not
+	bool usepca;
+	
+	//! forced recalculation of the pca
+	bool calcpca;
+	
+	//! use highlevel transformation with gmm or not
+	bool usegmm;
+	
+	//! use highlevel transformation with kmeans or not
+	bool usekmeans;
+	
+	int bestclasses;
+	
+	//! how many clusters of the k-means to use
+	int kmeansfeat;
+	
+	//! use hard assignment or not
+	bool kmeanshard;
+	
+	//! use the Fisher kernel for the bag of visual words
+	bool usefisher;
+	
+	//! forced recalculation of the gmm
+	bool dogmm;
+	
+	//! number of gaussians
+	int gaussians;
+	
+	//! whether to use the relative location features or not
+	bool userellocprior;
+	
+	//! which classifier to use
+	std::string cname;
+	
+	//! use regions segmentation or not
+	//! use region segmentation or not
+	
+	//! how many features should be used for training the classifier (relative value between 0 and 1)
+	double anteil;
+	
+	//! save intermediate steps for faster recomputation of the postprocessing
+	bool savesteps;
+	
+	//! the relative location features
+	RelativeLocationPrior *relloc;
+	
+	//! Shape pp
+	PPSuperregion *srg;
+	
+	//! Graph Cut pp
+	PPGraphCut *gcopt;
+	
+	//! smooth high level features or not
+	bool smoothhl;
+	
+	//! sigma for high level smoothing
+	double smoothfactor;
+	
+	//! which OpponentSIFT implementation to use {NICE, VANDESANDE}
+	string opSiftImpl;
+	
+	//! read features?
+	bool readfeat;
+	
+	//! write features?
+	bool writefeat;
+	
+	/**
+	 * converts the low-level features into high-level features
+	 * @param ex input and output features
+	 * @param reduce reduce the dataset (1.0 means no reduction)
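+	 * (e.g. reduce = 0.5 keeps a random subset of roughly half of the examples
+	 * and converts only those)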
+	 */
+	void convertLowToHigh(Examples &ex, double reduce = 1.0);
+		
+	/**
+	 * Starts the PCA
+	 * @param ex input features
+	 */
+	 * initializes the PCA (computes the basis)
+	
+	/**
+	 * using PCA on al input features
+	 * applies PCA to all input features
+	 */
+	void doPCA(Examples &ex);
+	
+	/**
+	 * normalize the features between 0 and 1
+	 * @param ex input features
+	 */
+	void normalize(Examples &ex);
+	
+	
+	/**
+	 * smooth the high level features
+	 * @param ex input features
+	 */
+	void smoothHL(Examples ex);
+	
+  public:
+  
+	/** constructor 
+	  *  @param conf needs a configfile
+	  *  @param md a MultiDataset (contains images and other things)
+	  */
+	SemSegCsurka( const Config *conf, const MultiDataset *md );
+      
+	/** simple destructor */
+	virtual ~SemSegCsurka();
+
+	/** The training step
+	  *  @param md a MultiDataset (contains images and other things)
+	  */
+	void train ( const MultiDataset *md );
+
+	/** The training step for the postprocessing
+	  *  @param md a MultiDataset (contains images and other things)
+	  */
+	void trainpostprocess( const MultiDataset *md );
+	
+	/** The main procedure. Input: Image, Output: Segmented Image with pixelwise labeles and the probabilities
+	  * @param ce image data
+	  * @param segresult result of the semantic segmentation with a label for each pixel
+	  * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+	  */
+	void semanticseg ( CachedExample *ce, 
+                       NICE::Image & segresult,
+                       GenericImage<double> & probabilities );
+
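+	/* Typical usage (illustrative sketch only; the config and image file names are
+	 * placeholders, not taken from this code base):
+	 *
+	 *   Config conf ( "config.conf" );
+	 *   MultiDataset md ( &conf );
+	 *   SemSegCsurka semseg ( &conf, &md );   // trains or loads the cached classifier
+	 *
+	 *   std::string fn = "image.ppm";
+	 *   Globals::setCurrentImgFN ( fn );
+	 *   CachedExample ce ( fn );
+	 *   NICE::Image segresult;
+	 *   GenericImage<double> probabilities;
+	 *   semseg.semanticseg ( &ce, segresult, probabilities );
+	 */
+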
+    /** this procedure is equivalent to semanticseg if there is no postprocessing
+	  * @param ce image data
+	  * @param segresult result of the semantic segmentation with a label for each pixel
+	  * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+	  * @param Regionen the output regions
+	  * @param mask the positions of the regions
+	  */
+	void classifyregions ( CachedExample *ce, NICE::Image & segresult, GenericImage<double> & probabilities, Examples &Regionen, NICE::Matrix &mask );
+    void getFeats(NICE::Image arg1, VVector arg2, VVector arg3);
+};
+
+} //namespace
+
+#endif

+ 1720 - 0
semseg/SemSegCsurka2.cpp

@@ -0,0 +1,1720 @@
+/**
+ * @file SemSegCsurka2.cpp
+ * @brief semantic segmentation using the method from Csurka08
+ * @author Björn Fröhlich
+ * @date 04/24/2009
+ */
+#include <iostream>
+
+#include "SemSegCsurka2.h"
+
+#include "objrec/fourier/FourierLibrary.h"
+
+#include "objrec/baselib/ICETools.h"
+
+
+#include <sstream>
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+#undef DEBUG_CSURK
+
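+// Helper readers for externally computed features. Both expect a plain text format:
+// a header line with the number of features and their dimension, followed by one
+// feature per line (class label first, then the descriptor values); the position
+// file read by readFeats uses the same header, but without the label column.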
+void readFeats(VVector &features, VVector &positions, string file, string posfile)
+{
+	 ifstream fin (file.c_str());
+	 int nb;
+	 int dim;
+	 
+	 fin >> nb;
+	 fin >> dim;
+	 for(int i = 0; i < nb; i++)
+	 {
+		int l;
+		Vector vec(dim);
+		fin >> l;
+		for(int d = 0; d < dim; d++)
+		{
+			fin >> vec[d];
+		}
+		features.push_back(vec);
+		assert(vec.size() == features[0].size());
+	 }
+	 fin.close();
+	 
+	 ifstream posin(posfile.c_str());
+	 
+	 posin >> nb;
+	 posin >> dim;
+	 	 for(int i = 0; i < nb; i++)
+	 {
+		Vector vec(dim);
+		for(int d = 0; d < dim; d++)
+		{
+			posin >> vec[d];
+		}
+		positions.push_back(vec);
+		assert(vec.size() == positions[0].size());
+	 }
+	 posin.close();
+	 
+	 cout << "positions: " << positions.size() << " feats.size: " << features.size() << endl;
+}
+
+void readex(string file, Examples &examples)
+{
+	 ifstream fin (file.c_str());
+	 int nb;
+	 int dim;
+	 
+	 fin >> nb;
+	 fin >> dim;
+	 for(int i = 0; i < nb; i++)
+	 {
+		int l;
+		Vector *vec = new Vector(dim);
+		fin >> l;
+		for(int d = 0; d < dim; d++)
+		{
+			fin >> (*vec)[d];
+		}
+		Example ex;
+		ex.vec = vec;
+		ex.svec = NULL;
+		ex.ce = NULL;
+		examples.push_back(pair<int, Example> ( l, ex));
+		//assert(vec.size() == (examples[0].second.vec->size());
+	 }
+	 fin.close();
+}
+
+
+SemSegCsurka2::SemSegCsurka2 ( const Config *conf,
+                             const MultiDataset *md )
+		: SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
+{
+	this->conf = conf;
+	save_cache = conf->gB ( "FPCPixel", "save_cache", true );
+	read_cache = conf->gB ( "FPCPixel", "read_cache", false );
+	cache = conf->gS ( "cache", "root", "" );
+	sigmaweight = conf->gD ( "SemSegCsurka", "sigmaweight", 0.6 );
+
+	dim = conf->gI ( "SemSegCsurka", "pcadim", 50 );
+
+	usepca = conf->gB ( "SemSegCsurka", "usepca", true );
+	calcpca = conf->gB ( "SemSegCsurka", "calcpca", false );
+
+	usegmm = conf->gB ( "SemSegCsurka", "usegmm", false );
+	norm = conf->gB ( "SemSegCsurka", "normalize", false );
+	usefisher = conf->gB ( "SemSegCsurka", "usefisher", false );
+	dogmm = conf->gB ( "SemSegCsurka", "dogmm", false );
+	gaussians = conf->gI ( "SemSegCsurka", "gaussians", 50 );
+
+	usekmeans = conf->gB ( "SemSegCsurka", "usekmeans", false );
+	kmeansfeat = conf->gI ( "SemSegCsurka", "kmeansfeat", 50 );
+	kmeanshard = conf->gB ( "SemSegCsurka", "kmeanshard", false );
+
+	cname = conf->gS ( "SemSegCsurka", "classifier", "RandomForests" );
+	anteil = conf->gD ( "SemSegCsurka", "anteil", 1.0 );
+	userellocprior = conf->gB ( "SemSegCsurka", "rellocfeat", false );
+	bool usesrg = conf->gB ( "SemSegCsurka", "usesrg", false );
+
+	useregions = conf->gB ( "SemSegCsurka", "useregions", true );
+	savesteps = conf->gB ( "SemSegCsurka", "savesteps", true );
+	bool usegcopt = conf->gB ( "SemSegCsurka", "usegcopt", false );
+
+	bestclasses = conf->gI ( "SemSegCsurka", "bestclasses", 0 );
+
+	smoothhl = conf->gB ( "SemSegCsurka", "smoothhl", false );
+	smoothfactor = conf->gD ( "SemSegCsurka", "smoothfactor", 1.0 );
+	
+	usecolorfeats = conf->gB("SemSegCsurka", "usecolorfeats", false);
+
+	if ( !useregions && ( userellocprior || usesrg ) )
+	{
+		cerr << "relative location priors and super region growing are only supported in combination with useregions" << endl;
+		exit ( 1 );
+	}
+
+	if ( usepca )
+		pca = PCA ( dim );
+
+	RegionSegmentationMethod * tmpseg = new RSMeanShift ( conf );
+
+	seg = new RSCache ( conf, tmpseg );
+
+	if ( userellocprior )
+		relloc = new RelativeLocationPrior ( conf );
+	else
+		relloc = NULL;
+
+	if ( usesrg )
+		srg = new PPSuperregion ( conf );
+	else
+		srg = NULL;
+
+	if ( usegcopt )
+		gcopt = new PPGraphCut ( conf );
+	else
+		gcopt = NULL;
+
+	classifier = NULL;
+	vclassifier = NULL;
+	if ( cname == "RandomForests" )
+		classifier = new FPCRandomForests ( conf, "ClassifierForest" );
+	else if ( cname == "SMLR" )
+		classifier = new FPCSMLR ( conf, "ClassifierSMLR" );
+	else
+		vclassifier = CSGeneric::selectVecClassifier ( conf, "main" );
+	//classifier = new FPCSparseMultinomialLogisticRegression(conf, "ClassifierSMLR");
+
+	if(classifier != NULL)
+		classifier->setMaxClassNo ( classNames->getMaxClassno() );
+	else
+		vclassifier->setMaxClassNo ( classNames->getMaxClassno() );
+
+	cn = md->getClassNames ( "train" );
+
+	if ( read_cache )
+	{
+		fprintf ( stderr, "SemSegCsurka2:: Reading classifier data from %s\n", ( cache+"/fpcrf.data" ).c_str() );
+
+		if(classifier != NULL)
+			classifier->read ( cache+"/fpcrf.data" );
+		else
+			vclassifier->read ( cache+"/veccl.data" );
+
+		if ( usepca )
+		{
+			std::string filename = cache + "/pca";
+			pca.read ( filename );
+		}
+
+		if ( usegmm )
+		{
+			g = new GMM ( conf, gaussians );
+
+			if ( !g->loadData ( cache+"/gmm" ) )
+			{
+				cerr << "SemSegCsurka2:: no gmm file found" << endl;
+				exit ( -1 );
+			}
+		}
+
+		if ( usekmeans )
+		{
+			k = new KMeansOnline ( gaussians );
+		}
+
+		fprintf ( stderr, "SemSegCsurka2:: successfully read\n" );
+
+		std::string filename = cache + "/rlp";
+
+		FILE *value;
+		value = fopen ( filename.c_str(),"r" );
+
+		if ( value==NULL )
+		{
+			trainpostprocess ( md );
+		}
+		else
+		{
+			if ( userellocprior )
+			{
+				relloc->read ( filename );
+			}
+		}
+
+		filename = cache + "/srg";
+
+		value = fopen ( filename.c_str(),"r" );
+
+		if ( value==NULL )
+		{
+			trainpostprocess ( md );
+		}
+		else
+		{
+			if ( srg != NULL )
+			{
+				srg->read ( filename );
+			}
+		}
+	}
+	else
+	{
+		train ( md );
+	}
+}
+
+SemSegCsurka2::~SemSegCsurka2()
+{
+	// clean-up
+	if ( classifier != NULL )
+		delete classifier;
+	if( vclassifier !=NULL)
+		delete vclassifier;
+	if ( seg != NULL )
+		delete seg;
+	if ( g != NULL )
+		delete g;
+}
+
+void SemSegCsurka2::normalize(Examples &ex)
+{
+	assert(ex.size() > 0);
+	if(vecmin.size() == 0)
+	{
+		for(int j = 0; j < (int)ex[0].second.vec->size(); j++)
+		{
+			double maxv = -numeric_limits<int>::max();
+			double minv = numeric_limits<int>::max();
+			for(int i = 0; i < (int)ex.size(); i++)
+			{
+				maxv = std::max(maxv,(*ex[i].second.vec)[j]);
+				minv = std::min(minv,(*ex[i].second.vec)[j]);
+			}
+			vecmin.push_back(minv);
+			vecmax.push_back(maxv);
+		}
+	}
+	for(int i = 0; i < (int)ex.size(); i++)
+	{
+		for(int j = 0; j < (int)ex[i].second.vec->size(); j++)
+		{
+			(*ex[i].second.vec)[j] = ((*ex[i].second.vec)[j]-vecmin[j])/(vecmax[j]-vecmin[j]);
+		}
+	}
+	return;
+}
+
+
+
+void SemSegCsurka2::convertLowToHigh ( Examples &ex, double reduce )
+{
+	cout << "converting low-level features to high-level features" << endl;
+	
+	if ( reduce >= 1.0 )
+	{
+		for ( int i = 0; i < ( int ) ex.size(); i++ )
+		{
+			SparseVector *f = new SparseVector();
+			if ( usekmeans )
+				k->getDist ( *ex[i].second.vec, *f, kmeansfeat, kmeanshard );
+			else
+			{
+				if ( usefisher )
+					g->getFisher ( *ex[i].second.vec, *f );
+				else
+					g->getProbs ( *ex[i].second.vec, *f );
+			}
+			delete ex[i].second.vec;
+			ex[i].second.vec = NULL;
+			ex[i].second.svec = f;
+		}
+	}
+	else
+	{
+		srand ( time ( NULL ) );
+
+		vector<int> del;
+		cout << "Example size old " << ex.size() << endl;
+		for ( int i = 0; i < ( int ) ex.size(); i++ )
+		{
+			double rval = ( double ) rand() / ( double ) RAND_MAX;
+			if ( rval < reduce )
+			{
+				SparseVector *f = new SparseVector();
+
+				if ( usekmeans )
+					k->getDist ( *ex[i].second.vec, *f, kmeansfeat, kmeanshard );
+				else
+				{
+					if ( usefisher )
+						g->getFisher ( *ex[i].second.vec, *f );
+					else
+						g->getProbs ( *ex[i].second.vec, *f );
+				}
+
+				delete ex[i].second.vec;
+				ex[i].second.vec = NULL;
+				ex[i].second.svec = f;
+			}
+			else
+			{
+				del.push_back ( i );
+			}
+		}
+		for ( int i = ( int ) del.size() - 1; i >= 0; i-- )
+		{
+			ex.erase ( ex.begin() +del[i] );
+		}
+		cerr << "Example size new " << ex.size() << endl;
+	}
+	cerr << "converting low-level features to high-level features finished" << endl;
+}
+
+void SemSegCsurka2::smoothHL ( Examples ex )
+{
+
+	if ( !smoothhl )
+		return;
+	assert ( ex.size() > 1 );
+
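+	// Smoothing works by rasterizing the sparse high-level descriptors onto a regular
+	// grid (one grid per detected scale), Gauss-filtering every descriptor dimension
+	// on that grid, and writing the smoothed values back into the sparse vectors.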
+	long long int minx = numeric_limits<long long int>::max();
+	long long int miny = numeric_limits<long long int>::max();
+	long long int maxx = -numeric_limits<long long int>::max();
+	long long int maxy = -numeric_limits<long long int>::max();
+	long long int distx = numeric_limits<long long int>::max();
+	long long int disty = numeric_limits<long long int>::max();
+
+	set<double> scales;
+	for ( int i = 0; i < (int)ex.size(); i++ )
+	{
+		scales.insert ( ex[i].second.scale );
+	}
+
+	map<double, int> scalepos;
+	int it = 0;
+
+	for ( set<double>::const_iterator iter = scales.begin(); iter != scales.end();    ++iter, ++it )
+	{
+		scalepos.insert(make_pair(*iter, it));
+	}
+
+	for ( int i = 0; i < (int)ex.size(); i++ )
+	{
+		if ( minx < numeric_limits<int>::max() && ex[i].second.x - minx > 0 )
+			distx = std::min ( distx, ex[i].second.x - minx );
+		if ( miny < numeric_limits<int>::max() && ex[i].second.y - miny > 0 )
+			disty = std::min ( disty, ex[i].second.y - miny );
+		minx = std::min ( ex[i].second.x, minx );
+		maxx = std::max ( ex[i].second.x, maxx );
+		miny = std::min ( ex[i].second.y, miny );
+		maxy = std::max ( ex[i].second.y, maxy );
+	}
+
+	distx = abs ( distx );
+
+	int xsize = ( maxx - minx ) /distx +1;
+	int ysize = ( maxy - miny ) /disty +1;
+	double valx = ( ( double ) xsize-1 ) / ( double ) ( maxx - minx );
+	double valy = ( ( double ) ysize-1 ) / ( double ) ( maxy - miny );
+
+	//double sigma = smoothfactor;
+	double sigma = std::max(xsize,ysize) * smoothfactor;
+	//double sigma = 0.2;
+	cout << "sigma1: " << sigma << endl;
+	
+	vector<NICE::FloatImage> imgv;
+	vector<NICE::FloatImage> gaussImgv;
+	for(int i = 0; i < (int)scalepos.size(); i++)
+	{
+		NICE::FloatImage img( xsize, ysize);
+		NICE::FloatImage gaussImg( xsize, ysize);
+		imgv.push_back(img);
+		gaussImgv.push_back(gaussImg);
+	}
+
+	for ( int d = 0; d < ex[0].second.svec->getDim(); d++ )
+	{
+		//TODO: determine max and min dynamically
+
+		for(int i = 0; i < (int)scalepos.size(); i++)
+		{
+			imgv[i].set(0.0);
+			gaussImgv[i].set(0.0);
+		}
+		
+		for ( int i = 0; i < (int)ex.size(); i++ )
+		{
+			int xpos = ( ex[i].second.x - minx ) *valx;
+			int ypos = ( ex[i].second.y - miny ) *valy;
+			
+			double val = ex[i].second.svec->get ( d );
+			// refactor-nice.pl: check this substitution
+			// old: PutValD ( imgv[scalepos[ex[i].second.scale]],xpos,ypos,val);
+			imgv[scalepos[ex[i].second.scale]].setPixel(xpos,ypos,val);
+		}
+		
+		/*
+		for(int y = 0; y < ysize; y++)
+		{
+			for(int x = 0; x < xsize; x++)
+			{
+				// refactor-nice.pl: check this substitution
+				// old: double val = GetValD(img,x,y);
+				double val = img.getPixel(x,y);
+				double  c = 0.0;
+				if(val == 0.0)
+				{
+					if(x > 0)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x-1,y);
+						val+=img.getPixel(x-1,y);
+						c+=1.0;
+					}
+					if(y > 0)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x,y-1);
+						val+=img.getPixel(x,y-1);
+						c+=1.0;
+					}
+					if(x < xsize-1)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x+1,y);
+						val+=img.getPixel(x+1,y);
+						c+=1.0;
+					}
+					if(y < ysize-1)
+					{
+						// refactor-nice.pl: check this substitution
+						// old: val+=GetValD(img,x,y+1);
+						val+=img.getPixel(x,y+1);
+						c+=1.0;
+					}
+					// refactor-nice.pl: check this substitution
+					// old: PutValD(img,x,y,val/c);
+					img.setPixel(x,y,val/c);
+				}
+			}
+		}*/
+
+		for(int i = 0; i < (int)imgv.size(); i++)
+			FourierLibrary::gaussFilterD ( imgv[i], gaussImgv[i], sigma );	
+		
+		for ( int i = 0; i < (int)ex.size(); i++ )
+		{
+			int xpos = ( ex[i].second.x - minx ) *valx;
+			int ypos = ( ex[i].second.y - miny ) *valy;
+			// refactor-nice.pl: check this substitution
+			// old: double val = GetValD ( gaussImgv[scalepos[ex[i].second.scale]], xpos, ypos );
+			double val = gaussImgv[scalepos[ex[i].second.scale]].getPixel(xpos,ypos);
+
+			if ( fabs ( val ) < 1e-7 )
+			{
+				if ( ex[i].second.svec->get ( d ) != 0.0 )
+				{
+					ex[i].second.svec->erase ( d );
+				}
+			}
+			else
+			{
+				( *ex[i].second.svec ) [d] = val;
+			}
+		}
+	}
+}
+
+void SemSegCsurka2::initializePCA ( Examples &ex )
+{
+#ifdef DEBUG
+	cerr << "start computing pca" << endl;
+#endif
+	std::string filename = cache + "/pca";
+
+	FILE *value;
+	value = fopen ( filename.c_str(),"r" );
+
+	if ( value==NULL || calcpca )
+	{
+		srand ( time ( NULL ) );
+
+		int featsize = ( int ) ex.size();
+		int maxfeatures = dim*10;
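+		// the PCA basis is estimated from at most dim*10 randomly drawn descriptors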
+		int olddim = ex[0].second.vec->size();
+
+		maxfeatures = std::min ( maxfeatures, featsize );
+
+		NICE::Matrix features ( maxfeatures, olddim );
+
+		for ( int i = 0; i < maxfeatures; i++ )
+		{
+			int k = rand() % featsize;
+
+			int vsize = (int)ex[k].second.vec->size();
+			for(int j = 0; j < vsize; j++)
+			{
+				features(i,j) = (*( ex[k].second.vec))[j];
+			}
+		}
+		pca.calculateBasis ( features, dim, 1 );
+
+		if ( save_cache )
+			pca.save ( filename );
+
+	}
+	else
+	{
+		cout << "readpca: " << filename << endl;
+		pca.read ( filename );
+		cout << "end" << endl;
+	}
+#ifdef DEBUG
+	cerr << "finished computing pca" << endl;
+#endif
+}
+
+void SemSegCsurka2::doPCA ( Examples &ex )
+{
+	cout << "converting features using pca starts" << endl;
+
+	std::string savedir = cname = conf->gS ( "cache", "root", "/dev/null/" );
+	std::string shortf = ex.filename;
+	if ( string::npos != ex.filename.rfind ( "/" ) )
+		shortf = ex.filename.substr ( ex.filename.rfind ( "/" ) );
+	std::string filename = savedir+"/pcasave/"+shortf;
+	std::string syscall = "mkdir "+savedir+"/pcasave";
+	system ( syscall.c_str() );
+	cout << "filename: " << filename << endl;
+	
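+	// the PCA-projected descriptors are cached per image as raw doubles; if the cache
+	// file already exists (and calcpca is not set), it is read back instead of recomputing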
+	if ( !FileMgt::fileExists(filename) || calcpca )
+	{
+		ofstream ofStream;
+
+		// open the file in binary mode
+		ofStream.open ( filename.c_str(),fstream::out | fstream::binary );
+
+		for ( int k = 0; k < ( int ) ex.size(); k++ )
+		{
+			NICE::Vector tmp = pca.getFeatureVector ( * ( ex[k].second.vec ), true );
+			delete ex[k].second.vec;
+			for ( int d = 0; d < (int)tmp.size(); d++ )
+				ofStream.write ( ( char* ) &tmp[d], sizeof ( double ) );
+			ex[k].second.vec = new NICE::Vector ( tmp );
+		}
+		ofStream.close();
+		cout << endl;
+	}
+	else
+	{
+		ifstream ifStream;
+		ifStream.open ( filename.c_str(),std::fstream::in | std::fstream::binary );
+		for ( int k = 0; k < ( int ) ex.size(); k++ )
+		{
+			NICE::Vector tmp = NICE::Vector ( dim );
+			delete ex[k].second.vec;
+			for ( int d = 0; d < dim; d++ )
+				ifStream.read ( ( char* ) &tmp[d], sizeof ( double ) );
+			ex[k].second.vec = new NICE::Vector ( tmp );
+		}
+
+		ifStream.close();
+	}
+	cout << "converting features using pca finished" << endl;
+}
+
+
+
+
+void SemSegCsurka2::train ( const MultiDataset *md )
+{
+
+	/* the individual training steps
+	1. compute SIFT features at the grid points for all resolutions on all training images
+	2. apply PCA
+	3. build a GMM from these features
+	4. for each SIFT feature, build a vector that contains at position i the probability for distribution i of the GMM; currently done with the BoV alternative by Moosman06
+	5. train these vectors together with their class labels in a discriminative classifier (e.g. SLR or randomized forests)
+	*/
+#ifdef DEBUG
+	cerr << "SemSegCsurka2:: training starts" << endl;
+#endif
+
+	Examples examples;
+	examples.filename = "training";
+	// the used features
+	LocalFeatureRepresentation *cSIFT = new LFColorSande ( conf, "LFColorSandeTrain" );
+	// write the features to a file, if there isn't any to read
+	LocalFeatureRepresentation *writeFeats = new LFWriteCache ( conf, cSIFT );
+	// read the features from a file
+	LocalFeatureRepresentation *getFeats = new LFReadCache ( conf, writeFeats,-1 );
+	cout << 4 << endl;
+	// additional Colorfeatures
+	LFColorWeijer lcw(conf);
+	int lfdimension = -1;
+
+	const LabeledSet train = * ( *md ) ["train"];
+	const LabeledSet *trainp = &train;
+
+	////////////////////////
+	// Compute features   //
+	////////////////////////
+
+	set<int> forbidden_classes;
+
+	std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
+	if ( forbidden_classes_s == "" )
+	{
+		forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
+	}
+	cn.getSelection ( forbidden_classes_s, forbidden_classes );
+	cerr << "forbidden: " << forbidden_classes_s << endl;
+
+	ProgressBar pb ( "Local Feature Extraction" );
+	pb.show();
+
+	int imgnb = 0;
+#if 0
+	LOOP_ALL_S ( *trainp )
+	{
+		//EACH_S(classno, currentFile);
+		EACH_INFO ( classno,info );
+
+		pb.update ( trainp->count() );
+
+		NICE::Image img;
+
+		std::string currentFile = info.img();
+
+		CachedExample *ce = new CachedExample ( currentFile );
+
+		const LocalizationResult *locResult = info.localization();
+		if ( locResult->size() <= 0 )
+		{
+			fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+			          currentFile.c_str() );
+			continue;
+		}
+
+		fprintf ( stderr, "SemSegCsurka2: Collecting pixel examples from localization info: %s\n",
+		          currentFile.c_str() );
+
+		int xsize, ysize;
+		ce->getImageSize ( xsize, ysize );
+
+		NICE::Image pixelLabels (xsize, ysize);
+		pixelLabels.set(0);
+		locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+		try {
+			img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+		} catch (Exception) {
+			cerr << "SemSegCsurka2: error opening image file <" << currentFile << ">" << endl;
+			continue;
+		}
+		
+		Globals::setCurrentImgFN ( currentFile );
+
+		VVector features;
+		VVector cfeatures;
+		VVector positions;
+
+		NICE::ColorImage cimg(currentFile);
+		
+		getFeats->extractFeatures ( img, features, positions );
+		
+		if(usecolorfeats)
+			lcw.getDescriptors(cimg, cfeatures, positions);
+
+		int j = 0;
+		
+		for ( VVector::const_iterator i = features.begin();
+		        i != features.end();
+		        i++,j++ )
+		{
+			const NICE::Vector & x = *i;
+			classno = pixelLabels.getPixel(( int )positions[j][0], ( int )positions[j][1] );
+
+			if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+				continue;
+
+			if ( lfdimension < 0 )
+				lfdimension = ( int ) x.size();
+			else
+				assert ( lfdimension == ( int ) x.size() );
+
+			NICE::Vector *v = new NICE::Vector ( x );
+			
+			if(usecolorfeats && !usepca)
+				v->append(cfeatures[j]);
+
+			Example example ( v );
+			example.position = imgnb;
+			examples.push_back (
+			    pair<int, Example> ( classno, example ) );
+		}
+		features.clear();
+		positions.clear();
+		delete ce;
+		imgnb++;
+	}
+
+	pb.hide();
+#endif
+
+	examples.clear();
+	
+	readex("/home/staff/froehlich/fernerkundung/irene/sattrain.feats", examples);
+
+	lfdimension = (int)examples[0].second.vec->size();
+	//////////////////
+	// Apply PCA    //
+	//////////////////
+
+	if ( usepca )
+	{
+		if ( !read_cache )
+		{
+			initializePCA ( examples );
+		}
+		doPCA ( examples );
+		lfdimension = dim;
+	}
+
+	/////////////////////////////////////////////////////
+	// Transform low-level into high-level features    //
+	/////////////////////////////////////////////////////
+
+	int hlfdimension = lfdimension;
+
+	if(norm)
+		normalize(examples);
+	
+	if ( usegmm )
+	{
+		if(!usepca && !norm)
+			normalize(examples);
+		g = new GMM ( conf,gaussians );
+
+		if ( dogmm || !g->loadData ( cache+"/gmm" ) )
+		{
+			g->computeMixture ( examples );
+			if ( save_cache )
+				g->saveData ( cache+"/gmm" );
+		}
+
+		hlfdimension = gaussians;
+
+		if ( usefisher )
+			hlfdimension = gaussians*2*dim;
+	}
+
+	if ( usekmeans )
+	{
+		if(!usepca || norm)
+			normalize(examples);
+		k = new KMeansOnline ( gaussians );
+
+		k->cluster ( examples );
+
+		hlfdimension = gaussians;
+	}
+
+	if ( usekmeans || usegmm )
+	{
+		examples.clear();
+		pb.reset("Local Feature Extraction");
+		lfdimension = -1;
+		pb.update ( trainp->count() );
+		LOOP_ALL_S ( *trainp )
+		{
+			EACH_INFO ( classno,info );
+
+			pb.update ( trainp->count() );
+
+			NICE::Image img;
+
+			std::string currentFile = info.img();
+
+			CachedExample *ce = new CachedExample ( currentFile );
+			
+			const LocalizationResult *locResult = info.localization();
+			if ( locResult->size() <= 0 )
+			{
+				fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+				          currentFile.c_str() );
+				continue;
+			}
+
+			fprintf ( stderr, "SemSegCsurka2: Collecting pixel examples from localization info: %s\n",
+			          currentFile.c_str() );
+
+			int xsize, ysize;
+			ce->getImageSize ( xsize, ysize );
+
+			NICE::Image pixelLabels (xsize, ysize);
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			try{
+				img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+			}
+			catch (Exception){
+				cerr << "SemSegCsurka2: error opening image file <" << currentFile << ">" << endl;
+				continue;
+			}
+
+			Globals::setCurrentImgFN ( currentFile );
+
+			VVector features;
+			VVector cfeatures;
+			VVector positions;
+
+			NICE::ColorImage cimg(currentFile);
+		
+			getFeats->extractFeatures ( img, features, positions );
+		
+			if(usecolorfeats)
+				lcw.getDescriptors(cimg, cfeatures, positions);
+
+			int j = 0;
+
+			Examples tmpex;
+
+			for ( VVector::const_iterator i = features.begin();
+			        i != features.end();
+			        i++,j++ )
+			{
+				
+				const NICE::Vector & x = *i;
+
+				classno = pixelLabels.getPixel(( int )positions[j][0], ( int )positions[j][1] );
+
+				if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+					continue;
+				
+				if ( lfdimension < 0 )
+					lfdimension = ( int ) x.size();
+				else
+					assert ( lfdimension == ( int ) x.size() );
+				
+				NICE::Vector *v = new NICE::Vector ( x );
+				if(usecolorfeats)
+					v->append(cfeatures[j]);
+
+				Example example ( v );
+				example.position = imgnb;
+				example.x = ( int ) positions[j][0];
+				example.y = ( int )positions[j][1];
+				example.scale = positions[j][2];
+
+				tmpex.push_back ( pair<int, Example> ( classno, example ) );
+			}
+			tmpex.filename = currentFile;
+			if ( usepca )
+			{
+				doPCA ( tmpex );
+			}
+
+			convertLowToHigh ( tmpex, anteil );
+
+			smoothHL ( tmpex );
+			
+			for ( int i = 0; i < (int)tmpex.size(); i++ )
+			{
+				examples.push_back ( pair<int, Example> ( tmpex[i].first, tmpex[i].second ) );
+			}
+
+			tmpex.clear();
+
+			features.clear();
+			positions.clear();
+			delete ce;
+			imgnb++;
+			
+		}
+
+		pb.hide();
+	}
+	////////////////////////////
+	// Train the classifier  //
+	////////////////////////////
+	FeaturePool fp;
+	
+	Feature *f;
+
+	if ( usegmm || usekmeans )
+		f = new SparseVectorFeature ( hlfdimension );
+	else
+		f = new VectorFeature ( hlfdimension );
+	
+	f->explode ( fp );
+	delete f;
+	if(usecolorfeats && !( usekmeans || usegmm ))
+	{
+		int dimension = hlfdimension+11;
+		for ( int i = hlfdimension ; i < dimension ; i++ )
+		{
+			VectorFeature *f = new VectorFeature ( dimension );
+			f->feature_index = i;
+			fp.addFeature(f, 1.0 / dimension);
+		}
+	}
+	
+/*
+cout << "train classifier" << endl;
+fp.store(cout);
+getchar();
+for(int z = 0; z < examples.size(); z++)
+{
+cout << "examples.size() " << examples.size() << endl;
+cout << "class: " << examples[z].first << endl;
+	cout << *examples[z].second.vec << endl;
+	getchar();
+}*/
+	if(classifier != NULL)
+		classifier->train ( fp, examples );
+	else
+	{
+		LabeledSetVector lvec;
+		convertExamplesToLSet(examples, lvec);
+		vclassifier->teach(lvec);
+		if(usegmm)
+			convertLSetToSparseExamples(examples, lvec);
+		else
+			convertLSetToExamples(examples, lvec);
+		vclassifier->finishTeaching();
+	}
+
+	fp.destroy();
+
+	if ( save_cache )
+	{
+		if(classifier != NULL)
+			classifier->save ( cache+"/fpcrf.data" );
+		else
+			vclassifier->save ( cache+"/veccl.data" );
+	}
+	////////////
+	//clean up//
+	////////////
+
+	for ( int i = 0; i < ( int ) examples.size(); i++ )
+	{
+		examples[i].second.clean();
+	}
+
+	examples.clear();
+
+	delete cSIFT;
+
+	delete writeFeats;
+
+	delete getFeats;
+	trainpostprocess ( md );
+
+	cerr << "SemSeg training finished" << endl;
+}
+
+void SemSegCsurka2::trainpostprocess ( const MultiDataset *md )
+{
+	cout<< "start postprocess" << endl;
+	////////////////////////////
+	// Train the postprocess  //
+	////////////////////////////
+	const LabeledSet train = * ( *md ) ["train"];
+	const LabeledSet *trainp = &train;
+
+	if ( userellocprior || srg != NULL || gcopt !=NULL )
+	{
+		if ( userellocprior )
+			relloc->setClassNo ( cn.numClasses() );
+
+		if ( gcopt !=NULL )
+		{
+			gcopt->setClassNo ( cn.numClasses() );
+		}
+
+		ProgressBar pb ( "learn relative location prior maps" );
+		pb.show();
+		LOOP_ALL_S ( *trainp ) // run the first classification step on all images in order to train the second classifier
+		{
+			EACH_INFO ( classno,info );
+			
+			pb.update ( trainp->count() );
+
+			NICE::Image img;
+
+			std::string currentFile = info.img();
+			Globals::setCurrentImgFN ( currentFile );
+			CachedExample *ce = new CachedExample ( currentFile );
+
+			const LocalizationResult *locResult = info.localization();
+			if ( locResult->size() <= 0 )
+			{
+				fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+				          currentFile.c_str() );
+				continue;
+			}
+
+			fprintf ( stderr, "SemSegCsurka2: Collecting pixel examples from localization info: %s\n",
+			          currentFile.c_str() );
+
+			int xsize, ysize;
+			ce->getImageSize ( xsize, ysize );
+
+			NICE::Image pixelLabels (xsize, ysize);
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			try{
+				img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+			}
+			catch(Exception)
+			{
+				cerr << "SemSegCsurka2: error opening image file <" << currentFile << ">" << endl;
+				continue;
+			}
+
+			// determine regions
+			NICE::Matrix mask;
+
+			int regionsize = seg->segRegions ( img,mask );
+
+			Examples regions;
+
+			vector<vector<int> > hists;
+
+			for ( int i = 0; i < regionsize; i++ )
+			{
+				Example tmp;
+				regions.push_back ( pair<int, Example> ( 0, tmp ) );
+				vector<int> hist ( cn.numClasses(), 0 );
+				hists.push_back ( hist );
+			}
+
+			for ( int x = 0; x < xsize; x++ )
+			{
+				for ( int y = 0; y < ysize; y++ )
+				{
+					int numb = mask(x,y);
+					regions[numb].second.x += x;
+					regions[numb].second.y += y;
+					regions[numb].second.weight += 1.0;
+					hists[numb][pixelLabels.getPixel(x,y)]++;
+				}
+			}
+
+			for ( int i = 0; i < regionsize; i++ )
+			{
+				regions[i].second.x /= ( int ) regions[i].second.weight;
+				regions[i].second.y /= ( int ) regions[i].second.weight;
+
+				int maxval = -numeric_limits<int>::max();
+				int maxpos = -1;
+				int secondpos = -1;
+				for ( int k = 0; k < ( int ) hists[i].size(); k++ )
+				{
+					if ( maxval <hists[i][k] )
+					{
+						maxval = hists[i][k];
+						secondpos = maxpos;
+						maxpos = k;
+					}
+				}
+
+				if ( cn.text ( maxpos ) == "various" )
+					regions[i].first = secondpos;
+				else
+					regions[i].first = maxpos;
+
+			}
+			if ( userellocprior )
+				relloc->trainPriorsMaps ( regions, xsize, ysize );
+
+			if ( srg != NULL )
+				srg->trainShape ( regions, mask );
+
+			if ( gcopt !=NULL )
+				gcopt->trainImage ( regions, mask );
+
+			delete ce;
+
+		}
+		pb.hide();
+		if ( userellocprior )
+			relloc->finishPriorsMaps ( cn );
+
+		if ( srg != NULL )
+			srg->finishShape ( cn );
+
+		if ( gcopt != NULL )
+			gcopt->finishPP ( cn );
+	}
+	if ( userellocprior )
+	{
+		ProgressBar pb ( "learn relative location classifier" );
+		pb.show();
+
+		int nummer = 0;
+		LOOP_ALL_S ( *trainp ) // run the first classification step on all images in order to train the second classifier
+		{
+			//EACH_S(classno, currentFile);
+			EACH_INFO ( classno,info );
+			nummer++;
+			pb.update ( trainp->count() );
+
+			NICE::Image img;
+			std::string currentFile = info.img();
+
+			CachedExample *ce = new CachedExample ( currentFile );
+
+			const LocalizationResult *locResult = info.localization();
+			if ( locResult->size() <= 0 )
+			{
+				fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+				          currentFile.c_str() );
+				continue;
+			}
+
+			fprintf ( stderr, "SemSegCsurka2: Collecting pixel examples from localization info: %s\n",
+			          currentFile.c_str() );
+
+			int xsize, ysize;
+			ce->getImageSize ( xsize, ysize );
+
+			NICE::Image pixelLabels (xsize, ysize);
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			try{
+				img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+			}
+			catch(Exception)
+			{
+				cerr << "SemSegCsurka2: error opening image file <" << currentFile << ">" << endl;
+				continue;
+			}
+			Globals::setCurrentImgFN ( currentFile );
+
+			NICE::Image segresult;
+
+			GenericImage<double> probabilities ( xsize,ysize,classno,true );
+
+			Examples regions;
+
+			NICE::Matrix mask;
+
+			if ( savesteps )
+			{
+				std::ostringstream s1;
+				s1 << cache << "/rlpsave/" << nummer;
+
+				std::string filename = s1.str();
+				s1 << ".probs";
+				
+				std::string fn2 = s1.str();
+
+				FILE *file;
+				file = fopen ( filename.c_str(),"r" );
+
+				if ( file==NULL )
+				{
+					// compute
+					classifyregions ( ce, segresult, probabilities, regions, mask );
+					// write to cache
+					ofstream fout ( filename.c_str(), ios::app );
+					fout << regions.size() << endl;
+					for ( int i = 0; i < ( int ) regions.size(); i++ )
+					{
+						regions[i].second.store ( fout );
+						fout << regions[i].first << endl;
+					}
+					fout.close();
+					probabilities.store ( fn2 );
+				}
+				else
+				{
+					// read from cache
+					ifstream fin ( filename.c_str() );
+					int size;
+					fin >> size;
+
+					for ( int i = 0; i < size; i++ )
+					{
+						Example ex;
+						ex.restore ( fin );
+						int tmp;
+						fin >> tmp;
+						regions.push_back ( pair<int, Example> ( tmp, ex ) );
+					}
+
+					fin.close();
+
+					probabilities.restore ( fn2 );
+				}
+			}
+			else
+			{
+				classifyregions ( ce, segresult, probabilities, regions, mask );
+			}
+
+			relloc->trainClassifier ( regions, probabilities );
+
+			delete ce;
+
+		}
+		relloc->finishClassifier();
+		pb.hide();
+
+		relloc->save ( cache+"/rlp" );
+	}
+	cout << "finished postprocess" << endl;
+}
+
+void SemSegCsurka2::classifyregions ( CachedExample *ce, NICE::Image & segresult, GenericImage<double> & probabilities, Examples &Regionen, NICE::Matrix & mask )
+{
+	/* the individual test steps:
+	1.x  compute all SIFT features at the grid points of the test image for all scales
+	2.x  for each SIFT feature, build a vector whose entry i holds the probability for component i of the GMM
+	3.x  classify these vectors so that a probability is stored for every class
+	4.x  for each pixel, average the probabilities over all patches that contain the pixel
+	5.x  segment the original image into homogeneous regions
+	6.x  each homogeneous region receives the averaged probabilities of its pixels
+	7.  (exclude individual classes with a global classifier)
+	8.x  each pixel is assigned the class of its region
+	*/
+
+	int xsize, ysize;
+
+	ce->getImageSize ( xsize, ysize );
+
+	probabilities.reInit ( xsize, ysize, classNames->getMaxClassno() +1, true/*allocMem*/ );
+
+	segresult.resize(xsize, ysize);
+
+	Examples pce;
+
+	// the features to use
+	LocalFeatureRepresentation *cSIFT = new LFColorSande ( conf, "LFColorSandeTest" );
+
+	// write the features to a file if there are none to read
+	LocalFeatureRepresentation *writeFeats = new LFWriteCache ( conf, cSIFT );
+
+	// read the features from a file
+	LocalFeatureRepresentation *getFeats = new LFReadCache ( conf, writeFeats,-1 );
+
+	// additional Colorfeatures
+	LFColorWeijer lcw(conf);
+	
+	NICE::Image img;
+
+	std::string currentFile = Globals::getCurrentImgFN();
+	
+	try
+	{
+		img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+	}
+	catch(Exception)
+	{
+		cerr << "SemSegCsurka2: error opening image file <" << currentFile << ">" << endl;
+	}
+
+	VVector features;
+	VVector cfeatures;
+	VVector positions;
+	NICE::ColorImage cimg(currentFile);
+	//getFeats->extractFeatures ( img, features, positions );
+	
+	readFeats(features,positions,"/home/staff/froehlich/fernerkundung/irene/sattest.feats","/home/staff/froehlich/fernerkundung/irene/sattest.coords");
+	
+	if(usecolorfeats)
+		lcw.getDescriptors(cimg, cfeatures, positions);
+
+	set<double> scales;
+
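+	// wrap each local descriptor in an Example: grid position from 'positions', patch size 16 * scale; color features are appended if enabled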
+	int j = 0;
+	int lfdimension = -1;
+	for ( VVector::const_iterator i = features.begin();
+	        i != features.end();
+	        i++,j++ )
+	{
+		const NICE::Vector & x = *i;
+
+		if ( lfdimension < 0 ) lfdimension = ( int ) x.size();
+		else assert ( lfdimension == ( int ) x.size() );
+
+		NICE::Vector *v = new NICE::Vector ( x );
+
+		if(usecolorfeats)
+			v->append(cfeatures[j]);
+		
+		Example tmp = Example ( v );
+		tmp.x = ( int )positions[j][0];
+		tmp.y = ( int ) positions[j][1];
+		tmp.width = ( int ) ( 16.0*positions[j][2] );
+		tmp.height = tmp.width;
+		tmp.scale = positions[j][2];
+		scales.insert ( positions[j][2] );
+		pce.push_back ( pair<int, Example> ( 0, tmp ) );
+	}
+
+	//////////////////
+	// apply PCA    //
+	//////////////////
+	pce.filename = currentFile;
+	if ( usepca )
+	{
+		doPCA ( pce );
+		lfdimension = dim;
+	}
+
+	//////////////////
+	// apply BoV    //
+	//////////////////
+	if(norm)
+		normalize(pce);
+	if ( usegmm || usekmeans )
+	{
+		if(!usepca && !norm)
+			normalize(pce);
+		convertLowToHigh ( pce );
+		smoothHL ( pce );
+		lfdimension = gaussians;
+	}
+
+	/////////////////////////////////////////
+	// create the probability maps         //
+	/////////////////////////////////////////
+
+	int klassen = probabilities.numChannels;
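+	// preMap has one channel per (class, scale) pair; channel j + s*klassen holds the score of class j at scale index s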
+	GenericImage<double> preMap ( xsize,ysize,klassen*scales.size(),true );
+
+	long int offset = 0;
+
+	// initialize
+	for ( int y = 0 ; y < ysize ; y++ )
+		for ( int x = 0 ; x < xsize ; x++,offset++ )
+		{
+			// set everything to background
+			segresult.setPixel(x,y,0);
+			// initialize the probability maps with 0
+			for ( int i = 0 ; i < ( int ) probabilities.numChannels; i++ )
+			{
+				probabilities.data[i][offset] = 0.0;
+			}
+			for ( int j = 0; j < ( int ) preMap.numChannels; j++ )
+			{
+				preMap.data[j][offset]=0.0;
+			}
+		}
+
+	// fill the probability maps with the individual probabilities for each scale
+
+	int scalesize = scales.size();
+
+	// accumulate global class frequencies
+	FullVector fV ( ( int ) probabilities.numChannels );
+
+	for ( int i = 0; i < fV.size(); i++ )
+		fV[i] = 0.0;
+
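+	// classify every local feature (with the feature pool classifier or the vector classifier),
+	// accumulating the scores per scale in preMap and the global class frequencies in fV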
+	if(classifier != NULL)
+	{
+#pragma omp parallel for
+		for ( int s = 0; s < scalesize; s++ )
+		{
+#pragma omp parallel for
+			for ( int i = s; i < ( int ) pce.size(); i+=scalesize )
+			{
+				ClassificationResult r = classifier->classify ( pce[i].second );
+				for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+				{
+					fV[j] += r.scores[j];
+					preMap.set ( pce[i].second.x,pce[i].second.y,r.scores[j],j+s*klassen );
+				}
+			}
+		}
+	}
+	else
+	{
+#pragma omp parallel for
+		for ( int s = 0; s < scalesize; s++ )
+		{
+#pragma omp parallel for
+			for ( int i = s; i < ( int ) pce.size(); i+=scalesize )
+			{
+				ClassificationResult r = vclassifier->classify ( *(pce[i].second.vec) );
+				for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+				{
+					fV[j] += r.scores[j];
+					preMap.set ( pce[i].second.x,pce[i].second.y,r.scores[j],j+s*klassen );
+				}
+			}
+		}
+	}
+
+	vector<double> scalesVec;
+	for ( set<double>::const_iterator iter = scales.begin();
+	        iter != scales.end();
+	        ++iter )
+	{
+		scalesVec.push_back ( *iter );
+	}
+
+
+	// Gaussian filtering of each (class, scale) probability map
+	for ( int s = 0; s < scalesize; s++ )
+	{
+		double sigma = sigmaweight*16.0*scalesVec[s];
+		cerr << "sigma: " << sigma << endl;
+#pragma omp parallel for
+		for ( int i = 0; i < klassen; i++ )
+		{
+			int pos = i+s*klassen;
+			
+			double maxval = preMap.data[pos][0];
+			double minval = preMap.data[pos][0];
+
+			for ( int z = 1; z < xsize*ysize; z++ )
+			{
+				maxval = std::max ( maxval, preMap.data[pos][z] );
+				minval = std::min ( minval, preMap.data[pos][z] );
+			}
+
+			NICE::FloatImage dblImg( xsize, ysize);
+			NICE::FloatImage gaussImg( xsize, ysize);
+
+			long int offset2 = 0;
+			for ( int y = 0; y < ysize; y++ )
+			{
+				for ( int x = 0; x < xsize; x++, offset2++ )
+				{
+					dblImg.setPixel(x,y,preMap.data[pos][offset2]);
+				}
+			}
+
+			FourierLibrary::gaussFilterD ( dblImg, gaussImg, sigma );
+
+			offset2 = 0;
+			for ( int y = 0; y < ysize; y++ )
+			{
+				for ( int x = 0; x < xsize; x++, offset2++ )
+				{
+					preMap.data[pos][offset2]=gaussImg.getPixel(x,y);
+				}
+			}
+		}
+
+	}
+
+			
+	// combine and evaluate: average the probabilities over all scales
+#pragma omp parallel for
+	for ( int x = 0; x < xsize; x++ )
+	{
+		for ( int y = 0; y < ysize; y++ )
+		{
+			for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+			{
+				double prob = 0.0;
+				for ( int s = 0; s < ( int ) scalesize; s++ )
+				{
+
+					prob+=preMap.get ( x,y,j+s*klassen );
+
+				}
+
+				double val = prob / ( double ) ( scalesize );
+				probabilities.set ( x,y,val, j );
+			}
+		}
+	}
+
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+// 	showImage(img);
+	for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+	{
+		cout << "klasse: " << j << endl;//" " << cn.text ( j ) << endl;
+
+		NICE::Matrix tmp ( probabilities.ysize, probabilities.xsize );
+		double maxval = 0.0;
+		for ( int y = 0; y < probabilities.ysize; y++ )
+			for ( int x = 0; x < probabilities.xsize; x++ )
+			{
+				double val = probabilities.get ( x,y,j );
+				tmp(y, x) = val;
+				maxval = std::max ( val, maxval );
+			}
+
+		NICE::ColorImage imgrgb (probabilities.xsize, probabilities.ysize);
+		ICETools::convertToRGB ( tmp, imgrgb );
+
+		cout << "maxval = " << maxval << " for class " << j << endl; //cn.text ( j ) << endl;
+
+		//Show ( ON, imgrgb, cn.text ( j ) );
+		showImage(imgrgb, "Ergebnis");
+		//imgrgb.Write ( "tmp.ppm" );
+
+		//getchar();
+	}
+#endif
+	if ( useregions )
+	{
+		if ( bestclasses > 0 )
+		{
+			PSSImageLevelPrior pss ( 0, bestclasses, 0.2 );
+			pss.setPrior ( fV );
+			pss.postprocess ( segresult, probabilities );
+		}
+
+		// determine the regions
+
+		int regionsize = seg->segRegions ( img, mask);
+
+		Regionen.clear();
+		vector<vector <double> > regionprob;
+
+		// initialize the probabilities for each region
+		for ( int i = 0; i < regionsize; i++ )
+		{
+			vector<double> tmp;
+			for ( int j = 0; j < ( int ) probabilities.numChannels; j++ )
+			{
+				tmp.push_back ( 0.0 );
+			}
+			regionprob.push_back ( tmp );
+			Regionen.push_back ( pair<int, Example> ( 0, Example() ) );
+		}
+
+		// compute the probabilities for each region
+		for ( int x = 0; x < xsize; x++ )
+		{
+			for ( int y = 0; y < ysize; y++ )
+			{
+				for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+				{
+					double val = probabilities.get ( x,y,j );
+					int pos = mask(x,y);
+					Regionen[pos].second.weight+=1.0;
+					Regionen[pos].second.x += x;
+					Regionen[pos].second.y += y;
+					regionprob[pos][j] += val;
+				}
+			}
+		}
+/*
+cout << "regions: " << regionsize << endl;
+cout << "outfeats: " << endl;
+for(int j = 0; j < regionprob.size(); j++)
+{
+	for(int i = 0; i < regionprob[j].size(); i++)
+	{
+		cout << regionprob[j][i] << " ";
+	}
+	cout << endl;
+}
+cout << endl;
+getchar();*/
+
+		// choose the best probability for each region
+		for ( int i = 0; i < regionsize; i++ )
+		{
+			Regionen[i].second.x /= ( int ) Regionen[i].second.weight;
+			Regionen[i].second.y /= ( int ) Regionen[i].second.weight;
+			double maxval = 0.0;
+			int maxpos = 0;
+			
+			for ( int j = 0 ; j < ( int ) regionprob[i].size(); j++ )
+			{
+				regionprob[i][j] /= Regionen[i].second.weight;
+				
+				if ( maxval < regionprob[i][j] )
+				{
+					maxval = regionprob[i][j];
+					maxpos = j;
+				}
+				probabilities.set (Regionen[i].second.x,Regionen[i].second.y,regionprob[i][j], j );
+			}
+			
+			Regionen[i].first = maxpos;
+		}
+
+		// label the pixels of each region
+		for ( int y = 0; y < (int)mask.cols(); y++ )
+		{
+			for ( int x = 0; x < (int)mask.rows(); x++ )
+			{
+				int pos = mask(x,y);
+				segresult.setPixel(x,y,Regionen[pos].first);
+			}
+		}
+	}
+	else
+	{
+
+		PSSImageLevelPrior pss ( 1, 4, 0.2 );
+		pss.setPrior ( fV );
+		pss.postprocess ( segresult, probabilities );
+
+	}
+
+	// clean up:
+	for ( int i = 0; i < ( int ) pce.size(); i++ )
+	{
+		pce[i].second.clean();
+	}
+	pce.clear();
+	delete getFeats;
+	delete writeFeats;
+	delete cSIFT;
+}
+
+void SemSegCsurka2::semanticseg ( CachedExample *ce,
+								NICE::Image & segresult,
+								GenericImage<double> & probabilities )
+{
+
+	Examples regions;
+	NICE::Matrix regionmask;
+	classifyregions ( ce, segresult, probabilities, regions, regionmask );
+	if ( userellocprior || srg != NULL || gcopt !=NULL )
+	{
+		if ( userellocprior )
+			relloc->postprocess ( regions, probabilities );
+
+		if ( srg != NULL )
+			srg->optimizeShape ( regions, regionmask, probabilities );
+
+		if ( gcopt != NULL )
+			gcopt->optimizeImage ( regions, regionmask, probabilities );
+
+		// label the pixels of each region
+		for ( int y = 0; y < (int)regionmask.cols(); y++ )
+		{
+			for ( int x = 0; x < (int)regionmask.rows(); x++ )
+			{
+				int pos = regionmask(x,y);
+				segresult.setPixel(x,y,regions[pos].first);
+			}
+		}
+	}
+
+#ifndef NOVISUAL
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+// 	showImage(img);
+	for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+	{
+		cout << "klasse: " << j << " " << cn.text ( j ) << endl;
+
+		NICE::Matrix tmp ( probabilities.ysize, probabilities.xsize );
+		double maxval = 0.0;
+		for ( int y = 0; y < probabilities.ysize; y++ )
+			for ( int x = 0; x < probabilities.xsize; x++ )
+			{
+				double val = probabilities.get ( x,y,j );
+				tmp(y, x) = val;
+				maxval = std::max ( val, maxval );
+			}
+
+		NICE::ColorImage imgrgb (probabilities.xsize, probabilities.ysize);
+		ICETools::convertToRGB ( tmp, imgrgb );
+
+		cout << "maxval = " << maxval << " for class " << cn.text ( j ) << endl;
+
+		Show ( ON, imgrgb, cn.text ( j ) );
+		imgrgb.Write ( "tmp.ppm" );
+
+		getchar();
+	}
+#endif
+#endif
+
+}

+ 235 - 0
semseg/SemSegCsurka2.h

@@ -0,0 +1,235 @@
+/** 
+ * @file SemSegCsurka2.h
+ * @brief semantic segmentation using the method from Csurka08
+ * @author Björn Fröhlich
+ * @date 04/24/2009
+ */
+#ifndef SemSegCsurka2INCLUDE
+#define SemSegCsurka2INCLUDE
+
+#include <objrec/nice.h>
+ 
+#include "SemanticSegmentation.h"
+
+#include "objrec/math/ftransform/PCA.h"
+
+
+#include "objrec/features/localfeatures/LFColorSande.h"
+#include "objrec/features/localfeatures/LFColorWeijer.h"
+#include "objrec/features/localfeatures/LFReadCache.h"
+#include "objrec/features/localfeatures/LFWriteCache.h"
+#include "objrec/cbaselib/VectorFeature.h"
+#include "objrec/features/fpfeatures/SparseVectorFeature.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/segmentation/RegionSegmentationMethod.h"
+#include "objrec/segmentation/RSMeanShift.h"
+#include "objrec/segmentation/RSCache.h"
+
+#include "SemSegTools.h" 
+
+#include "objrec/math/cluster/GMM.h"
+#include "objrec/math/cluster/KMeansOnline.h"
+
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "objrec/classifier/fpclassifier/logisticregression/FPCSMLR.h"
+
+#include "objrec-froehlichexp/semseg/postsegmentation/PSSImageLevelPrior.h"
+#include "objrec-froehlichexp/semseg/postsegmentation/RelativeLocationPrior.h"
+#include "objrec-froehlichexp/semseg/postsegmentation/PPSuperregion.h"
+#include "objrec-froehlichexp/semseg/postsegmentation/PPGraphCut.h"
+
+
+#include <objrec/iclassifier/icgeneric/CSGeneric.h>
+
+/** @brief pixelwise labeling systems */
+
+namespace OBJREC {
+
+class SemSegCsurka2 : public SemanticSegmentation
+{
+
+  protected:
+	  
+	//! for normalization
+	vector<double> vecmin, vecmax;
+	  
+	//! boolean whether to save the cache or not
+	bool save_cache;
+	  
+	//! whether to read the cache or not; if read_cache is false, everything will be trained
+	bool read_cache;
+	
+	//! The cached Data
+	std::string cache;
+	
+	//! The PCA
+	PCA pca;
+	
+	//! using normalization
+	bool norm;
+	
+	//! feature Dimension after PCA
+	int dim;
+	
+	//! Classifier
+	FeaturePoolClassifier *classifier;
+	VecClassifier *vclassifier;
+	
+	//! Configuration File
+	const Config *conf;	
+	
+	ClassNames cn;
+	
+	//! whether to use the colorfeats or not
+	bool usecolorfeats;
+	
+	//! low level Segmentation method
+	RegionSegmentationMethod *seg;
+	
+	//! weight for the gaussimage
+	double sigmaweight;
+	
+	//! Gaussian Mixture
+	GMM *g;
+	
+	//! KMeans
+	KMeansOnline *k;
+	
+	//! use pca or not
+	bool usepca;
+	
+	//! forced recalculation of the pca
+	bool calcpca;
+	
+	//! use highlevel transformation with gmm or not
+	bool usegmm;
+	
+	//! use highlevel transformation with kmeans or not
+	bool usekmeans;
+	
+	int bestclasses;
+	
+	//! how many clusters of the kmeans to use
+	int kmeansfeat;
+	
+	//! use hard assignment or not
+	bool kmeanshard;
+	
+	//! use fisher kernel for bag if visual words
+	//! use the fisher kernel for the bag of visual words
+	
+	//! forced recalculation of the gmm
+	bool dogmm;
+	
+	//! number of gaussians
+	int gaussians;
+	
+	//! whether to use the relative location features or not
+	bool userellocprior;
+	
+	//! which classifier to use
+	std::string cname;
+	
+	//! use region segmentation or not
+	bool useregions;
+	
+	//! how many features should be used for training the classifier (relative value between 0 and 1)
+	double anteil;
+	
+	//! save steps for faster computing postprocesses
+	bool savesteps;
+	
+	//! the relative location features
+	RelativeLocationPrior *relloc;
+	
+	//! Shape pp
+	PPSuperregion *srg;
+	
+	//! Graph Cut pp
+	PPGraphCut *gcopt;
+	
+	//! smooth high level features or not
+	bool smoothhl;
+	
+	//! sigma for high level smoothing
+	double smoothfactor;
+	
+	/**
+	 * converts the low level features into high level features
+	 * @param ex input and output features
+	 * @param reduce reduce the dataset (1.0 means no reduction)
+	 */
+	void convertLowToHigh(Examples &ex, double reduce = 1.0);
+		
+	/**
+	 * Starts the PCA
+	 * @param ex input features
+	 */
+	void initializePCA(Examples &ex);
+	
+	/**
+	 * applies PCA to all input features
+	 * @param ex input features
+	 */
+	void doPCA(Examples &ex);
+	
+	/**
+	 * normalize the features between 0 and 1
+	 * @param ex input features
+	 */
+	void normalize(Examples &ex);
+	
+	
+	/**
+	 * smooth the high level features
+	 * @param ex input features
+	 */
+	void smoothHL(Examples ex);
+	
+  public:
+  
+	/** constructor 
+	  *  @param conf needs a configfile
+	  *  @param md a MultiDataset (contains images and other things)
+	  */
+	SemSegCsurka2( const Config *conf, const MultiDataset *md );
+      
+	/** simple destructor */
+	virtual ~SemSegCsurka2();
+
+	/** The training step
+	  *  @param md a MultiDataset (contains images and other things)
+	  */
+	void train ( const MultiDataset *md );
+
+	/** The training step for the postprocessing
+	  *  @param md a MultiDataset (contains images and other things)
+	  */
+	void trainpostprocess( const MultiDataset *md );
+	
+	/** The main procedure. Input: image, output: segmented image with pixelwise labels and the probabilities
+	  * @param ce image data
+	  * @param segresult result of the semantic segmentation with a label for each pixel
+	  * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+	  */
+	void semanticseg ( CachedExample *ce, 
+                       NICE::Image & segresult,
+                       GenericImage<double> & probabilities );
+
+    /** this procedure equals semanticseg if there is no postprocessing
+	  * @param ce image data
+	  * @param segresult result of the semantic segmentation with a label for each pixel
+	  * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+	  * @param Regionen the output regions
+	  * @param mask the positions of the regions
+	  */
+	void classifyregions ( CachedExample *ce, NICE::Image & segresult, GenericImage<double> & probabilities, Examples &Regionen, NICE::Matrix &mask );
+};
+
+} //namespace
+
+#endif

+ 115 - 0
semseg/SemSegLocal.cpp

@@ -0,0 +1,115 @@
+/** 
+* @file SemSegLocal.cpp
+* @brief semantic segmentation using image patches only
+* @author Erik Rodner
+* @date 05/08/2008
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+
+#include "SemSegLocal.h"
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "objrec/features/fpfeatures/PixelPairFeature.h"
+
+#include "SemSegTools.h" 
+
+using namespace OBJREC;
+
+using namespace std;
+using namespace NICE;
+
+
+SemSegLocal::SemSegLocal( const Config *conf, 
+			  const MultiDataset *md  )
+    : SemanticSegmentation ( conf, &(md->getClassNames("train")) )
+{
+    save_cache = conf->gB("FPCPixel", "save_cache", true );
+    read_cache = conf->gB("FPCPixel", "read_cache", false );
+    cache = conf->gS("FPCPixel", "cache", "fpc.data" );
+    fpc = new FPCRandomForests ( conf, "FPCPixel" );
+    fpc->setMaxClassNo ( classNames->getMaxClassno() );
+
+    
+    if ( read_cache ) {
+	fprintf (stderr, "LocSSimpleFP:: Reading classifier data from %s\n", cache.c_str() );
+	fpc->read ( cache );
+	fprintf (stderr, "LocSSimpleFP:: successfully read\n" );
+    } else {
+	train ( conf, md );
+    }
+}
+	
+void SemSegLocal::train ( const Config *conf, const MultiDataset *md )
+{
+    Examples examples;
+    vector<CachedExample *> imgexamples;
+
+    SemSegTools::collectTrainingExamples ( 
+	conf, 
+	"FPCPixel", // config section for grid settings
+	*((*md)["train"]),
+	*classNames, 
+	examples, 
+	imgexamples );
+
+    assert ( examples.size() > 0 );
+    
+    FeaturePool fp;
+    PixelPairFeature hf (conf);
+    hf.explode ( fp );
+
+    fpc->train ( fp, examples );
+
+    // clean up memory !!
+    for ( vector<CachedExample *>::iterator i = imgexamples.begin();
+		    i != imgexamples.end();
+		    i++ )
+	delete ( *i );
+
+    if ( save_cache ) {
+	fpc->save ( cache );
+    }
+
+    fp.destroy();
+}
+
+
+SemSegLocal::~SemSegLocal()
+{
+    if ( fpc != NULL )
+	delete fpc;
+}
+
+
+void SemSegLocal::semanticseg ( CachedExample *ce, 
+			   // refactor-nice.pl: check this substitution
+			   // old: Image & segresult,
+			   NICE::Image & segresult,
+			   GenericImage<double> & probabilities )
+{
+    // for speed optimization
+    FPCRandomForests *fpcrf = dynamic_cast<FPCRandomForests *> ( fpc );
+    int xsize, ysize;
+    ce->getImageSize ( xsize, ysize );
+    probabilities.reInit ( xsize, ysize, classNames->getMaxClassno()+1, true/*allocMem*/ );
+    segresult.resize(xsize, ysize);
+    
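+    // classify every pixel independently with the random forest and store its class scores in the probability channels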
+    Example pce ( ce, 0, 0 );
+    long int offset = 0;
+    for ( int y = 0 ; y < ysize ; y++ ) 
+	for ( int x = 0 ; x < xsize ; x++,offset++ )  
+	{
+	    pce.x = x ; pce.y = y; 
+	    ClassificationResult r = fpcrf->classify ( pce );
+	    segresult.setPixel(x,y,r.classno);
+	    for ( int i = 0 ; i < (int)probabilities.numChannels; i++ )
+		probabilities.data[i][offset] = r.scores[i];
+	}
+}

+ 56 - 0
semseg/SemSegLocal.h

@@ -0,0 +1,56 @@
+/** 
+* @file SemSegLocal.h
+* @brief semantic segmentation using image patches only
+* @author Erik Rodner
+* @date 05/08/2008
+
+*/
+#ifndef SEMSEGLOCALINCLUDE
+#define SEMSEGLOCALINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+  
+#include "objrec/classifier/classifierbase/FeaturePoolClassifier.h"
+#include "SemanticSegmentation.h"
+
+
+namespace OBJREC {
+
+/** abstract interface for pixelwise localization systems */
+class SemSegLocal : public SemanticSegmentation
+{
+
+    protected:
+	bool save_cache;
+	bool read_cache;
+	// refactor-nice.pl: check this substitution
+	// old: string cache;
+	std::string cache;
+	FeaturePoolClassifier *fpc;
+
+    public:
+  
+	/** simple constructor */
+	SemSegLocal( const Config *conf, const MultiDataset *md );
+      
+	/** simple destructor */
+	virtual ~SemSegLocal();
+
+	void train ( const Config *conf, const MultiDataset *md );
+
+	void semanticseg ( CachedExample *ce, 
+			   // refactor-nice.pl: check this substitution
+			   // old: Image & segresult,
+			   NICE::Image & segresult,
+			   GenericImage<double> & probabilities );
+
+};
+
+
+} // namespace
+
+#endif

+ 1440 - 0
semseg/SemSegRegionBased.cpp

@@ -0,0 +1,1440 @@
+#ifdef NICE_USELIB_OPENMP
+#include <omp.h>
+#endif
+
+#include "SemSegRegionBased.h"
+
+#include <iostream>
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "objrec/classifier/fpclassifier/logisticregression/FPCSMLR.h"
+
+#include <objrec/iclassifier/icgeneric/CSGeneric.h>
+#include "objrec/features/fpfeatures/PixelPairFeature.h"
+#include "objrec/classifier/genericClassifierSelection.h"
+
+#include "SemSegTools.h"
+
+#include "objrec/segmentation/RSMeanShift.h"
+#include "objrec/segmentation/RSCache.h"
+#include "objrec/segmentation/RSGraphBased.h"
+
+#include "objrec/baselib/Globals.h"
+
+#include <objrec/cbaselib/VectorFeature.h>
+
+#include "objrec/features/fpfeatures/SparseVectorFeature.h"
+#include "objrec/features/localfeatures/LFColorWeijer.h"
+#include "objrec/features/localfeatures/LFColorSande.h"
+#include "objrec/features/localfeatures/LocalFeatureSift.h"
+#include "objrec/features/localfeatures/LocalFeatureOpponnentSift.h"
+#include "objrec/features/localfeatures/LocalFeatureLFInterface.h"
+#include "objrec/features/localfeatures/LocalFeatureRGBSift.h"
+#include "objrec/features/localfeatures/LFCache.h"
+
+#include "objrec/features/regionfeatures/RFColor.h"
+#include "objrec/features/regionfeatures/RFHoG.h"
+#include "objrec/features/regionfeatures/RFBoV.h"
+#include "objrec/features/regionfeatures/RFBoVCodebook.h"
+#include "objrec/features/regionfeatures/RFCsurka.h"
+
+#include "objrec/iclassifier/codebook/CodebookRandomForest.h"
+
+#include "objrec/math/cluster/GMM.h"
+
+#undef DEMO
+#undef WRITEFEATS
+
+using namespace OBJREC;
+using namespace std;
+using namespace NICE;
+
+#define DEBUG_PRINTS
+
+SemSegRegionBased::SemSegRegionBased(const Config *c, const MultiDataset *md)
+		: SemanticSegmentation(c, &(md->getClassNames("train")))
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased Constructor starts" << endl;
+	#endif
+	conf = c;	
+	
+	save_cache = conf->gB("SemSegRegion", "save_cache", true);
+	read_cache = conf->gB("SemSegRegion", "read_cache", false);
+	classifiercache = conf->gS("SemSegRegion", "cache", "classifier.data");
+	cache = conf->gS("cache", "root", "tmp/");
+	bool colorw = conf->gB("SemSegRegion", "colorw", false);
+	bool bov = conf->gB("SemSegRegion", "bov", false);
+	bool hog = conf->gB("SemSegRegion", "hog", false);
+	bool structf = conf->gB("SemSegRegion", "struct", false);
+	string classifiertype = conf->gS("SemSegRegion", "classifier", "RF");
+	bool usegcopt = conf->gB("SemSegRegion", "gcopt", false);
+	bool bovmoosmann = conf->gB("SemSegRegion", "bovmoosmann", false);
+	bool csurka = conf->gB("SemSegRegion", "csurka", false);
+	
+	if(colorw)
+	{
+		LocalFeature *lfcw = new LFColorWeijer(conf);
+		rfc = new RFColor(conf, lfcw);
+	}
+	else
+	{
+		rfc = NULL;
+	}
+	
+	if(hog)
+	{
+		rfhog = new RFHoG(conf);
+	}
+	else
+	{
+		rfhog = NULL;
+	}
+	
+	if(structf)
+	{
+		rfstruct = new RFStruct(conf);
+	}
+	else
+	{
+		rfstruct = NULL;
+	}
+	
+	LocalFeature *lfcache = NULL;
+	
+	if(bov || bovmoosmann || csurka)
+	{
+		string ftype = conf->gS("BOV", "feature", "sift");
+		
+		siftFeats = NULL;
+		
+		if(ftype == "sift")
+		{
+			siftFeats = new LocalFeatureSift ( conf );
+			lfcache = new LFCache(conf, siftFeats);
+		}
+		
+		if(ftype == "osift")
+		{
+			siftFeats = new LocalFeatureOpponnentSift ( conf );
+			lfcache = new LFCache(conf, siftFeats);
+		}
+		
+		if(ftype == "rsift")
+		{
+			siftFeats = new LocalFeatureRGBSift ( conf );
+			lfcache = new LFCache(conf, siftFeats);
+		}
+		
+		if(ftype == "sande")
+		{
+			LocalFeatureRepresentation *sande = new LFColorSande(conf, "LFColorSandeTrain");
+			siftFeats = new LocalFeatureLFInterface ( conf, sande);
+			
+			LocalFeatureRepresentation *sande2 = new LFColorSande(conf, "LFColorSandeTest");
+			LocalFeature *siftFeats2 = new LocalFeatureLFInterface ( conf, sande2);
+			lfcache = new LFCache(conf, siftFeats2);
+		}
+		
+		if(siftFeats == NULL)
+		{
+			throw "please choose one of the following features for BOV: osift, rsift, sift, sande";
+		}
+	}
+
+	if(csurka)
+	{
+		rfCsurka = new RFCsurka(conf, lfcache);
+	}
+	else
+	{
+		rfCsurka = NULL;
+	}
+
+	if(bov)
+	{
+		rfbov = new RFBoV (conf, lfcache);
+	}
+	else 
+	{
+		rfbov = NULL;
+	}
+	
+	if(bovmoosmann)
+	{
+		rfbovcrdf = new RFBoVCodebook (conf, lfcache);
+	}
+	else
+	{
+		rfbovcrdf = NULL;
+	}
+	
+	// setting classifier
+	fpc = NULL;
+	vclassifier = NULL;
+	
+	if(classifiertype == "RF")
+	{
+		fpc = new FPCRandomForests(conf, "ClassifierForest");
+	}
+	else if(classifiertype == "SMLR")
+	{
+		fpc = new FPCSMLR(conf, "ClassifierSMLR");
+	}
+	else if(classifiertype == "VECC")
+	{
+		vclassifier = CSGeneric::selectVecClassifier ( conf, "vecClassifier" );
+	}
+	else
+	{
+		throw "classifiertype not (yet) supported";
+	}
+	
+	if(fpc != NULL)
+		fpc->setMaxClassNo(classNames->getMaxClassno());
+	else if(vclassifier != NULL)
+		vclassifier->setMaxClassNo(classNames->getMaxClassno());
+
+	cn = md->getClassNames ( "train" );
+	
+	// setting segmentation method
+	RegionSegmentationMethod *tmprsm = new RSMeanShift(conf);
+	rsm = new RSCache ( conf, tmprsm );
+	//rsm = new RSGraphBased(conf);
+
+	// use global optimization (MRF)
+	if(usegcopt)
+		gcopt = new PPGraphCut ( conf );
+	else
+		gcopt = NULL;
+	
+	classifiercache = cache+classifiercache;
+	
+	// read training data or start training
+	if (read_cache)
+	{
+		fprintf(stderr, "SemSegRegion:: Reading classifier data from %s\n", cache.c_str());
+		if(fpc != NULL)
+			fpc->read(classifiercache);
+		else if(vclassifier != NULL)
+			vclassifier->read(classifiercache);
+		
+		if(rfCsurka != NULL)
+		{
+			bool usegmm = conf->gB("Csurka", "usegmm", false);
+			bool usepca = conf->gB("Csurka", "usepca", false);
+			
+			if(usepca || usegmm)
+			{
+				RFCsurka *_rfcsurka = dynamic_cast< RFCsurka * >(rfCsurka);
+				
+				if(usepca)
+				{
+					int pcadim = conf->gI("Csurka", "pcadim", 100);
+					PCA *pca = new PCA(pcadim);
+					string pcadst = cache+"/csurka.pca";
+
+					if (!FileMgt::fileExists(pcadst))
+					{
+						throw(pcadst+" not found");
+					}
+					else
+					{
+						pca->read ( pcadst );
+					}
+					
+					_rfcsurka->setPCA(pca);
+				}
+				
+				if(usegmm)
+				{
+					int gaussians = conf->gI("Csurka", "gaussians", 1024);
+					GMM *g = new GMM( conf, gaussians );
+					string gmmdst = cache+"/csurka.gmm";
+					
+					if ( !g->loadData ( cache+"/gmmSIFT" ) )
+					{
+						throw(gmmdst+" not found");
+					}
+
+					_rfcsurka->setGMM(g);
+				}
+			}
+		}
+		
+		if(rfbov != NULL)
+		{
+			RFBoV *rfbovdyn = dynamic_cast< RFBoV * >(rfbov);
+			
+			int gaussians = conf->gI("SIFTTrain", "gaussians", 512);
+			
+			GMM *g = new GMM( conf, gaussians );
+			PCA *pca = new PCA(100);
+			string pcadst = cache+"/bov.pca";
+	
+			if ( !g->loadData ( cache+"/gmmSIFT" ) || !FileMgt::fileExists(pcadst))
+			{
+				throw("pca or gmm not found");
+			}
+			else
+			{
+				pca->read ( pcadst );
+			}
+			rfbovdyn->setPCA(pca);
+			rfbovdyn->setGMM(g);
+		}
+		
+		fprintf(stderr, "SemSegRegion:: successfully read\n");
+	}
+	else
+	{
+		train(md);
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased Constructor finished" << endl;
+	#endif
+}
+
+SemSegRegionBased::~SemSegRegionBased()
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased Destructor starts" << endl;
+	#endif
+	if (fpc != NULL)
+	{
+		delete fpc;
+	}
+	if(vclassifier != NULL)
+	{
+		delete vclassifier;
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased Destructor finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::train(const MultiDataset *md)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::train starts" << endl;
+	#endif
+	
+	Examples examples;
+	examples.filename = "training";
+	
+	const LabeledSet train = * ( *md ) ["train"];
+	
+	set<int> forbidden_classes;
+	
+	std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
+	if ( forbidden_classes_s == "" )
+	{
+		forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
+	}
+	
+	cn.getSelection ( forbidden_classes_s, forbidden_classes );
+
+	if(gcopt != NULL)
+		gcopt->setClassNo ( cn.numClasses() );
+	
+	LabeledSet::Permutation perm;
+
+	train.getPermutation(perm);
+
+	learnHighLevel(perm);
+
+	//FIXME:Moosmann
+	
+	int imgcounter = 0;
+
+	vector<vector<FeatureType> > feats;
+	// loop over all training images
+
+	for ( LabeledSet::Permutation::const_iterator i = perm.begin();
+			 i != perm.end(); i++,imgcounter++ )
+	{
+		const string fn = i->second->img();
+		Globals::setCurrentImgFN ( fn );
+		cout << fn << endl;
+		NICE::ColorImage cimg(fn);
+		NICE::Matrix mask;
+		RegionGraph rg;
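+		// segment the image into regions and build the region adjacency graph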
+		rsm->getGraphRepresentation(cimg, mask, rg);
+		
+#ifdef DEMO
+		rsm->visualizeGraphRepresentation(cimg, mask);
+#endif
+
+		// get label
+		const LocalizationResult *locResult = i->second->localization();
+		NICE::Image pixelLabels (cimg.width(), cimg.height());
+		pixelLabels.set(0);
+		locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+		getRegionLabel(mask, rg, pixelLabels);
+		
+		getFeats(cimg, mask, rg, feats);
+		
+//#pragma omp critical
+		for(int i = 0; i < rg.size(); i++)
+		{
+			int classno = rg[i]->getLabel();
+			Example example;
+			example.position = imgcounter;
+			examples.push_back (pair<int, Example> ( classno, example ));
+		}
+//#pragma omp critical
+		if ( gcopt !=NULL )
+			gcopt->trainImage ( rg );
+
+	}
+	cout << "train classifier starts" << endl;
+	trainClassifier(feats, examples);
+	cout << "train classifier finished" << endl;
+
+	if ( gcopt != NULL )
+		gcopt->finishPP ( cn );
+
+	// clean up
+	/*for(int i = 0; i < (int) examples.size(); i++)
+	{
+		examples[i].second.clean();
+	}*/
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::train finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::getRegionLabel(NICE::Matrix &mask, RegionGraph &rg, NICE::Image &pixelLabels)
+{	
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getRegionLabel starts" << endl;
+	#endif
+	vector<vector<int> > hists;
+	int regionsize = rg.size();
+	int xsize = pixelLabels.width();
+	int ysize = pixelLabels.height();
+
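+	// count the ground-truth labels of all pixels in each region and assign the region its most frequent class
+	// ("various" falls back to the second most frequent class)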
+	for ( int i = 0; i < regionsize; i++ )
+	{
+		vector<int> hist ( cn.numClasses(), 0 );
+		hists.push_back ( hist );
+	}
+
+	for ( int x = 0; x < xsize; x++ )
+	{
+		for ( int y = 0; y < ysize; y++ )
+		{
+			int numb = mask(x,y);
+			hists[numb][pixelLabels.getPixel(x,y)]++;
+		}
+	}
+
+	for ( int i = 0; i < regionsize; i++ )
+	{
+		int maxval = -numeric_limits<int>::max();
+		int smaxval = -numeric_limits<int>::max();
+		int maxpos = -1;
+		int secondpos = -1;
+		for ( int k = 0; k < ( int ) hists[i].size(); k++ )
+		{
+			if ( maxval < hists[i][k] )
+			{
+				secondpos = maxpos;
+				smaxval = maxval;
+				maxval = hists[i][k];
+				maxpos = k;
+			}
+			else
+			{
+				if ( smaxval < hists[i][k] )
+				{
+					smaxval = hists[i][k];
+					secondpos = k;
+				}
+			}
+		}
+
+		// FIXME: implement this for all forbidden classes
+		//if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+
+		if ( cn.text ( maxpos ) == "various" && smaxval > 0)
+			rg[i]->setLabel(secondpos);
+		else
+			rg[i]->setLabel(maxpos);
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getRegionLabel finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::getExample(const vector<vector<FeatureType> > &feats, Examples &examples)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getExample starts" << endl;
+	#endif
+	
+	for(int j = 0; j < (int)feats.size(); j++)
+	{
+		int counter = 0;
+		for(int i = 0; i < (int)feats[0].size(); i++, counter++)
+		{
+			if(examples[counter].second.vec == NULL)
+			{
+				NICE::Vector *vec = new NICE::Vector(feats[j][i].getVec());
+				examples[counter].second.vec = vec;
+			}
+			else
+			{
+				examples[counter].second.vec->append(feats[j][i].getVec());
+			}
+		}
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getExample finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::getFeaturePool( const vector<vector<FeatureType> > &feats, FeaturePool &fp)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getFeaturePool starts" << endl;
+	#endif
+	
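+	// add one VectorFeature per dimension of the concatenated feature vector; the features of each group share a total weight of 1 (1/dimension each)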
+	int olddim = 0;
+	int fulldim = 0;
+	
+	for(int j = 0; j < (int)feats.size(); j++)
+	{
+		fulldim += feats[j][0].getDim();
+	}
+	
+	for(int j = 0; j < (int)feats.size(); j++)
+	{
+		int dimension = feats[j][0].getDim();
+		for ( int i = olddim ; i < olddim+dimension ; i++ )
+		{
+			VectorFeature *f = new VectorFeature ( fulldim );
+			f->feature_index = i;
+			fp.addFeature(f, 1.0 / dimension);
+		}
+		olddim += dimension;
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getFeaturePool finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::trainClassifier(vector<vector<FeatureType> > &feats, Examples & examples)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::trainClassifier starts" << endl;
+	#endif
+	assert (feats.size() > 0);
+	assert (feats[0].size() > 0);
+
+	// delete nonrelevant features
+	for(int i = (int)examples.size()-1; i >= 0; i--)
+	{
+		if(cn.text ( examples[i].first ) == "various")
+		{
+			examples.erase(examples.begin()+i);
+			for(int k = 0; k < (int)feats.size(); k++)
+			{
+				feats[k].erase(feats[k].begin()+i);
+			}
+		}
+	}
+	
+#ifdef WRITEFEATS
+	// write the features to a file
+	ofstream fout( "trainfeats", ios_base::out );
+	//vector<int> ccounter(cn.getMaxClassno(),0);
+	//int maxv = 100;
+	for(int i = 0; i < (int)examples.size(); i++)
+	{
+		//if(ccounter[examples[i].first]++ < maxv)
+		//{
+			fout << examples[i].first << " ";
+			for(int j = 0; j < (int)feats.size(); j++)
+			{
+				for(int k = 0; k < feats[j][i].getDim(); k++)
+				{
+					fout << feats[j][i].get(k) << " ";
+				}
+			}
+			fout << endl;
+		//}
+	}
+#endif
+	
+	if(fpc != NULL)
+	{
+		FeaturePool fp;
+		getExample(feats, examples);
+		getFeaturePool(feats, fp);
+
+		fpc->train ( fp, examples );
+
+		fp.destroy();
+
+		if(save_cache)
+		{
+			fpc->save(classifiercache);
+		}
+
+//#pragma omp parallel for		
+		for(int i = 0; i < (int)examples.size(); i++)
+		{
+			if(examples[i].second.vec != NULL)
+			{
+				delete examples[i].second.vec;
+				examples[i].second.vec = NULL;
+			}
+		}
+
+	}
+	else if(vclassifier != NULL)
+	{
+		LabeledSetVector lsv;
+		
+//#pragma omp parallel for
+		for(int i = 0; i < (int)feats[0].size(); i++)
+		{
+			NICE::Vector *v = new NICE::Vector(feats[0][i].getVec());
+			for(int j = 1; j < (int)feats.size(); j++)
+			{
+				v->append(feats[j][i].getVec());
+			}
+//#pragma omp critical
+			lsv.add_reference (examples[i].first, v );
+		}
+
+		vclassifier->teach(lsv);
+		vclassifier->finishTeaching();
+		lsv.clear();
+		if(save_cache)
+		{
+			vclassifier->save(classifiercache);
+		}
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::trainClassifier finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::classify(const vector<vector<FeatureType> > &feats, Examples &examples, vector<vector<double> > &probs)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::classify starts" << endl;
+	#endif
+	for(int i = 0; i < (int)feats[0].size(); i++)
+	{
+		Example example;
+		examples.push_back (pair<int, Example> ( -1, example ) );
+	}
+	
+	getExample(feats, examples);
+	
+	int nbcl = classNames->getMaxClassno() + 1;
+	
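+	// classify every region example and store one score per class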
+	for(int i = 0; i < (int)examples.size(); i++)
+	{
+		vector<double> p;
+		ClassificationResult r;
+			
+		if(fpc != NULL)
+		{
+			r = fpc->classify ( examples[i].second );
+		}
+		else if(vclassifier != NULL)
+		{
+ 			r = vclassifier->classify(*(examples[i].second.vec));
+		}
+	
+		for ( int j = 0 ; j < nbcl; j++ )
+		{
+			p.push_back(r.scores[j]);
+		}
+
+		probs.push_back(p);
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::classify finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::semanticseg(CachedExample *ce, NICE::Image & segresult,	GenericImage<double> & probabilities)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::semanticseg starts" << endl;
+	#endif
+	int xsize, ysize;
+
+	ce->getImageSize ( xsize, ysize );
+
+	probabilities.reInit ( xsize, ysize, classNames->getMaxClassno() +1, true/*allocMem*/ );
+	std::string currentFile = Globals::getCurrentImgFN();
+	NICE::ColorImage cimg(currentFile);
+
+	NICE::Matrix mask;
+	RegionGraph rg;
+	rsm->getGraphRepresentation(cimg, mask, rg);
+#ifdef DEMO
+	rsm->visualizeGraphRepresentation(cimg, mask);
+#endif
+	vector<vector<FeatureType> > feats;
+
+	getFeats(cimg, mask, rg, feats);
+
+#ifdef WRITEFEATS	
+	getRegionLabel(mask, rg, segresult);
+	ofstream fout( "testfeats", ios_base::app );
+
+	for(int i = 0; i < (int)rg.size(); i++)
+	{
+		fout << rg[i]->getLabel() << " ";
+		for(int j = 0; j < (int)feats.size(); j++)
+		{
+			for(int k = 0; k < feats[j][i].getDim(); k++)
+			{
+				fout << feats[j][i].get(k) << " ";
+			}
+		}
+		fout << endl;
+	}
+#endif
+	
+	segresult = NICE::Image(xsize, ysize);
+	segresult.set(0);
+	
+	Examples examples; 
+	
+	vector<vector<double> > probs;
+
+	classify(feats, examples, probs);
+
+	labelRegions(rg, probs);
+
+	if ( gcopt != NULL )
+		gcopt->optimizeImage ( rg, probs );
+
+	labelImage(segresult, mask, rg);
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::semanticseg finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::labelRegions(RegionGraph &rg, vector<vector<double> > &probs)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::labelRegions starts" << endl;
+	#endif
+	for(int i = 0; i < rg.size(); i++)
+	{
+		int bestclass = -1;
+		double bestval = -numeric_limits<int>::max();
+		for(int j = 0; j < (int)probs[i].size(); j++)
+		{
+			if(bestval < probs[i][j])
+			{
+				bestval = probs[i][j];
+				bestclass = j;
+			}
+		}
+		rg[i]->setLabel(bestclass);
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::labelRegions finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::labelImage(NICE::Image &segresult, NICE::Matrix &mask,RegionGraph &rg)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::labelImage starts" << endl;
+	#endif
+	for(int y = 0; y < segresult.height(); y++)
+	{
+		for(int x = 0; x < segresult.width(); x++)
+		{
+			int r = (int)mask(x,y);
+			segresult.setPixel(x,y,rg[r]->getLabel());
+		}
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::labelImage finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::getFeats(const NICE::ColorImage &cimg, const NICE::Matrix &mask, const RegionGraph &rg, vector<vector< FeatureType> > &feats) const
+{	
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getFeats starts" << endl;
+	#endif
+	string fn = Globals::getCurrentImgFN();	
+	NICE::Image img(fn);
+	int featnb = 0;
+
+	const int rgcount = rg.size();
+	if(rfc != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+
+		VVector features;
+		rfc->extractRGB ( cimg, rg, mask, features );
+		
+		assert((int)features.size() == rgcount);
+		
+		for(int j = 0; j < (int)features.size(); j++)
+		{
+			feats[featnb].push_back(FeatureType(features[j]));
+		}
+#ifdef DEMO
+		LFColorWeijer lfc(conf);
+		lfc.visualizeFeatures (cimg);
+#endif
+		
+		featnb++;
+	}
+
+	if(rfbov != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+
+		VVector features;
+		rfbov->extractRGB ( cimg, rg, mask, features );
+		
+		assert((int)features.size() == rgcount);
+		
+		for(int j = 0; j < (int)features.size(); j++)
+		{
+			feats[featnb].push_back(FeatureType(features[j]));
+		}
+		
+		featnb++;
+	}
+
+	if(rfhog != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+
+		VVector features;
+
+		rfhog->extractRGB ( cimg, rg, mask, features );
+
+		assert((int)features.size() == rgcount);
+
+		for(int j = 0; j < (int)features.size(); j++)
+		{
+			feats[featnb].push_back(FeatureType(features[j]));
+		}
+
+		featnb++;
+	}
+
+	if(rfstruct != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+		
+		VVector features;
+		rfstruct->extractRGB ( cimg, rg, mask, features );
+		
+		for(int j = 0; j < (int)features.size(); j++)
+		{
+			feats[featnb].push_back(FeatureType(features[j]));
+		}
+		
+		featnb++;
+	}
+
+	if(rfbovcrdf != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+
+		VVector features;
+		rfbovcrdf->extractRGB ( cimg, rg, mask, features );
+
+		assert((int)features.size() == rgcount);
+		
+		for(int j = 0; j < (int)features.size(); j++)
+		{
+			feats[featnb].push_back(FeatureType(features[j]));
+		}
+		
+		featnb++;
+	}
+
+	if(rfCsurka != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+
+		VVector features;
+
+		rfCsurka->extractRGB ( cimg, rg, mask, features );
+
+		assert((int)features.size() == rgcount);
+
+		for(int j = 0; j < (int)features.size(); j++)
+		{
+			feats[featnb].push_back(FeatureType(features[j]));
+		}
+
+		featnb++;
+
+	}
+	
+	/* Dummy for new features:
+	if(siftFeats != NULL)
+	{
+		if((int)feats.size() <= featnb)
+		{
+			vector<FeatureType> ftv;
+			feats.push_back(ftv);
+		}
+			
+		featnb++;
+	}
+	*/
+	
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::getFeats finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::computeLF(LabeledSet::Permutation perm, VVector &feats, vector<int> &label, Examples &examples, int mode)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::computeLF starts" << endl;
+	#endif
+	string sscales = conf->gS("SIFTTrain", "scales", "1+2.0+3.0");
+	int grid = conf->gI("SIFTTrain", "grid", 20);
+	double fraction = conf->gD("SIFTTrain", "fraction", 1.0);
+
+	set<int> forbidden_classes;
+
+	std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
+	if ( forbidden_classes_s == "" )
+	{
+		forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
+	}
+	cn.getSelection ( forbidden_classes_s, forbidden_classes );
+	cerr << "forbidden: " << forbidden_classes_s << endl;
+				
+	vector<double> scales;
+	string::size_type pos = 0;
+	string::size_type oldpos = 0;
+	while(pos != string::npos)
+	{
+		pos = sscales.find("+", oldpos);
+		string val;
+		if(pos == string::npos)
+			val = sscales.substr(oldpos);
+		else
+			val = sscales.substr(oldpos, pos-oldpos);
+		double d = atof(val.c_str());
+		scales.push_back(d);
+		oldpos = pos+1;
+	}
+
+	int fsize = 0;
+	
+	string save = cache+"/siftTRAIN.dat";
+	string savep = cache +"/siftPostions.dat";
+
+	if(!FileMgt::fileExists(save) || !FileMgt::fileExists(savep))
+	{
+//FIXME: remove
+//		vector<int> counter(9,0);
+		for ( LabeledSet::Permutation::const_iterator i = perm.begin();
+					i != perm.end(); i++ )
+		{
+			const string fn = i->second->img();
+			Globals::setCurrentImgFN ( fn );
+
+			NICE::Image img(fn);
+			NICE::ColorImage cimg(fn);
+			VVector features;
+			VVector positions;
+
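+			// sample keypoint positions on a staggered grid at every scale, keeping each position with probability 'fraction'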
+			int x0 = grid/2;
+			for(int y = 0; y < (int)img.height(); y+=grid)
+			{
+				for(int x = x0; x < (int)img.width(); x+=grid)
+				{
+					for(int s = 0; s < (int)scales.size(); s++)
+					{
+						double r = (double)rand()/(double)RAND_MAX;
+						if( r < fraction)
+						{
+							fsize++;
+							NICE::Vector vec(3);
+							vec[0] = x;
+							vec[1] = y;
+							vec[2] = scales[s];
+							positions.push_back(vec);
+						}
+					}
+				}
+				if(x0 == 0)
+				{
+					x0 = grid/2;
+				}
+				else
+				{
+					x0 = 0;
+				}
+			}
+
+			siftFeats->getDescriptors(cimg, positions, features);
+
+			assert(positions.size() == features.size());
+			
+			const LocalizationResult *locResult = i->second->localization();
+			NICE::Image pixelLabels (cimg.width(), cimg.height());
+			pixelLabels.set(0);
+			locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+			for(int i = 0; i < (int)features.size(); i++)
+			{
+				int classno = pixelLabels(positions[i][0],positions[i][1]);
+//				if ( cn.text ( classno ) == "various")
+//					continue;
+
+				if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+					continue;
+
+//				counter[classno]++;
+				label.push_back(classno);
+				feats.push_back(features[i]);
+			}
+			assert(label.size() == feats.size());
+		}
+/*		cout << "samples for class: " << endl;
+		for(int i = 0; i < 9; i++)
+		{
+			cout << i << ": " << counter[i] << endl;
+		}
+*/
+		feats.save(save,1);
+		ofstream lout(savep.c_str(),ios_base::out);
+		for(uint i = 0; i < label.size(); i++)
+		{
+			lout << label[i] << " ";
+		}
+		lout.close();
+	}
+	else
+	{
+		feats.read(save,1);
+
+		ifstream lin(savep.c_str(), ios_base::in);
+		label.clear();
+		for(int i = 0; i < (int)feats.size(); i++)
+		{
+			int l;
+			lin >> l;
+			label.push_back(l);
+		}		
+	}
+	
+	if(mode == 1)
+	{
+		convertVVectorToExamples(feats,examples, label);
+	}
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::computeLF finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::learnHighLevel(LabeledSet::Permutation perm)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemSegRegionBased::learnHighLevel starts" << endl;
+	#endif
+	srand ( time (NULL) );
+	
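+	// learn the high-level feature representations from sampled local features: PCA + GMM for BoV,
+	// a random forest codebook for the Moosmann variant, and the patch classifier for the Csurka features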
+	if(rfbov != NULL || rfbovcrdf != NULL || rfCsurka != NULL)
+	{		
+		if(rfbov != NULL)
+		{
+			RFBoV *rfbovdyn = dynamic_cast< RFBoV * >(rfbov);
+			
+			int gaussians = conf->gI("SIFTTrain", "gaussians", 512);
+			int pcadim = conf->gI("SIFTTrain", "pcadim", 50);
+			
+			GMM *g = new GMM( conf, gaussians );
+			PCA *pca = new PCA(pcadim);
+			string pcadst = cache+"/pca.txt";
+			
+			if ( !g->loadData ( cache+"/gmmSIFT" ) || !FileMgt::fileExists( pcadst) )
+			{
+				VVector feats;
+				vector<int> label;
+			
+				Examples ex;
+				
+				computeLF(perm, feats, label, ex, 0);
+				
+				assert(feats.size() > 0);
+				initializePCA(feats, *pca, pcadim, pcadst);
+				
+				transformFeats(feats, *pca);
+				cout << "nb of feats for learning gmm: " << feats.size() << endl;
+				g->computeMixture(feats);
+				
+				if ( save_cache )
+					g->saveData ( cache+"/gmmSIFT" );			
+			}
+			else
+			{
+				pca->read ( pcadst );
+			}
+
+			rfbovdyn->setPCA(pca);
+			rfbovdyn->setGMM(g);
+		}
+		
+		if(rfbovcrdf != NULL || rfCsurka != NULL)
+		{
+			Examples examples;
+			VVector feats;
+			vector<int> label;
+
+			computeLF(perm, feats, label, examples , 1);
+
+			FeaturePool fp;
+			FeaturePool fpsparse;
+
+			int dimension = examples[0].second.vec->size();
+	
+			for ( int i = 0 ; i < dimension ; i++ )
+			{
+				VectorFeature *f = new VectorFeature ( dimension, i );
+				fp.addFeature(f, 1.0 / dimension);
+
+				SparseVectorFeature *fs = new SparseVectorFeature ( dimension, i);
+				//fs->feature_index = i;
+				
+				fpsparse.addFeature(fs, 1.0 / dimension);
+			}
+
+			if(rfbovcrdf != NULL)
+			{
+				RFBoVCodebook *rfbovdyn = dynamic_cast< RFBoVCodebook * >(rfbovcrdf);
+				
+				int maxDepth = conf->gI("BoVMoosmann", "maxdepth", 10);
+				int csize = conf->gI("BoVMoosmann", "codebooksize", 1024);
+				
+				CodebookRandomForest *crdf = new CodebookRandomForest( maxDepth, csize );
+				
+				// train the random forest
+				FPCRandomForests *fpcrfmoos = new FPCRandomForests(conf, "MoosForest");
+
+				fpcrfmoos->train(fp, examples);
+					
+				crdf->setClusterForest(fpcrfmoos);
+			
+				for(int i = 0; i < (int)examples.size(); i++)
+				{
+					if(examples[i].second.vec != NULL)
+					{
+						delete examples[i].second.vec;
+						examples[i].second.vec = NULL;
+					}
+				}
+				rfbovdyn->setCodebook(crdf);
+			}
+
+			if(rfCsurka != NULL)
+			{
+
+				bool usegmm = conf->gB("Csurka", "usegmm", false);
+				bool usepca = conf->gB("Csurka", "usepca", false);
+			
+				PCA *pca = NULL;
+				GMM *g = NULL;
+				
+				string classifierdst = cache+"/csurka.";
+				
+				if(usepca || usegmm)
+				{
+
+					RFCsurka *_rfcsurka = dynamic_cast< RFCsurka * >(rfCsurka);
+				
+					bool create = false;
+					string gmmdst = cache+"/csurka.gmm";
+					string pcadst = cache+"/csurka.pca";
+
+					int pcadim = conf->gI("Csurka", "pcadim", 100);
+				
+					if(usepca)
+					{
+						pca = new PCA(pcadim);
+					
+						if (!FileMgt::fileExists(pcadst))
+						{
+							create = true;
+						}
+						else
+						{
+							pca->read ( pcadst );
+						}
+					}
+
+					if(usegmm)
+					{
+						int gaussians = conf->gI("Csurka", "gaussians", 1024);
+						g = new GMM( conf, gaussians );
+					
+						if ( !g->loadData ( gmmdst ) )
+						{
+							create = true;
+						}				
+					}
+
+					if(create)
+					{
+						if(usepca)
+						{
+							convertExamplesToVVector(feats,examples, label);
+							initializePCA(feats, *pca, pcadim, pcadst);
+							transformFeats(feats, *pca);
+							convertVVectorToExamples(feats,examples, label);
+						}
+						
+						if(usegmm)
+						{							
+							g->computeMixture(examples);
+							if ( save_cache )
+								g->saveData ( gmmdst );	
+						}
+					}
+
+				
+					if(usepca)
+						_rfcsurka->setPCA(pca);
+				
+					
+					if(usegmm)
+						_rfcsurka->setGMM(g);
+
+				}
+				
+				
+				string classifiertype = conf->gS("Csurka", "classifier", "SMLR");
+				FeaturePoolClassifier *fpcrfCs = NULL;
+				VecClassifier *vecClassifier = NULL;
+				
+				if(classifiertype == "SMLR")
+				{
+					fpcrfCs = new FPCSMLR(conf, "CsurkaSMLR");
+					classifierdst += "smlr";
+				}
+				else if(classifiertype == "RF")
+				{
+					fpcrfCs = new FPCRandomForests(conf, "CsurkaForest");
+					classifierdst += "rf";
+				}
+				else
+				{
+					vecClassifier = GenericClassifierSelection::selectVecClassifier(conf, classifiertype); 
+					classifierdst += "other";
+				}
+
+				RFCsurka *rfcsurka = dynamic_cast< RFCsurka * >(rfCsurka);
+
+				if(usepca)
+				{
+					assert(examples.size() > 0);
+					if((int)examples[0].second.vec->size() != pca->getTargetDim())
+					{
+						for(int i = 0; i < (int)examples.size(); ++i)
+						{
+							*examples[i].second.vec = pca->getFeatureVector ( *examples[i].second.vec, true );
+						}
+					}
+				}
+
+								
+				if ( !FileMgt::fileExists( classifierdst) )
+				{
+					if(usegmm)
+					{
+						if(classifiertype == "SMLR")
+						{
+							for(int i = 0; i < (int)examples.size(); ++i)
+							{
+								examples[i].second.svec = new SparseVector();
+								g->getProbs(*examples[i].second.vec, *examples[i].second.svec);
+								delete examples[i].second.vec;
+								examples[i].second.vec = NULL;
+							}
+						}
+						else
+						{
+							for(int i = 0; i < (int)examples.size(); ++i)
+							{
+								g->getProbs(*examples[i].second.vec, *examples[i].second.vec);
+							}
+						}
+						if(fpcrfCs != NULL)
+						{
+						  fpcrfCs->train(fpsparse, examples);
+						}
+						else
+						{
+						  LabeledSetVector lvec;
+						  convertExamplesToLSet(examples, lvec);
+						  vecClassifier->teach(lvec);
+						  convertLSetToExamples(examples, lvec);
+						  vecClassifier->finishTeaching();
+						}
+					}
+					else
+					{
+						if(fpcrfCs != NULL)
+						{
+						  fpcrfCs->train(fp, examples);
+						}
+						else
+						{
+						  LabeledSetVector lvec;
+						  convertExamplesToLSet(examples, lvec);
+						  vecClassifier->teach(lvec);
+						  convertLSetToExamples(examples, lvec);
+						  vecClassifier->finishTeaching();
+						}
+					}
+
+					if(fpcrfCs != NULL)
+					{
+						fpcrfCs->setMaxClassNo(classNames->getMaxClassno());
+						fpcrfCs->save ( classifierdst );
+					}
+					else
+					{
+						vecClassifier->setMaxClassNo(classNames->getMaxClassno());
+						vecClassifier->save(classifierdst);
+					}
+	
+				}
+				else
+				{
+					if(fpcrfCs != NULL)
+					{
+						fpcrfCs->setMaxClassNo(classNames->getMaxClassno());
+						fpcrfCs->read ( classifierdst );
+					}
+					else
+					{
+						vecClassifier->setMaxClassNo(classNames->getMaxClassno());
+						vecClassifier->read(classifierdst);
+					}
+
+					
+				}
+	
+				if(fpcrfCs != NULL)
+				{
+					rfcsurka->setClassifier(fpcrfCs);
+				}
+				else
+				{
+					rfcsurka->setClassifier(vecClassifier);
+				}
+			}
+			fp.destroy();
+			for(int i = 0; i < (int)examples.size(); i++)
+			{
+				if(examples[i].second.vec != NULL)
+				{
+					delete examples[i].second.vec;
+					examples[i].second.vec = NULL;
+				}
+			}
+		}
+	}
+	#ifdef DEBUG_PRINTS
+	cerr << "SemSegRegionBased::learnHighLevel finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::transformFeats(VVector &feats, PCA &pca)
+{
+	#ifdef DEBUG_PRINTS
+	cerr << "SemSegRegionBased::transformFeats starts"<< endl;
+	#endif
+	for(int i = 0; i < (int)feats.size(); i++)
+	{
+		feats[i] = pca.getFeatureVector ( feats[i], true );
+	}
+	#ifdef DEBUG_PRINTS
+	cerr << "SemSegRegionBased::transformFeats finished" << endl;
+	#endif
+}
+
+void SemSegRegionBased::initializePCA ( const VVector &feats, PCA &pca, int dim, string &fn )
+{
+	#ifdef DEBUG_PRINTS
+	cerr << "SemSegRegionBased::initializePCA starts"<< endl;
+	#endif
+	pca = PCA(dim);
+
+	if (!FileMgt::fileExists(fn))
+	{
+		srand ( time ( NULL ) );
+
+		int featsize = (int)feats.size();
+		int maxfeatures = std::min ( dim*20, featsize );
+
+		NICE::Matrix features ( maxfeatures, (int)feats[0].size() );
+
+		for ( int i = 0; i < maxfeatures; i++ )
+		{
+			int k = rand() % featsize;
+
+			int vsize = (int)feats[k].size();
+			for(int j = 0; j < vsize; j++)
+			{
+				features(i,j) = feats[k][j];
+			}
+		}
+		pca.calculateBasis ( features, dim, 0 );
+
+		if ( save_cache )
+			pca.save ( fn );
+
+	}
+	else
+	{
+		pca.read ( fn );
+	}
+	#ifdef DEBUG_PRINTS
+	cerr << "SemSegRegionBased::initializePCA finished"<< endl;
+	#endif
+}

+ 190 - 0
semseg/SemSegRegionBased.h

@@ -0,0 +1,190 @@
+/**
+* @file SemSegRegionBased.h
+* @brief new semantic segmentation method using regions
+* @author Björn Fröhlich
+* @date 01/29/2010
+*/
+#ifndef SemSegRegionBasedINCLUDE
+#define SemSegRegionBasedINCLUDE
+
+#include "SemanticSegmentation.h"
+
+#include "objrec/classifier/classifierbase/FeaturePoolClassifier.h"
+#include "objrec/classifier/classifierbase/VecClassifier.h"
+
+#include "objrec/segmentation/RegionSegmentationMethod.h"
+
+#include "objrec/math/mathbase/Featuretype.h"
+#include "objrec/features/regionfeatures/RegionFeatures.h"
+#include "objrec/features/localfeatures/LocalFeature.h"
+
+#include "objrec/math/ftransform/PCA.h"
+
+#include "objrec-froehlichexp/semseg/postsegmentation/PPGraphCut.h"
+
+namespace OBJREC
+{
+	class SemSegRegionBased : public SemanticSegmentation
+	{
+		protected:
+	
+			//! destination for saving intermediate steps
+			bool save_cache, read_cache;
+			std::string cache;
+			string classifiercache;
+			
+			//! used ClassNames
+			ClassNames cn;
+
+			//! Classifier
+			VecClassifier *vclassifier;
+			FeaturePoolClassifier *fpc;
+
+			//! Configuration File
+			const Config *conf;
+			
+			//! Segmentation Method
+			RegionSegmentationMethod *rsm;	
+			
+			//! using color Weijer features or not
+			RegionFeatures *rfc;
+			
+			//! using HoGFeatures or not
+			RegionFeatures *rfhog;
+			
+			//! using BoV or not
+			RegionFeatures *rfbov;
+			
+			//! Moosmann Codebook (alternative BoV approach)
+			RegionFeatures *rfbovcrdf;
+			
+			//! old method like used in Csurka
+			RegionFeatures *rfCsurka;
+			
+			//! features for BoV
+			LocalFeature *siftFeats;
+			
+			//! using structure feature
+			RegionFeatures *rfstruct;
+			
+			//! MRF optimization
+			PPGraphCut *gcopt;
+
+		public:
+			/** constructor
+			 *  @param conf needs a configfile
+			 *  @param md and a MultiDataset (contains images and other things)
+			 */
+			SemSegRegionBased(const Config *c, const MultiDataset *md);
+
+			/** simple destructor */
+			virtual ~SemSegRegionBased();
+
+			/** The training step
+			 *  @param md and a MultiDataset (contains images and other things)
+			 */
+			void train(const MultiDataset *md);
+
+			/** The main procedure. Input: Image, Output: Segmented image with pixel-wise labels and the probabilities
+			 * @param ce image data
+			 * @param segresult result of the semantic segmentation with a label for each pixel
+			 * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+			 */
+			void semanticseg(CachedExample *ce, NICE::Image & segresult,  GenericImage<double> & probabilities);
+			/**
+			 * get all features for an Image and save them in Examples
+			 * @param cimg input image
+			 * @param mask region mask
+			 * @param rg region graph
+			 * @param feats output features
+			 */
+			void getFeats(const NICE::ColorImage &cimg, const NICE::Matrix &mask, const RegionGraph &rg, vector<vector< FeatureType> > &feats) const;
+
+			/**
+			 * computes or reads features and corresponding labels for learnHighLevel()
+			 * @param perm input permutation
+			 * @param feats output features
+			 * @param label output label
+			 * @param examples output examples (including label)
+			 * @param mode mode 1 for examples, mode 0 for VVector
+			 */
+			void computeLF(LabeledSet::Permutation perm, VVector &feats, vector<int> &label, Examples &examples, int mode);
+			
+			/**
+			 * Computes HighLevel Codebooks (i.e. GMM or PCA) if necessary
+			 * @param perm training examples
+			 */
+			void learnHighLevel(LabeledSet::Permutation perm);
+
+			/**
+			 * trains the classifier
+			 * @param feats features
+			 */
+			void trainClassifier(vector<vector<FeatureType> > &feats, Examples &examples);
+			
+			/**
+			 * Convert features into examples
+			 * @param feats input features
+			 * @param examples features as examples
+			 */
+			void getExample(const vector<vector<FeatureType> > &feats, Examples &examples);
+			
+			/**
+			 * create featurepool depending on used features
+			 * @param feats input features
+			 * @param fp feature pool
+			 */
+			void getFeaturePool( const vector<vector<FeatureType> > &feats, FeaturePool &fp);
+
+			/**
+			 * classify the given features
+			 * @param feats input features
+			 * @param examples examples
+			 * @param probs probability for each region
+			 */
+			void classify(const vector<vector<FeatureType> > &feats, Examples &examples, vector<vector<double> > &probs);
+			
+			/**
+			 * set the label of each region the to most probable class
+			 * @param rg 
+			 * @param probs 
+			 */
+			void labelRegions(RegionGraph &rg, vector<vector<double> > &probs);
+			
+			/**
+			 * set label of each pixel to label of corresponding region
+			 * @param segresult result image
+			 * @param mask region mask
+			 * @param rg region graph
+			 */
+			void labelImage(NICE::Image &segresult, NICE::Matrix &mask,RegionGraph &rg);
+			
+			/**
+			 * get the label for each region from the groundtruth for learning step and save them in rg
+			 * @param mask region mask 
+			 * @param rg region graph
+			 * @param pixelLabels Groundtruth images
+			 */
+			void getRegionLabel(NICE::Matrix &mask, RegionGraph &rg, NICE::Image &pixelLabels);
+			
+			/**
+			 * train pca
+			 * @param feats input features
+			 * @param pca pca
+			 * @param dim new dimension
+			 * @param fn destination filename
+			 */
+			void initializePCA ( const VVector &feats, PCA &pca, int dim, string &fn );
+			
+			/**
+			 * transform features using a given pca
+			 * @param feats input and output features
+			 * @param pca 
+			 */
+			void transformFeats(VVector &feats, PCA &pca);
+
+	};
+
+} // namespace
+
+#endif
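
For orientation, a minimal usage sketch of the new class. The Config/MultiDataset construction and the file names are assumptions modelled on the accompanying test programs; only the SemSegRegionBased calls declared above are used.

#include "objrec-froehlichexp/semseg/SemSegRegionBased.h"

using namespace OBJREC;

int main ()
{
    Config conf ( "semseg.conf" );              // assumed: settings read from a config file
    MultiDataset md ( &conf );                  // assumed: train/test sets defined in the config

    SemSegRegionBased semseg ( &conf, &md );    // sets up segmentation, features and classifiers
    semseg.train ( &md );                       // training step (uses caches if configured)

    NICE::Image segresult;
    GenericImage<double> probabilities;
    semseg.semanticseg ( "scene.png", segresult, probabilities );  // hypothetical test image
    return 0;
}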

+ 233 - 0
semseg/SemSegSTF.cpp

@@ -0,0 +1,233 @@
+/** 
+* @file SemSegSTF.cpp
+* @brief semantic segmentation using semantic texton forests (STF)
+* @author Erik Rodner
+* @date 02/11/2008
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+
+#include "SemSegSTF.h"
+#include "objrec/baselib/Globals.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/ProgressBar.h"
+#include "objrec/baselib/StringTools.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/cbaselib/PascalResults.h"
+
+#include "objrec/features/fpfeatures/PixelPairFeature.h"
+#include "objrec/features/fpfeatures/SemanticFeature.h"
+
+#include "objrec/features/fpfeatures/FIGradients.h"
+
+#include "FIShotton.h"
+
+#include "SemSegTools.h"
+
+using namespace OBJREC;
+
+using namespace std;
+using namespace NICE;
+
+
+
+SemSegSTF::SemSegSTF( const Config *conf, 
+		    const MultiDataset *md )
+    : SemanticSegmentation ( conf, &(md->getClassNames("train")) )
+{
+    use_semantic_features = conf->gB("bost", "use_semantic_features", true );
+    use_pixelpair_features = conf->gB("bost", "use_pixelpair_features", true );
+    subsamplex = conf->gI("bost", "subsamplex", 5);
+    subsampley = conf->gI("bost", "subsampley", 5);
+    numClasses = md->getClassNames("train").numClasses();
+
+    read_pixel_cache = conf->gB("FPCPixel", "read_cache", false );
+    cachepixel = conf->gS("FPCPixel", "cache", "fpc.data" );
+
+    read_seg_cache = conf->gB("FPCSeg", "read_cache", true );
+    cacheseg = conf->gS("FPCSeg", "cache", "segforest.data" );
+
+    Examples examples;
+    vector<CachedExample *> imgexamples;
+
+    fpcPixel = new FPCRandomForests ( conf, "FPCPixel" );
+    fpcPixel->setMaxClassNo ( classNames->getMaxClassno() );
+    if ( !read_pixel_cache || !read_seg_cache ) 
+    {
+	// Generate Positioned Examples
+	SemSegTools::collectTrainingExamples ( conf, "FPCPixel", *((*md)["train"]), *classNames, 
+	    examples, imgexamples );
+    }
+
+    if ( ! read_pixel_cache ) 
+    {	
+	///////////////////////////////////
+	// Train Single Pixel Classifier
+	//////////////////////////////////
+
+	FeaturePool fp;
+	for ( vector<CachedExample *>::const_iterator k = imgexamples.begin(); 
+			k != imgexamples.end();
+			k++ )
+		fillCachePixel (*k);
+    
+	PixelPairFeature hf (conf);
+	hf.explode ( fp );
+
+	fpcPixel->train ( fp, examples );
+	fpcPixel->save ( cachepixel );
+
+	fp.destroy();
+    } else {
+	fprintf (stderr, "SemSegSTF:: Reading pixel classifier data from %s\n", cachepixel.c_str() );
+	fpcPixel->read ( cachepixel );
+    }
+    
+    fpcSeg = new FPCRandomForests ( conf, "FPCSeg" );
+    fpcSeg->setMaxClassNo ( classNames->getMaxClassno() );
+    maxdepthSegmentationForest = conf->gI("bost", "maxdepth", 5);
+    maxdepthSegmentationForestScores = conf->gI("bost", "maxdepth_scores", 9999);
+
+    if ( ! read_seg_cache ) 
+    {
+	///////////////////////////////////
+	// Train Segmentation Forest
+	//////////////////////////////////
+
+	fprintf (stderr, "Calculating Prior Statistics\n");
+	ProgressBar pbseg ("Calculating Prior Statistics");
+	pbseg.show();
+	for ( vector<CachedExample *>::const_iterator k = imgexamples.begin(); 
+			    k != imgexamples.end();
+			    k++ )
+	{
+	    pbseg.update ( imgexamples.size() );
+	    fillCacheSegmentation ( *k );
+	}
+	pbseg.hide();
+	
+	FeaturePool fp;
+
+	if ( use_semantic_features )
+	{
+	    set<int> classnos;
+	    classNames->getSelection ( conf->gS("FPCSeg", "train_selection")
+		, classnos );
+	    SemanticFeature sf ( conf, &classnos );
+	    sf.explode ( fp );
+	}
+
+	fprintf (stderr, "Training Segmentation Forest\n");
+
+	fpcSeg->train ( fp, examples );
+	fpcSeg->save ( cacheseg );
+
+	// clean up memory !!
+	for ( vector<CachedExample *>::iterator i = imgexamples.begin();
+	    	    i != imgexamples.end();
+		    i++ )
+	    delete ( *i );
+
+	fp.destroy();
+	
+    } else {
+	fprintf (stderr, "SemSegSTF:: Reading region classifier data from %s\n", cacheseg.c_str() );
+	fpcSeg->read ( cacheseg );
+	fprintf (stderr, "SemSegSTF:: successfully read\n" );
+    }
+}
+
+SemSegSTF::~SemSegSTF()
+{
+}
+
+void SemSegSTF::fillCacheSegmentation ( CachedExample *ce )
+{
+    FIShotton::buildSemanticMap ( ce,
+		      fpcPixel,
+		      subsamplex,
+		      subsampley, 
+		      numClasses );
+}
+
+void SemSegSTF::fillCachePixel ( CachedExample *ce )
+{
+}
+
+void SemSegSTF::semanticseg ( CachedExample *ce, 
+			   NICE::Image & segresult,
+			   GenericImage<double> & probabilities )
+{
+    int xsize;
+    int ysize;
+    ce->getImageSize ( xsize, ysize );
+    
+    int numClasses = classNames->numClasses();
+    
+    fillCachePixel ( ce );
+    fillCacheSegmentation ( ce );
+
+    fprintf (stderr, "BoST classification !\n");
+
+    Example pce ( ce, 0, 0 );
+
+    int xsize_s = xsize / subsamplex;
+    int ysize_s = ysize / subsampley;
+    ClassificationResult *results = new ClassificationResult [xsize_s*ysize_s];
+
+    /** classify each pixel of the image */
+    FullVector prior ( classNames->getMaxClassno() );
+    
+    probabilities.reInit ( xsize_s, ysize_s, numClasses, true );
+    probabilities.setAll ( 0 );
+
+    long offset_s = 0;
+    for ( int ys = 0 ; ys < ysize_s ; ys ++ ) 
+	for ( int xs = 0 ; xs < xsize_s ; xs++,offset_s++ )  
+	{
+	    int x = xs * subsamplex;
+	    int y = ys * subsampley;
+	    pce.x = x ; pce.y = y ; 
+	    results[offset_s] = fpcSeg->classify ( pce );
+
+	    for ( int i = 0 ; i < results[offset_s].scores.size(); i++ )
+		probabilities.data[i][offset_s] = results[offset_s].scores[i];
+
+	    /*
+	    if ( imagePriorMethod != IMAGE_PRIOR_NONE ) 
+		prior.add ( results[offset_s].scores );
+	    */
+	}
+
+   
+    fprintf (stderr, "BoST classification ready\n");
+
+    /** save results */
+    segresult.resize(xsize_s, ysize_s); 
+    segresult.set( classNames->classno("various") );
+
+    long int offset = 0;
+    for ( int y = 0 ; y < ysize_s ; y++ ) 
+	for ( int x = 0 ; x < xsize_s ; x++,offset++ )  
+	{
+	    double maxvalue = - numeric_limits<double>::max();
+	    int maxindex = 0;
+	    for ( int i = 0 ; i < (int)probabilities.numChannels; i++ )
+		if ( probabilities.data[i][offset] > maxvalue )
+		{
+		    maxindex = i;
+		    maxvalue = probabilities.data[i][offset];
+		}
+
+	    segresult.setPixel(x,y,maxindex);
+	}
+
+    delete [] results;
+}

+ 98 - 0
semseg/SemSegSTF.h

@@ -0,0 +1,98 @@
+/** 
+* @file SemSegSTF.h
+* @brief semantic segmentation using semantic texton forests (STF)
+* @author Erik Rodner
+* @date 02/11/2008
+
+*/
+#ifndef SemSegSTFINCLUDE
+#define SemSegSTFINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+ 
+#include "SemanticSegmentation.h"
+#include "objrec/classifier/fpclassifier/randomforest/DecisionNode.h"
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+
+namespace OBJREC {
+
+/** semantic segmentation using semantic texton forests (STF) */
+class SemSegSTF : public SemanticSegmentation
+{
+    protected:
+ 
+	int numClasses;
+	int maxdepthSegmentationForest;
+	int maxdepthSegmentationForestScores;
+
+	bool use_semantic_features;
+	bool use_pixelpair_features;
+
+	int subsamplex;
+	int subsampley;
+
+	bool read_seg_cache;
+	bool read_pixel_cache;
+	// refactor-nice.pl: check this substitution
+	// old: string cacheseg;
+	std::string cacheseg;
+	// refactor-nice.pl: check this substitution
+	// old: string cachepixel;
+	std::string cachepixel;
+
+	// refactor-nice.pl: check this substitution
+	// old: string resultsdatadir;
+	std::string resultsdatadir;
+
+	double alphaDetectionPrior;
+	double alphaImagePrior;
+	
+	int imagePriorMethod;
+	int priorK;
+
+	int detectionPriorMethod;
+
+	/** 
+	    assign all pixels with
+	    normalized entropy $E/log(n)$ above
+	    this threshold to the
+	    background class 
+	*/
+	double entropyThreshold;
+
+	int backgroundModelType;
+
+	map<DecisionNode *, pair<long, int> > index;
+	FPCRandomForests *fpcSeg;
+	FPCRandomForests *fpcPixel;
+
+	map<string, LocalizationResult *> detresults;
+
+	void fillCachePixel ( CachedExample *ce );
+	void fillCacheSegmentation ( CachedExample *ce );
+
+    public:
+	/** simple constructor */
+	SemSegSTF( const Config *conf,
+  	              const MultiDataset *md );
+      
+	/** simple destructor */
+	virtual ~SemSegSTF();
+
+	void semanticseg ( CachedExample *ce, 
+			   // refactor-nice.pl: check this substitution
+			   // old: Image & segresult,
+			   NICE::Image & segresult,
+			   GenericImage<double> & probabilities );
+
+};
+
+
+} // namespace
+
+#endif

+ 115 - 0
semseg/SemSegTools.cpp

@@ -0,0 +1,115 @@
+/** 
+* @file SemSegTools.cpp
+* @brief tools for semantic segmentation
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#include <iostream>
+
+#include "SemSegTools.h"
+
+using namespace OBJREC;
+
+using namespace std;
+using namespace NICE;
+
+#undef DEBUG_LOCALIZATION
+
+void SemSegTools::collectTrainingExamples ( 
+			  const Config * conf,
+			  const std::string & section,
+			  const LabeledSet & train,
+			  const ClassNames & cn,
+			  Examples & examples,
+			  vector<CachedExample *> & imgexamples )
+{
+    assert ( train.count() > 0 );
+    examples.clear();
+    imgexamples.clear();
+
+    int grid_size_x = conf->gI(section, "grid_size_x", 5 );
+    int grid_size_y = conf->gI(section, "grid_size_y", 5 );
+    int grid_border_x = conf->gI(section, "grid_border_x", 20 );
+    int grid_border_y = conf->gI(section, "grid_border_y", 20 );
+
+    std::string selection = conf->gS(section, "train_selection" );
+
+    set<int> classnoSelection;
+    cn.getSelection ( selection, classnoSelection );
+    
+    bool useExcludedAsBG = conf->gB(section, "use_excluded_as_background", false );
+
+    int backgroundClassNo = 0;
+    
+    if ( useExcludedAsBG ) 
+    {
+	backgroundClassNo = cn.classno("various");
+	assert ( backgroundClassNo >= 0 );
+    }
+
+    LOOP_ALL_S (train)
+    {
+	EACH_INFO(image_classno,imgInfo);
+	std::string imgfn = imgInfo.img();
+
+	if ( ! imgInfo.hasLocalizationInfo() ) {
+	    fprintf (stderr, "WARNING: NO localization info found for %s !\n",
+		imgfn.c_str() );
+	    continue;	
+	}
+
+	int xsize, ysize;
+	CachedExample *ce = new CachedExample ( imgfn );
+	ce->getImageSize ( xsize, ysize );
+	imgexamples.push_back ( ce );
+
+	const LocalizationResult *locResult = imgInfo.localization();
+	if ( locResult->size() <= 0 ) {
+	    fprintf (stderr, "WARNING: NO ground truth polygons found for %s !\n",
+		imgfn.c_str());
+	    continue;	
+	}
+
+	fprintf (stderr, "SemSegTools: Collecting pixel examples from localization info: %s\n", 
+	    imgfn.c_str() );
+
+	NICE::Image pixelLabels (xsize, ysize);
+	pixelLabels.set(0);
+	locResult->calcLabeledImage ( pixelLabels, cn.getBackgroundClass() );
+
+#ifdef DEBUG_LOCALIZATION
+	NICE::Image img (imgfn);
+	showImage(img);
+	showImage(pixelLabels);
+#endif
+
+	Example pce ( ce, 0, 0 );
+	for ( int x = 0 ; x < xsize ; x += grid_size_x ) 
+	    for ( int y = 0 ; y < ysize ; y += grid_size_y )  
+	    {
+		if ( (x >= grid_border_x) &&
+		    ( y >= grid_border_y ) && ( x < xsize - grid_border_x ) &&
+		    ( y < ysize - grid_border_y ) )
+		{
+		    pce.x = x; pce.y = y;
+		    int classno = pixelLabels.getPixel(x,y);
+
+		    if ( classnoSelection.find(classno) != classnoSelection.end() ) {
+			examples.push_back ( pair<int, Example> (
+			    classno, 
+			    pce // FIXME: offset handling
+			) );
+		    } else if ( useExcludedAsBG ) {
+			examples.push_back ( pair<int, Example> (
+			    backgroundClassNo, 
+			    pce // FIXME: offset handling
+			) );
+		    } 
+		}
+	    }
+    }
+
+    fprintf (stderr, "total number of examples: %d\n", (int)examples.size() );
+}
+

+ 55 - 0
semseg/SemSegTools.h

@@ -0,0 +1,55 @@
+/** 
+* @file SemSegTools.h
+* @brief tools for semantic segmentation
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef SEMSEGTOOLSINCLUDE
+#define SEMSEGTOOLSINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+  
+#include "objrec/baselib/Config.h"
+#include "objrec/cbaselib/MultiDataset.h"
+#include "objrec/cbaselib/Example.h"
+#include "objrec/cbaselib/CachedExample.h"
+
+
+namespace OBJREC {
+
+/** tools for semantic segmentation */
+class SemSegTools
+{
+
+    protected:
+
+    public:
+ 
+	/** collect pixel-wise training examples 
+	    from a set of images 
+	    @param conf includes settings about grid size etc.
+	    @param section section of the config
+	    @param train set of training images with localization information
+	    @param cn classNames object
+	    @param examples resulting pixel-wise examples
+	    @param imgexamples image based caching structure referenced by pixel-wise examples
+	*/
+	static void collectTrainingExamples ( 
+			  const Config * conf,
+			  const std::string & section,
+			  const LabeledSet & train,
+			  const ClassNames & cn,
+			  Examples & examples,
+			  vector<CachedExample *> & imgexamples );
+    
+};
+
+
+} // namespace
+
+#endif
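
A short sketch of how this helper is used elsewhere in this commit; conf, md and classNames are assumed to be the Config, MultiDataset and ClassNames objects of the calling segmentation class, and "FPCPixel" is just an example section name.

Examples examples;
std::vector<CachedExample *> imgexamples;

// sample pixel-wise training examples on a regular grid from the "train" dataset
SemSegTools::collectTrainingExamples ( conf, "FPCPixel", *((*md)["train"]), *classNames,
                                       examples, imgexamples );

// ... build a FeaturePool and train a classifier on the examples ...

// the caller owns the cached images referenced by the examples
for ( size_t i = 0; i < imgexamples.size(); i++ )
    delete imgexamples[i];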

+ 184 - 0
semseg/SemanticSegmentation.cpp

@@ -0,0 +1,184 @@
+/** 
+* @file SemanticSegmentation.cpp
+* @brief abstract interface for semantic segmentation algorithms
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#include <iostream>
+
+#include "SemanticSegmentation.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/Globals.h"
+
+using namespace OBJREC;
+
+using namespace std;
+
+using namespace NICE;
+void SemanticSegmentation::convertLSetToSparseExamples(Examples &examples, LabeledSetVector &lvec)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertLSetToSparseExamples starts" << endl;
+	#endif
+	for( map< int, vector<NICE::Vector *> >::iterator iter = lvec.begin(); iter != lvec.end(); ++iter ) 
+	{
+		for(int j = 0; j < (int)iter->second.size(); j++)
+		{
+			Vector &tmp = *(iter->second[j]);
+			int dim = tmp.size();
+			SparseVector *vec = new SparseVector(dim);
+			for(int d = 0; d < dim; d++)
+			{
+				if(tmp[d] != 0.0)
+				{
+					(*vec)[d] = tmp[d];
+				}
+			}
+			Example ex;
+			ex.svec = vec;
+			examples.push_back(pair<int, Example> ( iter->first, ex));
+		}
+	}	
+	lvec.clear();
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertLSetToSparseExamples finished" << endl;
+	#endif
+}
+
+void SemanticSegmentation::convertLSetToExamples(Examples &examples, LabeledSetVector &lvec)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertLSetToExamples starts" << endl;
+	#endif
+	for( map< int, vector<NICE::Vector *> >::iterator iter = lvec.begin(); iter != lvec.end(); ++iter ) 
+	{
+		for(int j = 0; j < iter->second.size(); j++)
+		{
+			NICE::Vector *vec = new NICE::Vector(*(iter->second[j]));
+			Example ex(vec);
+			examples.push_back(pair<int, Example> ( iter->first, ex));
+		}
+	}	
+	lvec.clear();
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertLSetToExamples finished" << endl;
+	#endif
+}
+
+void SemanticSegmentation::convertExamplesToLSet(Examples &examples, LabeledSetVector &lvec)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertExamplesToLSet starts" << endl;
+	#endif
+	lvec.clear();
+	for(int i = 0; i < (int)examples.size(); i++)
+	{
+		if(examples[i].second.vec != NULL)
+		{
+			lvec.add(examples[i].first, *examples[i].second.vec);
+			delete examples[i].second.vec;
+			examples[i].second.vec = NULL;
+		}
+		else
+		{
+			if(examples[i].second.svec != NULL)
+			{
+				throw("Transform SVEC to VEC not yet implemented");
+			}
+			else
+			{
+				throw("no features for LabeledSet");
+			}
+		}
+
+	}
+	examples.clear();  
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertExamplesToLSet finished" << endl;
+	#endif
+}
+
+void SemanticSegmentation::convertExamplesToVVector(VVector &feats,Examples &examples, vector<int> &label)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertExamplesToVVector starts" << endl;
+	#endif
+	feats.clear();
+	label.clear();
+	for(int i = 0; i < (int)examples.size(); i++)
+	{
+		label.push_back(examples[i].first);
+		feats.push_back(*examples[i].second.vec);
+		delete examples[i].second.vec;
+		examples[i].second.vec = NULL;
+	}
+	examples.clear();
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertExamplesToVVector finished" << endl;
+	#endif
+}
+
+void SemanticSegmentation::convertVVectorToExamples(VVector &feats,Examples &examples, vector<int> &label)
+{
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertVVectorToExamples starts" << endl;
+	#endif
+	for(int i = 0; i < (int)feats.size(); i++)
+	{
+		NICE::Vector *v = new NICE::Vector(feats[i]);
+		Example ex(v);
+		ex.position = 0; //TODO: find a better solution here, so that the classifier learns image-specific again
+		examples.push_back (pair<int, Example> ( label[i], ex));
+		feats[i].clear();
+	}
+	feats.clear();
+	label.clear();
+	#ifdef DEBUG_PRINTS
+	cout << "SemanticSegmentation::convertVVectorToExamples finished" << endl;
+	#endif
+}
+
+SemanticSegmentation::SemanticSegmentation( const Config *conf,
+					    const ClassNames *classNames )
+{
+    this->classNames = classNames;
+
+    Preprocess::Init ( conf );
+
+    std::string imagetype_s = conf->gS("main", "imagetype", "rgb");
+
+    if (imagetype_s == "rgb")
+          imagetype = IMAGETYPE_RGB;
+    else if ( imagetype_s == "gray" )
+          imagetype = IMAGETYPE_GRAY; 
+    else {
+	fprintf (stderr, "SemanticSegmentation:: unknown image type option\n");
+	exit(-1);
+    }
+}
+
+SemanticSegmentation::~SemanticSegmentation()
+{
+}
+
+void SemanticSegmentation::semanticseg ( const std::string & filename,
+			   NICE::Image & segresult,
+			   GenericImage<double> & probabilities)
+{
+    Globals::setCurrentImgFN(filename);
+    CachedExample *ce;
+    if ( imagetype == IMAGETYPE_RGB ) 
+    {
+	NICE::ColorImage img = Preprocess::ReadImgAdvRGB ( filename );
+	ce = new CachedExample ( img );
+    } else {
+
+	NICE::Image img = Preprocess::ReadImgAdv ( filename );
+	ce = new CachedExample ( img );
+    }
+    fprintf (stderr, "Starting Semantic Segmentation !\n");
+    semanticseg ( ce, segresult, probabilities);
+    delete ce;
+}
+

+ 82 - 0
semseg/SemanticSegmentation.h

@@ -0,0 +1,82 @@
+/** 
+* @file SemanticSegmentation.h
+* @brief abstract interface for semantic segmentation algorithms
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef SEMANTICSEGMENTATIONINCLUDE
+#define SEMANTICSEGMENTATIONINCLUDE
+
+#include <objrec/nice.h>
+ 
+#include "objrec/cbaselib/MultiDataset.h"
+#include "objrec/cbaselib/LocalizationResult.h"
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/cbaselib/Example.h"
+ 
+
+namespace OBJREC {
+
+/** abstract interface for semantic segmentation algorithms */
+class SemanticSegmentation
+{
+
+    protected:
+	/** accessible class names and information about
+	    number of classes etc. */
+	const ClassNames *classNames;
+
+	/** enum type for imagetype */
+	enum {
+	    IMAGETYPE_RGB = 0,
+	    IMAGETYPE_GRAY
+	};
+
+	/** whether to load images with color information */
+	int imagetype;
+
+    public:
+  
+	/** simple constructor 
+	    @param conf global settings
+	    @param classNames this ClassNames object will be stored as an attribute
+	*/
+	SemanticSegmentation( const Config *conf, 
+			      const ClassNames *classNames );
+      
+	/** simple destructor */
+	virtual ~SemanticSegmentation();
+
+	/** this function has to be overloaded by all subclasses 
+	    @param ce image data
+	    @param segresult result of the semantic segmentation with a label for each
+		   pixel
+	    @param probabilities multi-channel image with one channel for each class and
+			         corresponding probabilities for each pixel
+	*/
+	virtual void semanticseg ( CachedExample *ce, 
+			   NICE::Image & segresult,
+			   GenericImage<double> & probabilities ) = 0;
+			   
+	/**
+	 * convert different datatypes
+	 */
+	void convertVVectorToExamples(VVector &feats,Examples &examples, vector<int> &label);
+	void convertExamplesToVVector(VVector &feats,Examples &examples, vector<int> &label);
+	void convertExamplesToLSet(Examples &examples, LabeledSetVector &lvec);
+	void convertLSetToExamples(Examples &examples, LabeledSetVector &lvec);
+	void convertLSetToSparseExamples(Examples &examples, LabeledSetVector &lvec);
+
+	
+	/** load img from file call localize(CachedExample *ce) etc. */
+	void semanticseg ( const std::string & filename,
+			   NICE::Image & segresult,
+	  GenericImage<double> & probabilities);
+
+};
+
+
+} // namespace
+
+#endif
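
To illustrate the contract of the pure virtual semanticseg method, a hypothetical minimal subclass (the class name is made up; it simply assigns the background class to every pixel and only uses calls that appear in this commit):

#include "SemanticSegmentation.h"

using namespace OBJREC;

// illustrative subclass only, not part of the library
class SemSegConstant : public SemanticSegmentation
{
    public:
	SemSegConstant ( const Config *conf, const ClassNames *cn )
	    : SemanticSegmentation ( conf, cn ) {}

	void semanticseg ( CachedExample *ce,
			   NICE::Image & segresult,
			   GenericImage<double> & probabilities )
	{
	    int xsize, ysize;
	    ce->getImageSize ( xsize, ysize );

	    // one channel per class, all probabilities set to zero
	    probabilities.reInit ( xsize, ysize, classNames->numClasses(), true );
	    probabilities.setAll ( 0.0 );

	    // assign the background class to every pixel
	    segresult.resize ( xsize, ysize );
	    segresult.set ( classNames->getBackgroundClass() );
	}
};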

+ 7 - 0
semseg/libdepend.inc

@@ -0,0 +1,7 @@
+$(call PKG_DEPEND_EXT,OPENMP)
+$(call PKG_DEPEND_INT,objrec/iclassifier)
+$(call PKG_DEPEND_INT,objrec/segmentation)
+$(call PKG_DEPEND_INT,objrec/fourier)
+$(call PKG_DEPEND_INT,objrec/features)
+$(call PKG_DEPEND_INT,objrec/semanticsegmentation)
+#$(call PKG_DEPEND_INT,objrec-froehlichexp/classifier)

+ 8 - 0
semseg/postsegmentation/Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursivly going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
semseg/postsegmentation/Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exitting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusivelly use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 200 - 0
semseg/postsegmentation/PPGraphCut.cpp

@@ -0,0 +1,200 @@
+
+#include "PPGraphCut.h"
+
+#include "objrec/segmentation/RegionGraph.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+void PPGraphCut::setClassNo(int _classno)
+{
+	classno = _classno;
+
+	coocurence = new double[classno*classno];
+	
+	for(int i = 0; i < classno*classno; i++)
+	{
+		coocurence[i] = 0.0;
+	}
+
+}
+
+PPGraphCut::PPGraphCut()
+{
+	conf = new Config();
+	Init();
+}
+
+PPGraphCut::PPGraphCut(const Config *_conf):conf(_conf)
+{
+	Init();
+}
+
+void PPGraphCut::Init()
+{
+	std::string section = "PostProcess";
+	coocurence = NULL;
+}
+
+PPGraphCut::~PPGraphCut()
+{
+	if ( coocurence != NULL )
+		delete [] coocurence;
+}
+
+void PPGraphCut::optimizeImage(RegionGraph &regions, vector<vector<double> > & probabilities)
+{
+	vector<Node*> nodes;
+	regions.get(nodes);
+
+	GCoptimizationGeneralGraph graphcut(nodes.size(), classno);
+
+	graphcut.setSmoothCost(coocurence);
+	
+	map<pair<int,int>, int> pairs;
+	
+	for(int i = 0; i < (int) nodes.size(); i++)
+	{
+		vector<Node*> nbs;
+		nodes[i]->getNeighbors(nbs);
+		int pos1 = nodes[i]->getNumber();
+		for(int j = 0; j < (int)nbs.size(); j++)
+		{
+			int pos2 = nbs[j]->getNumber();
+			pair<int,int> p(std::min(pos1,pos2),std::max(pos1,pos2));
+			map<pair<int,int>, int>::iterator iter = pairs.find(p);
+			if(iter == pairs.end())
+			{
+				pairs.insert(make_pair(p,1));
+				graphcut.setNeighbors(pos1, pos2,1.0);
+			}
+		}
+		for(int l = 0; l < classno; l++)
+		{
+			double val = probabilities[i][l];
+			if(val <= 0.0)
+				val = 1e-10;
+			val = -log(val);
+			graphcut.setDataCost(pos1, l, val);
+		}
+		graphcut.setLabel(pos1, nodes[i]->getLabel());
+	}
+
+	graphcut.swap(20);
+
+	//MRF::EnergyVal E_smooth = graphcut->smoothnessEnergy();
+
+	//MRF::EnergyVal E_data   = graphcut->dataEnergy();
+
+	for (int i = 0; i < (int)nodes.size(); i++ )
+	{
+		regions[i]->setLabel(graphcut.whatLabel(i));
+	}
+}
+
+void PPGraphCut::optimizeImage(Examples &regions, NICE::Matrix &mask, GenericImage<double> & probabilities)
+{
+	RegionGraph g;
+	g.computeGraph(regions, mask);
+
+	vector<vector<double> > probs;
+	
+	for(int p = 0; p < (int)regions.size(); p++)
+	{
+		vector<double> pr;
+		for(int l = 0; l < classno; l++)
+		{
+			pr.push_back(probabilities.get(regions[p].second.x, regions[p].second.y, l));
+		}
+		probs.push_back(pr);
+	}
+
+	optimizeImage(g, probs);
+}
+
+void PPGraphCut::trainImage(RegionGraph &g)
+{
+	vector<Node*> nodes;
+	g.get(nodes);
+
+	for(int i = 0; i < (int) nodes.size(); i++)
+	{
+		vector<Node*> nbs;
+		nodes[i]->getNeighbors(nbs);
+		for(int j = 0; j < (int)nbs.size(); j++)
+		{
+			//if(nodes[i]->getLabel() != nbs[j]->getLabel())
+			coocurence[nodes[i]->getLabel()*classno+nbs[j]->getLabel()]+=1.0;
+		}
+	}
+}
+
+void PPGraphCut::trainImage(Examples &regions, NICE::Matrix &mask)
+{
+	// determine the co-occurrence matrix
+	RegionGraph g;
+	g.computeGraph(regions, mask);
+	trainImage(g);
+}
+		
+void PPGraphCut::finishPP(ClassNames &cn)
+{
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			cout << coocurence[classno*i+j] << " ";
+		}
+		cout << endl;
+	}
+	cout << endl;
+	
+	double weight = conf->gD( "PPGC", "weight", 0.01 );
+	double maxv =  -numeric_limits<double>::max();
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			if(j == i)
+				coocurence[classno*i+j] = 0.0;
+			else
+				maxv = std::max(maxv, coocurence[classno*i+j]);
+		}
+	}
+	
+	maxv+=1+1e-10;
+	
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			if(j == i)
+				coocurence[classno*i+j] = 0.0;
+			else
+				coocurence[classno*i+j] = -weight*(log(( coocurence[classno*i+j]+1.0)/maxv));
+		}
+	}
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			cout << coocurence[classno*i+j] << " ";
+		}
+		cout << endl;
+	}
+	//GetChar();
+}
+
+void PPGraphCut::restore (istream & is, int format)
+{
+	
+}
+		
+void PPGraphCut::store (ostream & os, int format) const
+{
+	
+}
+
+void PPGraphCut::clear()
+{
+	
+}

+ 127 - 0
semseg/postsegmentation/PPGraphCut.h

@@ -0,0 +1,127 @@
+/**
+ * @file PPGraphCut.h
+ * @brief a post-processing step after semantic segmentation which uses a variant of GraphCut
+ * @author Björn Fröhlich
+ * @date 09/08/2009
+
+ */
+#ifndef PPGRAPHCUTINCLUDE
+#define PPGRAPHCUTINCLUDE
+
+#include <objrec/nice.h>
+
+#include "objrec/image/GenericImage.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+#include <objrec/cbaselib/VectorFeature.h>
+
+#include "objrec/cbaselib/ClassNames.h"
+
+#include "objrec/segmentation/RSMeanShift.h"
+
+#include "objrec/mrf/mrfmin/GCoptimization.h"
+
+
+namespace OBJREC {
+
+class PPGraphCut : public Persistent
+{
+
+	protected:	
+		//! the configfile
+		const Config *conf;
+		
+		//! count of classes
+		int classno;
+		
+		//! Shape features
+		Examples shapefeats;
+		
+		//! classifier for shape features
+		FPCRandomForests *rf;
+		
+		double *coocurence;
+				
+	public:
+  
+		/** simple constructor */
+		PPGraphCut();
+		
+		/** simple constructor */
+		PPGraphCut(const Config *_conf);
+      
+		/** simple destructor */
+		~PPGraphCut();
+
+		/**
+		 * set the count of classes
+		 * @param _classno count of classes
+		 */
+		void setClassNo(int _classno);
+     
+		/** initialize the PPGraphCut variables */
+		void Init();
+				
+		/**
+		 * train region
+		 * @param regions input regions with size and position
+		 * @param mask
+		 */
+		void trainImage(Examples &regions, NICE::Matrix &mask);
+		
+		/**
+		 * train region
+		 * @param regions input regions with size and position
+		 */
+		void trainImage(RegionGraph &regions);
+		
+		
+		/**
+		 * finish the priors maps
+		 */
+		void finishPP(ClassNames &cn);
+					
+		/**
+		 * use shape pp
+		 * @param regions 
+		 * @param mask 
+		 * @param probabilities probability maps for each pixel
+		 */
+		void optimizeImage(Examples &regions, NICE::Matrix &mask, GenericImage<double> & probabilities);
+		
+		/**
+		 * use shape pp
+		 * @param regions 
+		 * @param mask 
+		 * @param probabilities for each region
+		 */
+		void optimizeImage(RegionGraph &regions, vector<vector<double> > & probabilities);
+		
+		/**
+		 * load data from an input stream
+		 * @param is input stream
+		 * @param format 
+		 */
+		void restore (istream & is, int format = 0);
+		
+		/**
+		 * write data to an output stream
+		 * @param os outputstream
+		 * @param format 
+		 */
+		void store (ostream & os, int format = 0) const;
+		
+		/**
+		 * clear all informations
+		 */
+		 * clear all information
+};
+
+} //namespace
+
+#endif
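
A hedged sketch of the intended train/apply cycle; conf, cn, the region lists, masks and the probability image are placeholders for the objects produced by the region-based segmentation step.

PPGraphCut gcopt ( conf );
gcopt.setClassNo ( cn.numClasses() );

// training phase: accumulate label co-occurrence counts, one call per training image
gcopt.trainImage ( trainRegions, trainMask );

// turn the accumulated counts into smoothness costs for the graph cut
gcopt.finishPP ( cn );

// test phase: relabel the regions of a new image using its per-class probabilities
gcopt.optimizeImage ( testRegions, testMask, probabilities );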

+ 275 - 0
semseg/postsegmentation/PPSuperregion.cpp

@@ -0,0 +1,275 @@
+#include "PPSuperregion.h"
+
+#include <core/iceconversion/convertice.h>
+
+#include "objrec/segmentation/RegionGraph.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+PPSuperregion::PPSuperregion()
+{
+	conf = new Config();
+	Init();
+}
+
+PPSuperregion::PPSuperregion(const Config *_conf):conf(_conf)
+{
+	Init();
+}
+
+void PPSuperregion::Init()
+{
+	std::string section = "PostProcessSG";
+	rf = new FPCRandomForests( conf, "ShapeRF" );
+}
+
+PPSuperregion::~PPSuperregion()
+{
+}
+
+void PPSuperregion::optimizeShape(Examples &regions, NICE::Matrix &mask, GenericImage<double> & probabilities)
+{
+
+	vector<ice::Region> superregions;
+	vector<double> probs;
+	vector<int> classes;
+	NICE::Matrix smask;
+	getSuperregions(regions, mask, superregions, classes, smask);
+	
+	for(int i = 0; i < (int)superregions.size(); i++)
+	{
+		ice::Moments m;
+		superregions[i].CalcMoments(m);
+
+		NICE::Vector tmp = makeEVector(m.AffineHuInvariants());
+		NICE::Vector *tmp2 = new NICE::Vector(tmp);
+		Example tex(tmp2);		
+		
+		ClassificationResult r = rf->classify ( tex );
+
+		probs.push_back(r.scores[classes[i]]);
+	}
+
+	vector<ice::Region> orgregions;
+	for(int i = 0; i < (int)regions.size(); i++)
+	{
+		orgregions.push_back(ice::Region());
+	}
+	
+	for(int y = 0; y < (int)mask.cols(); y++)
+	{
+		for(int x = 0; x < (int)mask.rows(); x++)
+		{
+			int pos = mask(x,y);
+			orgregions[pos].Add(x,y);
+		}
+	}
+
+	// maps the regions to their superregions
+	vector<int> regmapsreg(regions.size(), 0);
+	for(int y = 0; y < (int)smask.cols(); y++)
+	{
+		for(int x = 0; x < (int)smask.rows(); x++)
+		{
+			int r = mask(x,y);
+			int sr = smask(x,y);
+			regmapsreg[r] = sr;
+		}
+	}
+	
+	RegionGraph g;
+	g.computeGraph(regions, mask);
+	
+	vector<Node*> nodes;
+	g.get(nodes);
+
+	bool change = true;
+	int k = 0;
+	while(change && k < 100)
+	{
+		k++;
+		change = false;
+		int anders = 0;
+		for(int i = 0; i < (int) nodes.size(); i++)
+		{
+
+			set<int> sr;
+			int regnb = nodes[i]->getRegion();
+			int orgreg = regmapsreg[regnb];
+
+			if(nodes[i]->isAtBorder())
+			{
+				vector<Node*> nbs;
+				nodes[i]->getNeighbors(nbs);
+				for(int j = 0; j < (int)nbs.size(); j++)
+					sr.insert(regmapsreg[nbs[j]->getRegion()]);
+			}
+	
+			vector<double> otherprobs;
+
+			ice::Region re = superregions[orgreg];
+			re.Del(orgregions[regnb]);
+
+			ice::Moments m;
+
+			if(re.Area() > 0)
+			{
+				re.CalcMoments(m);
+
+				NICE::Vector tmp = makeEVector( m.AffineHuInvariants());
+				NICE::Vector *tmp2 = new NICE::Vector(tmp);
+				Example tex(tmp2);
+				ClassificationResult r = rf->classify ( tex );
+				tex.vec = NULL;
+				delete tmp2;
+				
+				double val = probabilities.get(regions[regnb].second.x, regions[regnb].second.y, classes[orgreg]) * r.scores[classes[orgreg]];
+				
+				otherprobs.push_back(val);
+				if(otherprobs[0] < probs[orgreg])
+					continue;
+			}
+			
+			for( set<int>::const_iterator iter = sr.begin();iter != sr.end();++iter )
+			{
+				ice::Moments m2;
+				ice::Region re2 = superregions[regmapsreg[*iter]];
+				re2.Add(orgregions[regnb]);
+				re2.CalcMoments(m2);
+				NICE::Vector tmp = makeEVector(m2.AffineHuInvariants());
+				NICE::Vector *tmp2 = new NICE::Vector(tmp);
+				Example tex(tmp2);
+				ClassificationResult r2 = rf->classify ( tex );
+				tex.vec = NULL;
+				delete tmp2;
+				
+				double val = probabilities.get(regions[regnb].second.x, regions[regnb].second.y, classes[*iter]) * r2.scores[classes[*iter]];
+				
+				otherprobs.push_back(val);
+			}
+
+			int k = 1;
+			int best = -1;
+			double bestval = -1.0;
+			for( set<int>::const_iterator iter = sr.begin();iter != sr.end();++iter, k++ ) 
+			{
+				if(otherprobs[k] > probs[*iter])
+				{
+					if(bestval < otherprobs[k])
+					{
+						bestval = otherprobs[k];
+						best = *iter;
+					}
+				}
+			}
+			
+			if(best < 0 || bestval <= 0.0)
+				continue;
+			
+			change = true;
+
+			probs[best] = bestval;
+
+			superregions[best].Add(orgregions[regnb]);
+
+			probs[orgreg] = otherprobs[0];
+
+			superregions[orgreg].Del(orgregions[regnb]);
+
+			regmapsreg[regnb] = best;
+
+			nodes[i]->setLabel(classes[best]);
+			anders++;
+		}
+	}
+	
+	for(int i = 0; i < (int)regions.size(); i++)
+	{
+		regions[i].first = classes[regmapsreg[i]];
+	}
+}
+
+void PPSuperregion::getSuperregions(const Examples &regions, const NICE::Matrix &mask, vector<ice::Region> &superregions, vector<int> &classes, NICE::Matrix &smask)
+{
+	NICE::Image tmp (mask.rows(), mask.cols());
+	tmp.set(0);
+	NICE::ColorImage m2 (tmp, tmp, tmp);
+	for(int y = 0; y < (int)mask.cols(); y++)
+	{
+		for(int x = 0; x < (int)mask.rows(); x++)
+		{
+			int pos = mask(x,y);
+
+			m2.setPixel(x,y,0,regions[pos].first);
+			m2.setPixel(x,y,1,regions[pos].first);
+			m2.setPixel(x,y,2,regions[pos].first);
+		}
+	}
+
+	RSMeanShift rs(conf);
+	int count = rs.transformSegmentedImg( m2, smask);
+	
+	classes.resize(count);
+	for(int i = 0; i < count; i++)
+	{
+		superregions.push_back(ice::Region());
+	}
+	
+	for(int y = 0; y < (int)smask.cols(); y++)
+	{
+		for(int x = 0; x < (int)smask.rows(); x++)
+		{
+			int pos = smask(x,y);
+			superregions[pos].Add(x,y);
+			classes[pos] = regions[mask(x,y)].first;
+		}
+	}
+}
+
+void PPSuperregion::trainShape(Examples &regions, NICE::Matrix &mask)
+{
+	// determine the superregions
+	vector<ice::Region> superregions;
+	vector<int> classes;
+	// refactor-nice.pl: check this substitution
+	// old: Image smask;
+	NICE::Matrix smask;
+	getSuperregions(regions, mask, superregions, classes, smask);
+	
+	// compute the moments of the superregions and store them as features
+	for(int i = 0; i < (int)superregions.size(); i++)
+	{
+		ice::Moments m;
+		superregions[i].CalcMoments(m);
+		NICE::Vector tmp = makeEVector(m.AffineHuInvariants());
+		NICE::Vector *tmp2 = new NICE::Vector(tmp);
+		shapefeats.push_back(pair<int, Example>(classes[i], Example(tmp2)));
+	}
+}
+		
+void PPSuperregion::finishShape(ClassNames &cn)
+{
+	// train the classifier on the shape features
+	FeaturePool fp;
+	Feature *f = new VectorFeature ( 7 );
+	f->explode ( fp );
+	delete f;
+	rf->train ( fp, shapefeats);
+}
+
+void PPSuperregion::restore (istream & is, int format)
+{
+	
+}
+		
+void PPSuperregion::store (ostream & os, int format) const
+{
+	
+}
+
+void PPSuperregion::clear()
+{
+	
+}

+ 117 - 0
semseg/postsegmentation/PPSuperregion.h

@@ -0,0 +1,117 @@
+/**
+ * @file PPSuperregion.h
+ * @brief a post-processing step after semantic segmentation which uses a variant of region growing
+ * @author Björn Fröhlich
+ * @date 08/19/2009
+
+ */
+#ifndef PPSUPERREGIONINCLUDE
+#define PPSUPERREGIONINCLUDE
+
+#include <objrec/nice.h>
+
+#include "objrec/image/GenericImage.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+#include "objrec/cbaselib/VectorFeature.h"
+
+#include "objrec/cbaselib/ClassNames.h"
+
+#include "objrec/segmentation/RSMeanShift.h"
+
+#include <image_nonvis.h>
+
+namespace OBJREC {
+
+class PPSuperregion : public Persistent
+{
+
+	protected:	
+		//! the configfile
+		const Config *conf;
+		
+		//! count of classes
+		int classno;
+		
+		//! Shape features
+		Examples shapefeats;
+		
+		//! classifier for shape features
+		FPCRandomForests *rf;
+				
+	public:
+  
+		/** simple constructor */
+		PPSuperregion();
+		
+		/** simple constructor */
+		PPSuperregion(const Config *_conf);
+      
+		/** simple destructor */
+		~PPSuperregion();
+
+		/**
+		 * set the count of classes
+		 * @param _classno count of classes
+		 */
+		void setClassNo(int _classno);
+     
+		/** initialize the PPSuperregion variables */
+		void Init();
+
+		/**
+		 * combines connected regions with the same label into superregions
+		 * @param regions the input regions
+		 * @param mask the mask for the regions
+		 * @param superregions the resulting superregions
+		 * @param classes the class labels of the superregions
+		 * @param smask the mask of the superregions
+		 */
+		void getSuperregions(const Examples &regions, const NICE::Matrix &mask, vector<ice::Region> &superregions, vector<int> &classes, NICE::Matrix &smask);
+				
+		/**
+		 * learn the shape of the regions
+		 * @param regions input regions with size and position
+		 * @param mask 
+		 */
+		void trainShape(Examples &regions, NICE::Matrix &mask);
+		
+		/**
+		 * finish the priors maps
+		 */
+		void finishShape(ClassNames &cn);
+					
+		/**
+		 * use shape pp
+		 * @param regions 
+		 * @param mask 
+		 */
+		void optimizeShape(Examples &regions, NICE::Matrix &mask, GenericImage<double> & probabilities);
+		
+		/**
+		 * load data from an input stream
+		 * @param is input stream
+		 * @param format 
+		 */
+		void restore (istream & is, int format = 0);
+		
+		/**
+		 * write data to an output stream
+		 * @param os outputstream
+		 * @param format 
+		 */
+		void store (ostream & os, int format = 0) const;
+		
+		/**
+		 * clear all informations
+		 * clear all information
+		void clear ();
+};
+
+} //namespace
+
+#endif
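
The intended call sequence, sketched with placeholder variables (conf, cn, region lists, masks and the probability image come from the surrounding segmentation pipeline):

PPSuperregion ppsr ( conf );

// training phase: collect affine moment invariants of the superregions, one call per image
ppsr.trainShape ( trainRegions, trainMask );

// train the random forest on the collected shape features
ppsr.finishShape ( cn );

// test phase: move regions between superregions where this improves the shape score
ppsr.optimizeShape ( testRegions, testMask, probabilities );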

+ 75 - 0
semseg/postsegmentation/PSSBackgroundModel.cpp

@@ -0,0 +1,75 @@
+/** 
+* @file PSSBackgroundModel.cpp
+* @brief simple background models
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+
+#include "PSSBackgroundModel.h"
+
+using namespace OBJREC;
+
+using namespace std;
+// refactor-nice.pl: check this substitution
+// old: using namespace ice;
+using namespace NICE;
+
+
+
+PSSBackgroundModel::PSSBackgroundModel( int backgroundModelType, double threshold, int backgroundClass )
+{
+    this->backgroundModelType = backgroundModelType;
+    this->threshold = threshold;
+    this->backgroundClass = backgroundClass;
+}
+
+PSSBackgroundModel::~PSSBackgroundModel()
+{
+}
+
+	
+// refactor-nice.pl: check this substitution
+// old: void PSSBackgroundModel::postprocess ( Image & result, GenericImage<double> & probabilities )
+void PSSBackgroundModel::postprocess ( NICE::Image & result, GenericImage<double> & probabilities )
+{
+    if ( backgroundModelType == BGM_FIXED_ENTROPY_THRESHOLD )
+    {
+	if ( threshold >= 1.0 ) return;
+
+	int numClasses = probabilities.numChannels;
+	double t = log(numClasses)*threshold;
+	int xsize = probabilities.xsize;
+	int ysize = probabilities.ysize;
+	int offset_s = 0;
+	for (  int ys = 0 ; ys < ysize ; ys ++ ) 
+	    for ( int xs = 0 ; xs < xsize ; xs++,offset_s++ )  
+	    {
+		double entropy = 0.0;
+		double sum = 0.0;
+		for ( int i = 0 ; i < numClasses ; i++ )
+		{
+		    double val = probabilities.data[i][offset_s];
+		    if ( val <= 0.0 ) continue;
+		    entropy -= val*log(val);
+		    sum += val;
+		}
+		entropy /= sum;
+		entropy += log(sum);
+		
+		if ( entropy > t )
+		    result.setPixel(xs,ys,backgroundClass);
+	    }
+    } else if ( backgroundModelType == BGM_ADAPTIVE_ENTROPY_THRESHOLD ) {
+	fprintf (stderr, "not yet implemented !!\n");
+	exit(-1);
+    }
+
+}

+ 56 - 0
semseg/postsegmentation/PSSBackgroundModel.h

@@ -0,0 +1,56 @@
+/** 
+* @file PSSBackgroundModel.h
+* @brief simple background models
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef PSSBACKGROUNDMODELINCLUDE
+#define PSSBACKGROUNDMODELINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+ 
+#include "PostSemSeg.h"
+
+
+namespace OBJREC {
+
+/** simple background models */
+class PSSBackgroundModel : public PostSemSeg
+{
+
+    protected:
+	int backgroundModelType;
+	double threshold;
+	int backgroundClass;
+
+    public:
+	enum {
+	    BGM_FIXED_ENTROPY_THRESHOLD = 0,
+	    BGM_ADAPTIVE_ENTROPY_THRESHOLD,
+	    BGM_NONE
+	};
+
+	/** 
+	    @param backgroundModelType select one method from the enum type of this class
+	    @param threshold some methods need a threshold or parameter
+	*/
+	PSSBackgroundModel ( int backgroundModelType, double threshold, int backgroundClass );
+      
+	/** simple destructor */
+	virtual ~PSSBackgroundModel();
+	
+	// refactor-nice.pl: check this substitution
+	// old: virtual void postprocess ( Image & result, GenericImage<double> & probabilities );
+	virtual void postprocess ( NICE::Image & result, GenericImage<double> & probabilities );
+     
+};
+
+
+} // namespace
+
+#endif
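
A hedged usage sketch; the threshold value is only an example, classNames is the ClassNames pointer of the segmentation class, and segresult/probabilities are the outputs of a SemanticSegmentation subclass.

// map all pixels whose normalized label entropy exceeds 0.75 to the background class
PSSBackgroundModel bgm ( PSSBackgroundModel::BGM_FIXED_ENTROPY_THRESHOLD, 0.75,
                         classNames->getBackgroundClass() );
bgm.postprocess ( segresult, probabilities );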

+ 124 - 0
semseg/postsegmentation/PSSImageLevelPrior.cpp

@@ -0,0 +1,124 @@
+/** 
+* @file PSSImageLevelPrior.cpp
+* @brief incorporate prior from image categorization method
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+#include <set>
+#include <assert.h>
+#include <algorithm>
+#include "PSSImageLevelPrior.h"
+
+using namespace OBJREC;
+
+using namespace std;
+// refactor-nice.pl: check this substitution
+// old: using namespace ice;
+using namespace NICE;
+
+
+
+PSSImageLevelPrior::PSSImageLevelPrior( int imagePriorMethod, int priorK, double alphaImagePrior )
+{
+    this->imagePriorMethod = imagePriorMethod;
+    this->priorK = priorK;
+    this->alphaImagePrior = alphaImagePrior;
+}
+
+PSSImageLevelPrior::~PSSImageLevelPrior()
+{
+}
+
+void PSSImageLevelPrior::setPrior ( FullVector & prior )
+{
+    this->prior = prior;
+}
+
+void PSSImageLevelPrior::postprocess ( NICE::Image & result, GenericImage<double> & probabilities )
+{
+    assert ( prior.size() == (int)probabilities.numChannels );
+    int xsize = probabilities.xsize;
+    int ysize = probabilities.ysize;
+
+    if ( imagePriorMethod == IMAGE_PRIOR_BEST_K ) 
+    {
+	vector<int> indices;
+	prior.getSortedIndices ( indices );
+
+	reverse ( indices.begin(), indices.end() );
+	set<int> bestComponents;
+	vector<int>::const_iterator j = indices.begin();
+	if ( indices.size() > (size_t)priorK )
+	    advance ( j, priorK );
+	else
+	    j = indices.end();
+
+	for ( vector<int>::const_iterator jj = indices.begin();
+			    jj != j ; jj++ )
+	    bestComponents.insert ( *jj );
+	
+	int offset_s = 0;
+
+	for (  int ys = 0 ; ys < ysize ; ys ++ ) 
+	    for ( int xs = 0 ; xs < xsize ; xs++,offset_s++ )  
+	    {
+		int maxindex = 0;
+		double maxvalue = - numeric_limits<double>::max();
+		double sum = 0.0;
+		for ( int i = 0 ; i < (int)probabilities.numChannels ; i++ )
+		{
+		    if ( bestComponents.find(i) == bestComponents.end()  )
+			probabilities.data[i][offset_s] = 0.0;
+		    sum += probabilities.data[i][offset_s];
+
+		    if ( probabilities.data[i][offset_s] > maxvalue )
+		    {
+			maxindex = i;
+			maxvalue = probabilities.data[i][offset_s];
+		    }
+		}
+	    	
+		if ( sum > 1e-11 )
+		    for ( int i = 0 ; i < (int)probabilities.numChannels ; i++ )
+		    {
+			probabilities.data[i][offset_s] /= sum;
+		    }
+		
+		result.setPixel(xs,ys,maxindex);
+	    }
+    } else if ( imagePriorMethod == IMAGE_PRIOR_PSEUDOPROB ) {
+	int offset_s = 0;
+	for (  int ys = 0 ; ys < ysize ; ys ++ ) 
+	    for ( int xs = 0 ; xs < xsize ; xs++,offset_s++ )  
+	    {
+		int maxindex = 0;
+		double maxvalue = - numeric_limits<double>::max();
+		double sum = 0.0;
+
+		for ( int i = 0 ; i < (int)probabilities.numChannels ; i++ )
+		{
+		    probabilities.data[i][offset_s] *= pow ( prior[i], alphaImagePrior );
+		    sum += probabilities.data[i][offset_s];
+		    if ( probabilities.data[i][offset_s] > maxvalue )
+                    {
+                        maxindex = i;
+                        maxvalue = probabilities.data[i][offset_s];
+		    }										                    
+		}
+		if ( sum > 1e-11 )
+		    for ( int i = 0 ; i < (int)probabilities.numChannels ; i++ )
+		    {
+			probabilities.data[i][offset_s] /= sum;
+		    }
+		result.setPixel(xs,ys,maxindex);
+	    }
+    }
+}

+ 58 - 0
semseg/postsegmentation/PSSImageLevelPrior.h

@@ -0,0 +1,58 @@
+/** 
+* @file PSSImageLevelPrior.h
+* @brief incorporate prior from image categorization method
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef PSSIMAGELEVELPRIORINCLUDE
+#define PSSIMAGELEVELPRIORINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+  
+#include "PostSemSeg.h"
+#include "objrec/math/mathbase/FullVector.h"
+
+
+namespace OBJREC {
+
+/** incorporate prior from image categorization method */
+class PSSImageLevelPrior : public PostSemSeg
+{
+
+    protected:
+	int imagePriorMethod;
+	FullVector prior;
+
+	int priorK;
+	double alphaImagePrior;
+
+    public:
+	
+	enum {
+	    IMAGE_PRIOR_BEST_K = 0,
+	    IMAGE_PRIOR_PSEUDOPROB
+	};
+  
+	/** simple constructor */
+	PSSImageLevelPrior( int imagePriorMethod, int priorK, double alphaImagePrior );
+      
+	/** simple destructor */
+	virtual ~PSSImageLevelPrior();
+	
+	void setPrior ( FullVector & prior );
+
+	// refactor-nice.pl: check this substitution
+	// old: void postprocess ( Image & result, GenericImage<double> & probabilities );
+	void postprocess ( NICE::Image & result, GenericImage<double> & probabilities );
+     
+};
+
+
+} // namespace
+
+#endif
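
A hedged usage sketch; the prior vector would be filled with the scores of an image categorization method, and segresult/probabilities are the outputs of the pixel-wise segmentation.

// keep only the 5 classes ranked highest by the image-level prior
PSSImageLevelPrior pss ( PSSImageLevelPrior::IMAGE_PRIOR_BEST_K, 5, 1.0 );

FullVector prior ( probabilities.numChannels );
// ... fill prior with the image categorization score of each class ...
pss.setPrior ( prior );

pss.postprocess ( segresult, probabilities );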

+ 168 - 0
semseg/postsegmentation/PSSLocalizationPrior.cpp

@@ -0,0 +1,168 @@
+/** 
+* @file PSSLocalizationPrior.cpp
+* @brief incorporate prior from localization results
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+#include <limits>
+
+#include "PSSLocalizationPrior.h"
+#include "objrec/baselib/StringTools.h"
+#include "objrec/baselib/Globals.h"
+#include "objrec/baselib/FileMgt.h"
+#include "objrec/cbaselib/PascalResults.h"
+
+using namespace OBJREC;
+
+using namespace std;
+// refactor-nice.pl: check this substitution
+// old: using namespace ice;
+using namespace NICE;
+
+
+
+// refactor-nice.pl: check this substitution
+// old: PSSLocalizationPrior::PSSLocalizationPrior( const string & detectiondir, 
+PSSLocalizationPrior::PSSLocalizationPrior( const std::string & detectiondir, 
+					    const ClassNames *classNames,
+					    double alphaDetectionPrior,
+					    int subsamplex, int subsampley )
+{
+    this->subsamplex = subsamplex;
+    this->subsampley = subsampley;
+    this->alphaDetectionPrior = alphaDetectionPrior;
+    loadDetectionResults ( detectiondir, detresults, classNames );
+}
+
+PSSLocalizationPrior::~PSSLocalizationPrior()
+{
+}
+
+// refactor-nice.pl: check this substitution
+// old: void PSSLocalizationPrior::loadDetectionResults ( const string & dir,
+void PSSLocalizationPrior::loadDetectionResults ( const std::string & dir,
+				       map<string, LocalizationResult *> & results,
+				       const ClassNames *classNames )
+{
+    vector<string> files;
+    FileMgt::DirectoryRecursive ( files, dir );
+    int backgroundClassNo = classNames->getBackgroundClass();
+
+    for ( vector<string>::const_iterator i = files.begin();
+	    i != files.end(); i++ )
+    {
+	// refactor-nice.pl: check this substitution
+	// old: string file = *i;
+	std::string file = *i;
+	// refactor-nice.pl: check this substitution
+	// old: string classtext = StringTools::baseName ( file, false );
+	std::string classtext = StringTools::baseName ( file, false );
+	int classno = classNames->classno(classtext);
+	if ( classno < 0 ) {
+	    fprintf (stderr, "Unable to find class %s\n", classtext.c_str() );
+	    fprintf (stderr, "dir %s file %s classtext %s\n", dir.c_str(),
+		file.c_str(), classtext.c_str() );
+	}
+	PascalResults::read ( results, file, classno, backgroundClassNo, true /*calibrate*/ );
+    }
+}
+
+	
+// refactor-nice.pl: check this substitution
+// old: void PSSLocalizationPrior::postprocess ( Image & result, GenericImage<double> & probabilities )
+void PSSLocalizationPrior::postprocess ( NICE::Image & result, GenericImage<double> & probabilities )
+{
+    // refactor-nice.pl: check this substitution
+    // old: string currentFilename = Globals::getCurrentImgFN();
+    std::string currentFilename = Globals::getCurrentImgFN();
+    // refactor-nice.pl: check this substitution
+    // old: string base = StringTools::baseName ( currentFilename, false );
+    std::string base = StringTools::baseName ( currentFilename, false );
+    map<string, LocalizationResult *>::const_iterator i = detresults.find ( base );
+
+    if ( i == detresults.end() )
+    {
+	fprintf (stderr, "NO detection results found for %s !\n", base.c_str());
+	return;
+    }
+
+    fprintf (stderr, "Inferring detection prior\n");
+	
+    LocalizationResult *ldet = i->second;
+
+    int maxClassNo = probabilities.numChannels - 1;
+
+    int xsize = probabilities.xsize;
+    int ysize = probabilities.ysize;
+    FullVector *priormap = new FullVector [ xsize * ysize ];
+    for ( long k = 0 ; k < xsize * ysize ; k++ )
+	priormap[k].reinit(maxClassNo);
+
+    for ( LocalizationResult::const_iterator j = ldet->begin();
+					    j != ldet->end();
+					    j++ )
+    {
+	const SingleLocalizationResult *slr = *j;
+
+	int xi, yi, xa, ya;
+	const NICE::Region & r = slr->getRegion();
+	int classno = slr->r->classno;
+	double confidence = slr->r->confidence();
+	r.getRect ( xi, yi, xa, ya );
+
+	for ( int y = yi; y <= ya; y++ )
+	    for ( int x = xi; x <= xa; x++ )
+	    {
+		if ( (y<0) || (x<0) || (x>xsize-1) || (y>ysize-1) )
+		    continue;
+		if ( r.inside ( x*subsamplex, y*subsampley ) )
+		    priormap[x + y*xsize][classno] += confidence;
+	    }
+
+	long k = 0;
+	for ( int y = 0 ; y < ysize ; y++ )
+	    for ( int x = 0 ; x < xsize ; x++,k++ )
+	    {
+		FullVector & prior  = priormap[k];
+
+		if ( prior.sum() < 10e-6 )
+		    continue;
+
+		prior.normalize();
+
+		double sum = 0.0;
+		for ( int i = 0 ; i < (int)probabilities.numChannels; i++ )
+		{
+		    probabilities.data[i][k] *= pow ( prior[i], alphaDetectionPrior ); 
+		    sum += probabilities.data[i][k];
+		}
+
+		if ( sum < 10e-6 )
+		    continue;
+
+		int maxindex = 0;
+		double maxvalue = - numeric_limits<double>::max();
+		for ( int i = 0 ; i < (int)probabilities.numChannels; i++ )
+		{
+		    probabilities.data[i][k] /= sum;
+		    if ( probabilities.data[i][k] > maxvalue )
+		    {
+			maxindex = i;
+			maxvalue = probabilities.data[i][k];
+		    }
+		}
+		result.setPixel(x,y,maxindex);
+	    }
+	
+    }
+
+    delete [] priormap;
+}

+ 62 - 0
semseg/postsegmentation/PSSLocalizationPrior.h

@@ -0,0 +1,62 @@
+/** 
+* @file PSSLocalizationPrior.h
+* @brief incorporate prior from localization results
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef PSSLOCALIZATIONPRIORINCLUDE
+#define PSSLOCALIZATIONPRIORINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+ 
+#include "objrec/cbaselib/LocalizationResult.h"
+#include "PostSemSeg.h"
+
+
+namespace OBJREC {
+
+/** incorporate prior from localization results */
+class PSSLocalizationPrior : public PostSemSeg
+{
+
+    protected:
+	int subsamplex;
+	int subsampley;
+	double alphaDetectionPrior;
+	map<string, LocalizationResult *> detresults;
+
+
+	// refactor-nice.pl: check this substitution
+	// old: void loadDetectionResults ( const string & dir,
+	void loadDetectionResults ( const std::string & dir,
+			       map<string, LocalizationResult *> & results,
+			       const ClassNames *classNames );
+
+    public:
+  
+	/** simple constructor */
+	// refactor-nice.pl: check this substitution
+	// old: PSSLocalizationPrior( const string & detectiondir, const ClassNames *classNames,
+	PSSLocalizationPrior( const std::string & detectiondir, const ClassNames *classNames,
+			      double alphaDetectionPrior,  int subsamplex = 1, int subsampley = 1 );
+      
+	/** simple destructor */
+	virtual ~PSSLocalizationPrior();
+	
+	// refactor-nice.pl: check this substitution
+	// old: virtual void postprocess ( Image & result, GenericImage<double> & probabilities );
+	virtual void postprocess ( NICE::Image & result, GenericImage<double> & probabilities );
+
+
+     
+};
+
+
+} // namespace
+
+#endif
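
A usage sketch for this class (not part of the changeset); the detection directory path and the classNames object are placeholders supplied by the caller:

// hypothetical caller code; detection results are loaded once at construction time
PSSLocalizationPrior pss("/path/to/detections", &classNames,
                         1.0 /* alphaDetectionPrior */);
// per test image (the current image filename is taken from Globals internally):
pss.postprocess(result, probabilities);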

+ 34 - 0
semseg/postsegmentation/PSSQueue.cpp

@@ -0,0 +1,34 @@
+/** 
+* @file PSSQueue.cpp
+* @brief application of multiple post semantic segmentation methods
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+
+#include "PSSQueue.h"
+
+using namespace OBJREC;
+
+using namespace std;
+// refactor-nice.pl: check this substitution
+// old: using namespace ice;
+using namespace NICE;
+
+
+
+PSSQueue::PSSQueue()
+{
+}
+
+PSSQueue::~PSSQueue()
+{
+}
+

+ 39 - 0
semseg/postsegmentation/PSSQueue.h

@@ -0,0 +1,39 @@
+/** 
+* @file PSSQueue.h
+* @brief application of multiple post semantic segmentation methods
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef PSSQUEUEINCLUDE
+#define PSSQUEUEINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+  
+
+namespace OBJREC {
+
+/** application of multiple post semantic segmentation methods */
+class PSSQueue
+{
+
+    protected:
+
+    public:
+  
+	/** simple constructor */
+	PSSQueue();
+      
+	/** simple destructor */
+	virtual ~PSSQueue();
+     
+};
+
+
+} // namespace
+
+#endif
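
PSSQueue is still an empty stub in this changeset; purely for illustration (not taken from the source), chaining several post-processing steps could look like this:

// purely illustrative; PSSQueue itself does not provide this yet
std::vector<PostSemSeg *> steps;   // filled with concrete PostSemSeg subclasses elsewhere
for (size_t i = 0; i < steps.size(); i++)
    steps[i]->postprocess(result, probabilities);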

+ 34 - 0
semseg/postsegmentation/PostSemSeg.cpp

@@ -0,0 +1,34 @@
+/** 
+* @file PostSemSeg.cpp
+* @brief abstract interface for post processing steps concerning semantic segmentation routines
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include <iostream>
+
+#include "PostSemSeg.h"
+
+using namespace OBJREC;
+
+using namespace std;
+// refactor-nice.pl: check this substitution
+// old: using namespace ice;
+using namespace NICE;
+
+
+
+PostSemSeg::PostSemSeg()
+{
+}
+
+PostSemSeg::~PostSemSeg()
+{
+}
+

+ 45 - 0
semseg/postsegmentation/PostSemSeg.h

@@ -0,0 +1,45 @@
+/** 
+* @file PostSemSeg.h
+* @brief abstract interface for post processing steps concerning semantic segmentation routines
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef POSTSEMSEGINCLUDE
+#define POSTSEMSEGINCLUDE
+
+#ifdef NOVISUAL
+#include <objrec/nice_nonvis.h>
+#else
+#include <objrec/nice.h>
+#endif
+
+#include "objrec/image/GenericImage.h"
+
+
+namespace OBJREC {
+
+/** abstract interface for post processing steps concerning semantic segmentation routines */
+class PostSemSeg
+{
+
+    protected:
+
+    public:
+  
+	/** simple constructor */
+	PostSemSeg();
+      
+	/** simple destructor */
+	virtual ~PostSemSeg();
+
+	// refactor-nice.pl: check this substitution
+	// old: virtual void postprocess ( Image & result, GenericImage<double> & probabilities ) = 0;
+	virtual void postprocess ( NICE::Image & result, GenericImage<double> & probabilities ) = 0;
+     
+};
+
+
+} // namespace
+
+#endif
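
A minimal sketch (not part of the changeset) of how a concrete post-processing step plugs into this interface; the class name is made up:

namespace OBJREC {

// hypothetical no-op step: implements the single pure virtual method
class PSSIdentity : public PostSemSeg
{
    public:
	void postprocess ( NICE::Image & result, GenericImage<double> & probabilities )
	{
	    // leave labels and per-pixel scores unchanged
	}
};

} // namespace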

+ 559 - 0
semseg/postsegmentation/RelativeLocationPrior.cpp

@@ -0,0 +1,559 @@
+#include "RelativeLocationPrior.h"
+#include "objrec/fourier/FourierLibrary.h"
+#include "objrec/baselib/ICETools.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+RelativeLocationPrior::RelativeLocationPrior()
+{
+	conf = new Config();
+	mapsize = 200;
+}
+
+RelativeLocationPrior::RelativeLocationPrior(const Config *_conf):conf(_conf)
+{
+}
+
+void RelativeLocationPrior::setClassNo(int _classno)
+{
+	classno = _classno; 
+	Init();
+}
+
+void RelativeLocationPrior::Init()
+{
+	std::string section = "PostProcessRLP";
+	mapsize = conf->gI(section, "mapsize", 200 );
+
+	featdim = classno*3;
+
+	// create the prior maps
+	for(int i = 0; i < classno; i++)
+	{
+		GenericImage<double> *tmp  = new GenericImage<double>(mapsize, mapsize, classno, true);
+		tmp->setAll(0.0);
+		priormaps.push_back(tmp);
+	}
+}
+
+RelativeLocationPrior::~RelativeLocationPrior()
+{
+	for(int i = 0; i < classno; i++)
+	{
+		delete priormaps[i];
+	}
+}
+
+void RelativeLocationPrior::trainPriorsMaps(Examples &regions, int xsize, int ysize)
+{	
+	for(int j = 0; j < (int)regions.size(); j++)
+	{
+		for(int i = 0; i < (int)regions.size(); i++)
+		{
+			if(i == j) 
+				continue;
+			
+			int x = regions[i].second.x - regions[j].second.x;
+			int y = regions[i].second.y - regions[j].second.y;
+			
+			convertCoords(x, xsize);
+			convertCoords(y, ysize);
+			
+			priormaps[regions[i].first]->set(x, y, priormaps[regions[i].first]->get(x, y, regions[j].first)+1.0/*regions[j].second.weight*/, regions[j].first);
+		}
+	}
+}
+
+void RelativeLocationPrior::finishPriorsMaps(ClassNames &cn)
+{
+	// normalize the prior maps
+	double alpha = 5;
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			double val = 0.0;
+
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					val = std::max(val,priormaps[i]->get(x, y, j));
+				}
+			}
+			if(val != 0.0)
+			{
+				for(int x = 0; x < mapsize; x++)
+				{
+					for(int y = 0; y < mapsize; y++)
+					{
+						double old = priormaps[i]->get(x, y, j);
+						
+#undef DIRICHLET
+#ifdef DIRICHLET
+						old = (old+alpha)/(val+classno*alpha);
+#else
+						old /= val;
+#endif
+						priormaps[i]->set(x, y, old, j);
+					}
+				}
+			}
+		}
+	}
+		
+	double sigma = 0.1*(double)mapsize; // 10% of the map width/height
+	
+	// smooth all prior maps with a Gaussian filter
+	for(int j = 0; j < classno; j++)
+	{
+		for(int i = 0; i < classno; i++)
+		{
+			NICE::FloatImage tmp(mapsize, mapsize);
+			tmp.set(0.0);
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					tmp.setPixelQuick(x,y, priormaps[j]->get(x, y, i));
+				}
+			}
+
+			NICE::FloatImage out;
+			FourierLibrary::gaussFilterD(tmp, out, sigma);
+					
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					priormaps[j]->set(x, y, out.getPixel(x,y), i);
+				}
+			}
+		}
+	}
+	
+	// normalize so that, at each position, the values summed over all classes equal 1
+	for(int i = 0; i < classno; i++)
+	{
+		for(int x = 0; x < mapsize; x++)
+		{
+			for(int y = 0; y < mapsize; y++)
+			{
+				double val = 0.0;
+				for(int j = 0; j < classno; j++)
+				{
+					val += priormaps[i]->get(x, y, j);
+				}
+				if(val != 0.0)
+				{
+					for(int j = 0; j < classno; j++)
+					{
+						double old = priormaps[i]->get(x, y, j);
+						old /= val;
+						priormaps[i]->set(x, y, old, j);
+					}
+				}
+			}
+		}
+	}
+	
+#undef VISDEBUG
+#ifdef VISDEBUG
+#ifndef NOVISUAL
+	NICE::ColorImage rgbim((classno-1)*(mapsize+10), (classno-1)*(mapsize+10));
+	
+	double maxval = -numeric_limits<double>::max();
+	double minval = numeric_limits<double>::max();
+	
+	for(int j = 0; j < classno; j++)
+	{
+		if(j == 6) continue;
+		for(int i = 0; i < classno; i++)
+		{
+			if(i == 6) continue;
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					double val = priormaps[j]->get(x, y, i);
+					maxval = std::max(val, maxval);
+					minval = std::min(val, minval);
+				}
+			}
+		}
+	}
+	
+	int jcounter = 0;
+	for(int j = 0; j < classno; j++)
+	{
+		if(j == 6) continue;
+		int icounter = 0;
+		for(int i = 0; i < classno; i++)
+		{
+			if(i == 6) continue;
+			
+			NICE::FloatImage tmp(mapsize, mapsize);
+			tmp.set(0.0);
+			
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					tmp.setPixel(x, y, priormaps[j]->get(x, y, i));
+				}
+			}
+
+			tmp.setPixel(0,0,maxval);
+			tmp.setPixel(0,1,minval);
+			cout << "i: " << cn.text(i) << endl;
+			NICE::ColorImage imgrgb2 (mapsize, mapsize);
+			ICETools::convertToRGB(tmp, imgrgb2);
+			
+			imgrgb2.setPixel(0,0,2,imgrgb2.getPixel(1,0,2));
+			imgrgb2.setPixel(0,1,2,imgrgb2.getPixel(1,1,2));
+			imgrgb2.setPixel(0,0,0,imgrgb2.getPixel(1,0,0));
+			imgrgb2.setPixel(0,1,0,imgrgb2.getPixel(1,1,0));
+			imgrgb2.setPixel(0,0,1,imgrgb2.getPixel(1,0,1));
+			imgrgb2.setPixel(0,1,1,imgrgb2.getPixel(1,1,1));
+			
+			for(int y = 0; y < mapsize; y++)
+			{
+				for(int x = 0; x < mapsize; x++)
+				{
+					rgbim.setPixel(x+jcounter*(mapsize+10),y+icounter*(mapsize+10),2,imgrgb2.getPixel(x,y,2));
+					rgbim.setPixel(x+jcounter*(mapsize+10),y+icounter*(mapsize+10),0,imgrgb2.getPixel(x,y,0));
+					rgbim.setPixel(x+jcounter*(mapsize+10),y+icounter*(mapsize+10),1,imgrgb2.getPixel(x,y,1));
+				}
+			}
+			icounter++;
+		}
+		jcounter++;
+	}
+	rgbim.write("tmp.ppm");
+#endif
+#endif
+}
+
+void RelativeLocationPrior::trainClassifier(Examples &regions, GenericImage<double> & probabilities)
+{
+	// build a feature vector for every region and add it to the training set
+	getFeature(regions, probabilities);
+
+	for(int i = 0; i < (int)regions.size(); i++)
+	{
+		trainingsdata.push_back(pair<int, Example>(regions[i].first, regions[i].second));
+		regions[i].second.svec = NULL;
+	}
+}
+
+void RelativeLocationPrior::finishClassifier()
+{
+	//////////////////////////////
+	// train the classifiers    //
+	//////////////////////////////
+	FeaturePool fp;
+	Feature *f = new SparseVectorFeature ( featdim );
+	f->explode ( fp );
+	delete f;
+	
+	//feature size
+	int s = 3;
+
+	classifiers.resize(classno);
+	for(int i = 0; i < classno; i++)
+	{
+		classifiers[i] = SLR(conf, "ClassifierSMLR");
+		Examples ex2;
+		int countex = 0;
+		for(int j = 0; j < (int)trainingsdata.size(); j++)
+		{
+			Example e;
+			int z = 0;
+			e.svec = new SparseVector(s+1);
+			for(int k = i*s; k < i*s+s; k++, z++)
+			{
+				double val = trainingsdata[j].second.svec->get(k);
+				if(val != 0.0)
+					(*e.svec)[z] = val;
+			}
+			(*e.svec)[s] = 1.0;
+			
+			ex2.push_back ( pair<int, Example> ( trainingsdata[j].first, e ) );
+			
+			if(trainingsdata[j].first == i)
+				countex++;
+		}
+				
+		if(ex2.size() <= 2 || countex < 1)
+			continue;
+		
+		classifiers[i].train(fp, ex2, i);
+				
+		for(int j = 0; j < (int)ex2.size(); j++)
+		{
+			delete ex2[j].second.svec;
+			ex2[j].second.svec = NULL;
+		}
+	}
+	
+	trainingsdata.clear();
+}
+
+void RelativeLocationPrior::postprocess ( Examples &regions, GenericImage<double> & probabilities)
+{
+	getFeature(regions, probabilities);
+	
+	int s = 3;
+	
+	for(int i = 0; i < (int) regions.size(); i++)
+	{
+		FullVector overall_distribution(classno+1);
+		overall_distribution[classno] = 0.0;
+		
+		double maxp = -numeric_limits<double>::max();
+		int bestclass = 0;
+	
+		double sum  = 0.0;
+
+		for(int c = 0; c < classno; c++)
+		{		
+			Example e;
+			int z = 0;
+			e.svec = new SparseVector(s+1);
+			for(int k = c*s; k < c*s+s; k++, z++)
+			{
+				double val = regions[i].second.svec->get(k);
+				if(val != 0.0)
+					(*e.svec)[z] = val;
+			}
+			(*e.svec)[s] = 1.0;
+			
+			overall_distribution[c] = classifiers[c].classify(e);
+		
+			sum += overall_distribution[c];
+		
+			if(maxp < overall_distribution[c])
+			{
+				bestclass = c;
+				maxp = overall_distribution[c];
+			}
+			delete e.svec;
+			e.svec = NULL;
+		}
+
+		for(int c = 0; c < classno; c++)
+		{
+			overall_distribution[c] /= sum;
+		}
+
+		ClassificationResult r = ClassificationResult( bestclass, overall_distribution );
+
+		if(bestclass < 0)
+		{
+			regions[i].second.svec->store(cout);cout << endl;
+			cout << "error: bestclass=" << bestclass << endl;
+			for(int j = 0; j < (int)probabilities.numChannels; j++)
+			{
+				cout << "j: " << j << " score: " << r.scores[j] << endl;
+			}
+		}
+		regions[i].first = bestclass;
+	}
+}
+
+void RelativeLocationPrior::convertCoords(int &x, int xsize)
+{
+	x = (int)round((double(x)+(double)xsize)/(2.0*(double)xsize) * ((double)mapsize-1.0));
+
+	x = std::min(x, mapsize-1);
+	x = std::max(x,0);
+}
+
+void RelativeLocationPrior::getFeature(Examples &regions, GenericImage<double> & probabilities)
+{
+
+	int xsize, ysize;
+	xsize = probabilities.xsize;
+	ysize = probabilities.ysize;
+	
+	// get best classes
+	vector<int> bestclasses(regions.size(), -1);
+	for(int r = 0; r < (int)regions.size(); r++)
+	{
+		double maxval = -numeric_limits<double>::max();
+		for(int c = 0; c < (int)probabilities.numChannels; c++)	
+		{
+			double val = probabilities.get(regions[r].second.x, regions[r].second.y, c);
+			if(maxval < val)
+			{
+				bestclasses[r] = c;
+				maxval = val;
+			}
+		}
+	}
+	
+	vector<double> alpha;
+	for(int r = 0; r < (int)regions.size(); r++)
+	{
+		double tmpalpha = probabilities.get(regions[r].second.x,regions[r].second.y,bestclasses[r]) *regions[r].second.weight;
+		
+		alpha.push_back(tmpalpha);
+	}
+
+	// build the relative-location feature f_relloc
+	vector<vector<double> > vother;
+	vector<vector<double> > vself;		
+	for(int i = 0; i < (int)regions.size(); i++)
+	{
+		vector<double> v,w;
+		vother.push_back(v);
+		vself.push_back(w);
+		for( int c = 0; c < classno; c++)
+		{
+			double tmp_vother = 0.0;
+			double tmp_self = 0.0;
+			
+			for(int j = 0; j < (int)regions.size(); j++)
+			{
+				if(j == i)
+					continue;
+				
+				int x = regions[i].second.x - regions[j].second.x;
+				int y = regions[i].second.y - regions[j].second.y;
+					
+				convertCoords(x, xsize);
+				convertCoords(y, ysize);
+
+				double val = priormaps[c]->get(x, y, bestclasses[j]) * alpha[j];
+
+				if(bestclasses[j] == bestclasses[i]) // same class: parts of the same object
+				{
+					tmp_self += val;
+				}
+				else // different class: context information
+				{
+					tmp_vother += val;
+				}
+			}
+			
+			if(fabs(tmp_self) < 10e-7)
+				tmp_self = 10e-7;
+			if(fabs(tmp_vother) < 10e-7)
+				tmp_vother = 10e-7;
+			
+			vother[i].push_back(tmp_vother);
+			vself[i].push_back(tmp_self);
+		}
+	}
+
+	for(int r = 0; r < (int)regions.size(); r++)
+	{
+		if(regions[r].second.svec !=NULL)
+		{
+			delete regions[r].second.svec;
+			regions[r].second.svec = NULL;
+		}		
+		if(regions[r].second.vec !=NULL)
+		{
+			delete regions[r].second.vec;
+			regions[r].second.vec = NULL;
+		}
+
+		regions[r].second.svec = new SparseVector(classno*3);
+		
+		int counter = 0; 
+		
+		for (int i = 0; i < classno; i++)
+		{
+			// appearance feature (old probability for each class)
+			double fapp = log(probabilities.get(regions[r].second.x,regions[r].second.y,i));
+			
+			if(fabs(fapp) > 10e-7)
+				(*(regions[r].second.svec))[counter] = fapp;
+			counter++;
+			
+			double val = log(vother[r][i]);
+			
+			if(fabs(val) > 10e-7)
+				(*(regions[r].second.svec))[counter] = val;
+			counter++;
+			
+			val =log(vself[r][i]);
+			
+			if(fabs(val) > 10e-7)
+				(*(regions[r].second.svec))[counter] = val;
+			counter++;
+		}
+	}
+}
+
+void RelativeLocationPrior::restore (istream & is, int format)
+{
+	is >> classno;
+	is >> mapsize;
+	is >> featdim;
+	
+	// create the prior maps
+	for(int i = 0; i < classno; i++)
+	{
+		GenericImage<double> *tmp  = new GenericImage<double>(mapsize, mapsize, classno, true);
+		tmp->setAll(0.0);
+		priormaps.push_back(tmp);
+	}
+	
+	double val;
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					
+					is >> val;
+					priormaps[i]->set(x, y, val, j);
+				}
+			}
+		}
+	}
+	
+	classifiers.resize(classno);
+	for(int i = 0; i < classno; i++)
+	{
+		classifiers[i] = SLR();
+		classifiers[i].restore(is, format);
+	}
+}
+
+void RelativeLocationPrior::store (ostream & os, int format) const
+{
+	os << classno << " ";
+	os << mapsize << " ";
+	os << featdim << endl;
+	for(int i = 0; i < classno; i++)
+	{
+		for(int j = 0; j < classno; j++)
+		{
+			for(int x = 0; x < mapsize; x++)
+			{
+				for(int y = 0; y < mapsize; y++)
+				{
+					os << priormaps[i]->get(x, y, j) << " ";
+				}
+			}
+		}
+	}
+	
+	for(int i = 0; i < classno; i++)
+	{
+		classifiers[i].store(os, format);
+	}
+}
+
+void RelativeLocationPrior::clear ()
+{
+	
+}
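
As a quick editorial check of convertCoords() above (example values assumed, not from the source): a relative offset in [-size, size] is mapped linearly onto a prior-map cell in [0, mapsize-1]:

#include <algorithm>
#include <cmath>

// re-statement of the mapping performed by convertCoords()
int toMapCell(int offset, int size, int mapsize)
{
    int x = (int)std::round((double(offset) + size) / (2.0 * size) * (mapsize - 1.0));
    return std::max(0, std::min(x, mapsize - 1));
}
// e.g. with size = 320 and the default mapsize = 200:
//   toMapCell(-320, 320, 200) == 0, toMapCell(0, 320, 200) == 100, toMapCell(320, 320, 200) == 199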

+ 136 - 0
semseg/postsegmentation/RelativeLocationPrior.h

@@ -0,0 +1,136 @@
+/**
+ * @file RelativeLocationPrior.h
+ * @brief a post-processing step after semantic segmentation that uses relative location priors
+ * @author Björn Fröhlich
+ * @date 06/10/2009
+
+ */
+#ifndef RELATIVELOCATIONPRIORINCLUDE
+#define RELATIVELOCATIONPRIORINCLUDE
+
+#include <objrec/nice.h>
+
+#include "objrec/image/GenericImage.h"
+
+#include "objrec/cbaselib/CachedExample.h"
+#include "objrec/baselib/Preprocess.h"
+#include "objrec/baselib/Globals.h"
+
+#include "objrec/classifier/fpclassifier/logisticregression/SLR.h"
+#include "objrec/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+#include "objrec/features/fpfeatures/SparseVectorFeature.h"
+
+#include "objrec/cbaselib/ClassNames.h"
+
+namespace OBJREC {
+
+class RelativeLocationPrior : public Persistent
+{
+
+	protected:
+		//! the priormaps
+		vector<GenericImage<double> *> priormaps;
+		
+		//! the configfile
+		//! the config file
+		
+		//! count of classes
+		int classno;
+		
+		//! size of the priormaps (mapsize x mapsize)
+		int mapsize;
+		
+		//! convert image coordinates to prior-map coordinates
+		void convertCoords(int &x, int xsize);
+		
+		//! the training data is added to this object incrementally
+		Examples trainingsdata;
+		
+		//! the one vs all sparse logistic classifiers
+		vector<SLR> classifiers;
+    
+		//! dimension of the features
+		int featdim;
+						
+	public:
+  
+		/** simple constructor */
+		RelativeLocationPrior();
+		
+		/** simple constructor */
+		RelativeLocationPrior(const Config *_conf);
+      
+		/** simple destructor */
+		~RelativeLocationPrior();
+
+		/**
+		 * set the count of classes
+		 * @param _classno count of classes
+		 */
+		void setClassNo(int _classno);
+     
+		/** initialize the RelativeLocationPrior variables */
+		void Init();
+		
+		/**
+		 * determine the location prior maps from the training image
+		 * @param regions input regions with size, position and label
+		 */
+		void trainPriorsMaps(Examples &regions, int xsize, int ysize);
+		
+		/**
+		 * finish the prior maps
+		 */
+		void finishPriorsMaps(ClassNames &cn);
+		
+		/**
+		 * collect training examples with relative-location features from the training image
+		 * @param regions input regions with size and position
+		 * @param probabilities the probability maps
+		 */
+		void trainClassifier(Examples &regions, GenericImage<double> & probabilities);
+		
+		/**
+		 * finish the classifiers
+		 */
+		void finishClassifier();
+		
+		/**
+		 * appends the feature vector to the given examples
+		 * @param regions input regions with size and position
+		 * @param probabilities the probability maps
+		 */
+		void getFeature(Examples &regions, GenericImage<double> & probabilities);
+		
+		/**
+		 * uses the rlp for reclassification
+		 * uses the relative location prior to reclassify the regions
+		 * @param regions regions to be relabeled
+		 * @param probabilities the probability maps
+		void postprocess ( Examples &regions, GenericImage<double> & probabilities);
+		
+		/**
+		 * load data from an input stream
+		 * @param is input stream
+		 * @param format 
+		 */
+		void restore (istream & is, int format = 0);
+		
+		/**
+		 * write data to an output stream
+		 * @param os outputstream
+		 * @param format 
+		 */
+		void store (ostream & os, int format = 0) const;
+		
+		/**
+		 * clear all information
+		 */
+		void clear ();
+};
+
+} //namespace
+
+#endif
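
A hedged sketch of the intended train/test workflow, pieced together from the method names above (not part of the changeset; all arguments are assumed to come from the surrounding segmentation pipeline):

#include "RelativeLocationPrior.h"

using namespace OBJREC;

// hypothetical driver; in practice trainPriorsMaps/trainClassifier are called once per training image
void applyRelativeLocationPrior(const Config *conf, int classno, ClassNames &cn,
                                Examples &trainRegions, GenericImage<double> &trainProbs,
                                Examples &testRegions, GenericImage<double> &testProbs,
                                int xsize, int ysize)
{
    RelativeLocationPrior rlp(conf);
    rlp.setClassNo(classno);                    // also allocates the prior maps

    // pass 1: accumulate the relative-location prior maps
    rlp.trainPriorsMaps(trainRegions, xsize, ysize);
    rlp.finishPriorsMaps(cn);                   // normalize and smooth them

    // pass 2: learn the one-vs-all SLR classifiers on the relative-location features
    rlp.trainClassifier(trainRegions, trainProbs);
    rlp.finishClassifier();

    // test time: relabel each region from its relative-location feature vector
    rlp.postprocess(testRegions, testProbs);
}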

+ 3 - 0
semseg/postsegmentation/libdepend.inc

@@ -0,0 +1,3 @@
+$(call PKG_DEPEND_EXT,ICE)
+$(call PKG_DEPEND_INT,objrec/cbaselib)
+$(call PKG_DEPEND_INT,objrec/mrf)