Browse Source

re-added all other semseg methods

Sven Sickert 11 years ago
parent
commit
19e0434066
34 changed files with 14675 additions and 0 deletions
  1. 98 0
      progs/classtest.cpp
  2. 123 0
      progs/getRelevantClasses.cpp
  3. 556 0
      progs/testActiveSemanticSegmentation.cpp
  4. 284 0
      progs/testActiveSemanticSegmentationBinary.cpp
  5. 332 0
      progs/testClassifier.cpp
  6. 346 0
      progs/testClassifierGMM.cpp
  7. 29 0
      progs/testNICE.cpp
  8. 51 0
      progs/testRF.cpp
  9. 337 0
      progs/testSemanticSegmentation.cpp
  10. 1972 0
      semseg/SemSegContextTree.cpp
  11. 248 0
      semseg/SemSegContextTree.h
  12. 2368 0
      semseg/SemSegCsurka.cpp
  13. 249 0
      semseg/SemSegCsurka.h
  14. 108 0
      semseg/SemSegLocal.cpp
  15. 48 0
      semseg/SemSegLocal.h
  16. 2237 0
      semseg/SemSegNovelty.cpp
  17. 308 0
      semseg/SemSegNovelty.h
  18. 1527 0
      semseg/SemSegNoveltyBinary.cpp
  19. 245 0
      semseg/SemSegNoveltyBinary.h
  20. 263 0
      semseg/operations/Operations.cpp
  21. 1173 0
      semseg/operations/Operations.h
  22. 8 0
      semseg/postsegmentation/Makefile
  23. 103 0
      semseg/postsegmentation/Makefile.inc
  24. 199 0
      semseg/postsegmentation/PPGraphCut.cpp
  25. 126 0
      semseg/postsegmentation/PPGraphCut.h
  26. 286 0
      semseg/postsegmentation/PPSuperregion.cpp
  27. 121 0
      semseg/postsegmentation/PPSuperregion.h
  28. 115 0
      semseg/postsegmentation/PSSImageLevelPrior.cpp
  29. 51 0
      semseg/postsegmentation/PSSImageLevelPrior.h
  30. 27 0
      semseg/postsegmentation/PostSemSeg.cpp
  31. 38 0
      semseg/postsegmentation/PostSemSeg.h
  32. 561 0
      semseg/postsegmentation/RelativeLocationPrior.cpp
  33. 135 0
      semseg/postsegmentation/RelativeLocationPrior.h
  34. 3 0
      semseg/postsegmentation/libdepend.inc

+ 98 - 0
progs/classtest.cpp

@@ -0,0 +1,98 @@
+#include <iostream>
+#include <fstream>
+#include <core/vector/VectorT.h>
+#include <limits>
+
+using namespace std;
+
+using namespace NICE;
+
+// Minimal test subclass of NICE::VectorT used to probe virtual dispatch:
+// it overrides size() to return a fixed dummy value rather than the real
+// allocated size (exercised by the timing loop in printit() below).
+template<class ElementType>
+
+class SparseVectorT : public VectorT<ElementType> {
+  // dummy "dimension" reported by the overridden size()
+  size_t dsize;
+
+public:
+  SparseVectorT( const size_t size, const ElementType& element ): VectorT<ElementType>( size, element ) {
+    dsize = 5;
+  }
+
+  // virtual override: always reports 5, regardless of the actual storage size
+  virtual inline size_t size() const {
+    return dsize;
+  }
+};
+
+
+// Micro-benchmark helper: calls e.size() INT_MAX times through a base-class
+// reference to measure the cost of a (potentially virtual) size() call.
+// NOTE(review): the loop has no observable side effects, so an optimizing
+// compiler may elide it entirely — compile without optimization for a
+// meaningful timing comparison.
+void printit( VectorT<double> &e )
+{
+  //cout << e.size() << endl;
+  size_t a = 0;
+
+  for ( int i = 0; i < numeric_limits<int>::max(); i++ )
+  {
+    a = e.size();
+  }
+}
+
+// Driver for the virtual-call timing experiment: constructs a plain
+// VectorT (or, when the commented line is swapped in, a SparseVectorT)
+// and runs the printit() benchmark loop on it.
+int main( int argc, char **argv )
+{
+  VectorT<double> k( 2, 2.0 );
+  /*cout << "print1 vector:" << endl;
+  cout << e.size()<< endl;*/
+
+  //SparseVectorT<double> k(2,2.0);
+  /* cout << "print2 sparse:" << endl;
+   cout << k.size()<< endl;*/
+  /*
+   cout << "print3 vectormethode:" << endl;
+   printit(e);
+
+   cout << "print4 sparsemethode:" << endl;*/
+  printit( k );
+  return 0;
+}
+
+/*
+
+class Elter
+{
+protected:
+ int t1;
+ int t2;
+
+public:
+ Elter():t1(1),t2(2){}
+ virtual void print(){cout << "t1: " << t1 << endl;}
+};
+
+class Kind:public Elter
+{
+public:
+ Kind(){t1 = 3; t2 = 4;}
+ virtual void print(){cout << "t2: " << t2 << endl;}
+};
+
+void printit(Elter &e)
+{
+ e.print();
+}
+
+int main(int argc, char **argv)
+{
+ Elter e;
+ cout << "print1 elter:" << endl;
+ e.print();
+
+ Kind k;
+ cout << "print2 kind:" << endl;
+ k.print();
+
+ cout << "print3 eltermethode:" << endl;
+ printit(e);
+
+
+ cout << "print3 kindmethode:" << endl;
+ printit(k);
+ return 0;
+}
+*/

+ 123 - 0
progs/getRelevantClasses.cpp

@@ -0,0 +1,123 @@
+// Beispielhafter Aufruf: BUILD_x86_64/progs/testSemanticSegmentation -config <CONFIGFILE>
+
+/**
+* @file testSemanticSegmentation.cpp
+* @brief test semantic segmentation routines
+* @author Erik Rodner
+* @date 03/20/2008
+*/
+
+#ifdef NICE_USELIB_OPENMP
+#include <omp.h>
+#endif
+
+#include "core/basics/Config.h"
+#include <core/basics/StringTools.h>
+#include <vislearning/baselib/ICETools.h>
+
+#include "vislearning/cbaselib/MultiDataset.h"
+#include "core/image/MultiChannelImageT.h"
+
+#include <fstream>
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+/**
+ test semantic segmentation routines
+*/
+// For every test image of the MultiDataset, collects the set of ground-truth
+// class labels that actually occur in the image and writes them (one class id
+// per line) to <cndir>/<imagename>.dat, where cndir comes from the config key
+// SemSegCsurka::cndir. Exits with -1 if cndir is not configured.
+int main( int argc, char **argv )
+{
+  Config conf( argc, argv );
+
+  MultiDataset md( &conf );
+
+  const ClassNames & classNames = md.getClassNames( "train" );
+
+  const LabeledSet *testFiles = md["test"];
+
+  // classes excluded via config key analysis::forbidden_classes
+  // NOTE(review): this set is filled below but never consulted afterwards
+  set<int> forbidden_classes;
+
+  std::string forbidden_classes_s = conf.gS( "analysis", "forbidden_classes", "" );
+
+  classNames.getSelection( forbidden_classes_s, forbidden_classes );
+
+  LOOP_ALL_S( *testFiles )
+  {
+    EACH_INFO( classno, info );
+
+    std::string file = info.img();
+
+    // NOTE(review): lm and probabilities are computed/declared here but never
+    // used later in this program — only lm_gt below is actually consumed.
+    NICE::Image lm;
+    NICE::MultiChannelImageT<double> probabilities;
+
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
+
+      lm.resize( l_gt->xsize, l_gt->ysize );
+      lm.set( 0 );
+      l_gt->calcLabeledImage( lm, classNames.getBackgroundClass() );
+    }
+
+    // render the ground-truth localization into a per-pixel label image
+    NICE::Image lm_gt;
+
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
+
+      lm_gt.resize( l_gt->xsize, l_gt->ysize );
+      lm_gt.set( 0 );
+
+      fprintf( stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n" );
+      l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+    }
+
+    // gather the distinct class labels present in this image
+    set<int> classes;
+
+    for ( int x = 0; x < lm_gt.width(); x++ )
+    {
+      for ( int y = 0; y < lm_gt.height(); y++ )
+      {
+        classes.insert( lm_gt.getPixel( x, y ) );
+      }
+    }
+
+
+
+    // write allowed classes
+    string cndir = conf.gS( "SemSegCsurka", "cndir", "" );
+
+    std::vector< std::string > list;
+
+    StringTools::split( file, '/', list );
+
+    cout << cndir << "/" << list.back() << ".dat" << endl;
+
+    string cname = list.back();
+
+    if ( cndir != "" )
+    {
+      string fname = cndir + "/" + cname + ".dat";
+      cout << fname << endl;
+      ofstream outfile( fname.c_str() );
+
+      set<int>::iterator theIterator;
+
+      // one class id per line
+      for ( theIterator = classes.begin(); theIterator != classes.end(); theIterator++ ) {
+        outfile << *theIterator << endl;
+      }
+
+    }
+    else
+    {
+      // NOTE(review): message says "filenames" but the files written above
+      // contain class ids — consider rewording in a follow-up commit.
+      cerr << "please define directory for writing filenames in config: SemSegCsurka::cndir" << endl;
+      exit( -1 );
+    }
+  }
+
+  return 0;
+}

+ 556 - 0
progs/testActiveSemanticSegmentation.cpp

@@ -0,0 +1,556 @@
+// Beispielhafter Aufruf: BUILD_x86_64/progs/testActiveSemanticSegmentation -config <CONFIGFILE>
+
+/**
+* @file testActiveSemanticSegmentation.cpp
+* @brief test semantic segmentation routines with actively selecting regions for labeling
+* @author Alexander Freytag
+* @date 27-02-2013
+*/
+
+#ifdef NICE_USELIB_OPENMP
+#include <omp.h>
+#endif
+
+#include "core/basics/Config.h"
+#include "core/basics/StringTools.h"
+#include <vislearning/baselib/ICETools.h>
+
+#include <semseg/semseg/SemanticSegmentation.h>
+#include <semseg/semseg/SemSegLocal.h>
+#include <semseg/semseg/SemSegCsurka.h>
+#include <semseg/semseg/SemSegNovelty.h>
+#include <semseg/semseg/SemSegNoveltyBinary.h>
+#include <semseg/semseg/SemSegContextTree.h>
+
+#include "core/image/FilterT.h"
+
+#include <core/basics/ResourceStatistics.h>
+
+#include <fstream>
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+// Accumulates a confusion matrix M( gtClass, predictedClass ) by comparing
+// the predicted label image 'img' against the ground-truth image 'gt'.
+// The prediction may have a different resolution: each gt pixel is mapped
+// to a prediction pixel via nearest-neighbor subsampling (with clamping at
+// the image borders). Pixels whose ground-truth class is contained in
+// 'forbidden_classes' are skipped.
+void updateMatrix( const NICE::Image & img, const NICE::Image & gt,
+                   NICE::Matrix & M, const set<int> & forbidden_classes )
+{
+  // per-axis scale factors between ground truth and prediction
+  double subsamplex = gt.width() / ( double )img.width();
+  double subsampley = gt.height() / ( double )img.height();
+
+  for ( int y = 0 ; y < gt.height() ; y++ )
+    for ( int x = 0 ; x < gt.width() ; x++ )
+    {
+      // nearest prediction pixel for this gt pixel, clamped to valid range
+      int xx = ( int )( x / subsamplex );
+      int yy = ( int )( y / subsampley );
+
+      if ( xx < 0 ) xx = 0;
+
+      if ( yy < 0 ) yy = 0;
+
+      if ( xx > img.width() - 1 ) xx = img.width() - 1;
+
+      if ( yy > img.height() - 1 ) yy = img.height() - 1;
+
+      int cimg = img.getPixel( xx, yy );
+
+      int gimg = gt.getPixel( x, y );
+
+      if ( forbidden_classes.find( gimg ) == forbidden_classes.end() )
+      {
+        M( gimg, cimg )++;
+      }
+    }
+}
+
+/**
+ test semantic segmentation routines
+*/
+// Active-learning semantic-segmentation loop: trains a segmentation method
+// chosen via config key main::method, then for main::activeIterations rounds
+// (1) segments all test images, (2) accumulates/evaluates a confusion matrix,
+// and (3) incrementally updates the classifier with actively queried novel
+// examples via addNovelExamples().
+int main( int argc, char **argv )
+{
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+  
+  ResourceStatistics rs;
+  
+  bool show_result = conf.gB( "debug", "show_results", false );
+
+  bool write_results = conf.gB( "debug", "write_results", false );
+
+  // NOTE(review): only used inside the commented-out PASCAL export below
+  bool write_results_pascal = conf.gB( "debug", "write_results_pascal", false );
+
+  std::string resultdir = conf.gS( "debug", "resultdir", "." );
+  
+  //how often do we want to iterate between sem-seg and active query?
+  int activeIterations = conf.gI("main", "activeIterations", 1 );
+    
+  if ( write_results )
+  {
+    cerr << "Writing Results to " << resultdir << endl;
+  }
+
+  MultiDataset md( &conf );
+
+  const ClassNames & classNames = md.getClassNames( "train" );
+
+  string method = conf.gS( "main", "method", "SSCsurka" );
+
+  //currently, we only allow SemSegNovelty, because it implements addNovelExamples()
+  SemanticSegmentation *semseg = NULL;
+  
+  // NOTE(review): if 'method' matches none of the cases below, semseg stays
+  // NULL and the semanticseg() call in the loop will dereference it — an
+  // explicit fatal error here would fail faster.
+      Timer timer;
+      timer.start();
+  if ( method == "SSCsurka" )
+  {
+    semseg = new SemSegCsurka( &conf, &md );
+  }
+  else if ( method == "SSContext" )
+  {
+    semseg = new SemSegContextTree( &conf, &md );
+  }
+  else if( method == "SSNovelty" )
+  {
+    semseg = new SemSegNovelty( &conf, &md );
+  }
+  else if( method == "SSNoveltyBinary" )
+  {
+    semseg = new SemSegNoveltyBinary( &conf, &md );
+  }  
+  timer.stop();
+  std::cerr << "AL time for training: " << timer.getLast() << std::endl;
+
+  const LabeledSet *testFiles = md["test"];
+
+  // confusion matrix, indexed (ground truth class, predicted class)
+  NICE::Matrix M( classNames.getMaxClassno() + 1, classNames.getMaxClassno() + 1 );
+
+  M.set( 0 );
+
+  std::set<int> forbidden_classes;
+  std::string forbidden_classes_s = conf.gS( "analysis", "forbidden_classesTrain", "" );
+  classNames.getSelection( forbidden_classes_s, forbidden_classes );
+  
+  std::set<int> forbidden_classesForActiveLearning;
+  std::string forbidden_classesForActiveLearning_s = conf.gS( "analysis", "forbidden_classesForActiveLearning", "" );
+  classNames.getSelection( forbidden_classesForActiveLearning_s, forbidden_classesForActiveLearning );
+  
+
+  for (int iterationCount = 0; iterationCount < activeIterations; iterationCount++)
+  {
+      //TODO shouldn't we clean the confusion matrix at the beginning of each iteration?
+    
+    std::cerr << "SemSeg AL Iteration: " << iterationCount << std::endl;
+    semseg->setIterationCountSuffix(iterationCount);
+    
+//     ProgressBar pb( "Semantic Segmentation Analysis" );
+// 
+//     pb.show();
+
+    int fileno = 0;
+
+    std::cerr << "start looping over all files" << std::endl;
+    LOOP_ALL_S( *testFiles )
+    {
+      EACH_INFO( classno, info );
+      std::string file = info.img();
+
+      NICE::Image lm;
+      NICE::MultiChannelImageT<double> probabilities;
+
+      if ( info.hasLocalizationInfo() )
+      {
+        const LocalizationResult *l_gt = info.localization();
+
+        lm.resize( l_gt->xsize, l_gt->ysize );
+        //lm.set( 0 );
+        l_gt->calcLabeledImage( lm, classNames.getBackgroundClass() );
+      }
+
+      // run segmentation; lm is overwritten with the predicted label image
+      semseg->semanticseg( file, lm, probabilities );
+
+      fprintf( stderr, "testSemanticSegmentation: Segmentation finished !\n" );
+
+      //ground truth image, needed for updating the confusion matrix
+      //TODO check whether this is really needed, since we computed such a label image already within SemSegNovelty
+      NICE::Image lm_gt;
+
+      if ( info.hasLocalizationInfo() )
+      {
+        const LocalizationResult *l_gt = info.localization();
+
+        lm_gt.resize( l_gt->xsize, l_gt->ysize );
+        lm_gt.set( 0 );
+
+        fprintf( stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n" );
+        l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+      }
+// // // 
+// // //       std::string fname = StringTools::baseName( file, false );
+// // // 
+// // //       if ( write_results_pascal )
+// // //       {
+// // // 
+// // //         NICE::Image pascal_lm( lm.width(), lm.height() );
+// // //         int backgroundClass = classNames.getBackgroundClass();
+// // // 
+// // //         for ( int y = 0 ; y < lm.height(); y++ )
+// // //           for ( int x = 0 ; x < lm.width(); x++ )
+// // //           {
+// // //             int v = lm.getPixel( x, y );
+// // // 
+// // //             if ( v == backgroundClass )
+// // //               pascal_lm.setPixel( x, y, 255 );
+// // //             else
+// // //               pascal_lm.setPixel( x, y, 255 - v - 1 );
+// // //           }
+// // // 
+// // //         char filename[1024];
+// // // 
+// // //         char *format = ( char * )"pgm";
+// // //         sprintf( filename, "%s/%s.%s", resultdir.c_str(), fname.c_str(), format );
+// // // 
+// // //         pascal_lm.write( filename );
+// // //       }
+// // // 
+      // optionally visualize and/or dump the prediction and ground truth
+      if ( show_result || write_results )
+      {
+        NICE::ColorImage orig( file );
+        NICE::ColorImage rgb;
+        NICE::ColorImage rgb_gt;
+
+        classNames.labelToRGB( lm, rgb );
+
+        classNames.labelToRGB( lm_gt, rgb_gt );
+
+        if ( write_results )
+        {
+  //         char filename[1024];
+  //         char *format = ( char * )"ppm";
+  //         sprintf( filename, "%06d.%s", fileno, format );
+  //         std::string origfilename = resultdir + "/orig_" + string( filename );
+  //         cerr << "Writing to file " << origfilename << endl;
+  //         orig.write( origfilename );
+  //         rgb.write( resultdir + "/result_" + string( filename ) );
+  //         rgb_gt.write( resultdir + "/groundtruth_" + string( filename ) );
+          
+          std::stringstream out;       
+          std::vector< std::string > myList;
+          StringTools::split ( Globals::getCurrentImgFN (), '/', myList );
+          out << resultdir << "/" << myList.back();
+          cerr << "Writing to file " << resultdir << "/"<< myList.back() << endl;
+          
+          std::string noveltyMethodString = conf.gS( "SemSegNovelty",  "noveltyMethod", "gp-variance");
+          orig.write ( out.str() + "_orig.ppm" );
+          rgb.write ( out.str() + "_" + noveltyMethodString + "_result_run_" + NICE::intToString(iterationCount) + ".ppm" );
+          rgb_gt.write ( out.str() + "_groundtruth.ppm" );
+        }
+
+        if ( show_result )
+        {
+  #ifndef NOVISUAL
+          showImage( rgb, "Result" );
+          showImage( rgb_gt, "Groundtruth" );
+          showImage( orig, "Input" );
+  #endif
+        }
+      }
+
+  //#pragma omp critical
+      updateMatrix( lm, lm_gt, M, forbidden_classes );
+
+      std::cerr << M << std::endl;
+
+      fileno++;
+
+//       pb.update( testFiles->count() );
+    } //Loop over all test images
+
+//     pb.hide();
+
+    //**********************************************
+    //                  EVALUATION 
+    //   COMPUTE CONFUSION MAT AND FINAL SCORES
+    //**********************************************
+    timer.start();
+    
+    long maxMemory;
+    rs.getMaximumMemory(maxMemory);
+    cerr << "Maximum memory used: " << maxMemory << " KB" << endl;
+    
+    // overall accuracy = trace(M) / sum(M)
+    double overall = 0.0;
+    double sumall = 0.0;
+
+    for ( int r = 0; r < ( int )M.rows(); r++ )
+    {
+      for ( int c = 0; c < ( int )M.cols(); c++ )
+      {
+        if ( r == c )
+          overall += M( r, c );
+
+        sumall += M( r, c );
+      }
+    }
+
+    overall /= sumall;
+
+    // normalizing M using rows
+
+    for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+    {
+      double sum = 0.0;
+
+      for ( int c = 0 ; c < ( int )M.cols() ; c++ )
+        sum += M( r, c );
+
+      if ( fabs( sum ) > 1e-4 )
+        for ( int c = 0 ; c < ( int )M.cols() ; c++ )
+          M( r, c ) /= sum;
+    }
+
+    std::cerr << M << std::endl;
+
+    // average per-class recall over classes that actually appeared
+    double avg_perf = 0.0;
+    int classes_trained = 0;
+
+    for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+    {
+      if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+      {
+        avg_perf += M( r, r );
+        double lsum = 0.0;
+        for(int r2 = 0; r2 < ( int )M.rows(); r2++)
+        {
+          lsum += M(r,r2);
+        }
+        if(lsum != 0.0)
+        {
+          classes_trained++;
+        }
+      }
+    }
+
+    if ( write_results )
+    {
+      // NOTE(review): res.txt is truncated each iteration, so only the last
+      // iteration's scores survive — confirm whether that is intended.
+      ofstream fout(( resultdir + "/res.txt" ).c_str(), ios::out );
+      fout <<  "overall: " << overall << endl;
+      fout << "Average Performance " << avg_perf / ( classes_trained ) << endl;
+      fout << "Lower Bound " << 1.0  / classes_trained << endl;
+
+      for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+      {
+        if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+        {
+          std::string classname = classNames.text( r );
+          fout << classname.c_str() << ": " << M( r, r ) << endl;
+        }
+      }
+
+      fout.close();
+    }
+
+    fprintf( stderr, "overall: %f\n", overall );
+
+    fprintf( stderr, "Average Performance %f\n", avg_perf / ( classes_trained ) );
+    //fprintf(stderr, "Lower Bound %f\n", 1.0 / classes_trained);
+
+    for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+    {
+      if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+      {
+        std::string classname = classNames.text( r );
+        fprintf( stderr, "%s: %f\n", classname.c_str(), M( r, r ) );
+      }
+    }
+    
+    timer.stop();
+    std::cout << "AL time for evaluation: " << timer.getLastAbsolute() << std::endl;
+    
+    //**********************************************
+    //          READ QUERY SCORE IMAGES
+    //   AND SELECT THE REGION TO BE LABELED
+    //**********************************************
+    //NOTE this is not needed anymore, since we store everything within SemSegNovelty
+    //However, it is still needed if we use the NN-classifier for the feature learning approach
+    
+//     string alSection = "SemSegNovelty";
+//     std::string noveltyMethodString = conf.gS( alSection,  "noveltyMethod", "gp-variance");
+//     std::string uncertdir = conf.gS("debug", "resultdir", "result");
+//     int testWSize = conf.gI(alSection, "test_window_size", 10);   
+//     
+//     float maxVal(0);
+//     int maxValX(0);
+//     int maxValY(0);
+//     std::vector<ImageInfo *>::const_iterator maxValInfoIt = testFiles->begin()->second.begin();
+//     
+//     
+//     for(LabeledSet::const_iterator outerIt = testFiles->begin() ; outerIt != testFiles->end() ; outerIt++)
+//     {
+//       for ( std::vector<ImageInfo *>::const_iterator imageIt = outerIt->second.begin(); imageIt != outerIt->second.end(); imageIt++ )    
+//       {
+//         const ImageInfo & (info) = *(*imageIt);
+//         
+//         std::string file = info.img();
+//         
+//         std::stringstream dest;
+//         std::vector< std::string > list2;
+//         StringTools::split ( file, '/', list2 );
+//         dest << uncertdir << "/" << list2.back();      
+//         
+//         FloatImage noveltyImage;
+//         noveltyImage.readRaw(dest.str() + "_run_" +  NICE::intToString(iterationCount) + "_" + noveltyMethodString+".rawfloat");
+//         
+//         int xsize ( noveltyImage.width() );
+//         int ysize ( noveltyImage.height() );
+//         
+//         //compute the GT-image to ensure that we only query "useful" new features, i.e., not query background or similar "forbidden" stuff
+//         NICE::Image lm_gt;
+//         if ( (*maxValInfoIt)->hasLocalizationInfo() )
+//         {
+//           const LocalizationResult *l_gt = (*maxValInfoIt)->localization();
+// 
+//           lm_gt.resize( l_gt->xsize, l_gt->ysize );
+//           lm_gt.set( 0 );
+// 
+//           l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+//         }                
+//         
+//         for ( int y = 0; y < ysize; y += testWSize )
+//         {
+//           for ( int x = 0; x < xsize; x += testWSize)
+//           {
+//             if ( (noveltyImage ( x, y ) > maxVal) && (  forbidden_classesForActiveLearning.find ( lm_gt(x, y) ) == forbidden_classesForActiveLearning.end() ) )
+//             {
+//               maxVal =  noveltyImage ( x, y );
+//               maxValX = x;
+//               maxValY = y;
+//               maxValInfoIt = imageIt;
+//             }
+//           }
+//         }
+//         
+//       }//iterate over inner loop
+//     }//iterate over testFiles
+// 
+//     
+//       std::cerr << "maxVal: " << maxVal << " maxValX: " << maxValX << " maxValY: " << maxValY << " maxValInfo: " << (*maxValInfoIt)->img() << std::endl;
+    
+    //**********************************************
+    //          INCLUDE THE NEW INFORMATION
+    //           AND UPDATE THE CLASSIFIER
+    //**********************************************    
+      
+     timer.start();
+     semseg->addNovelExamples(); 
+     
+     timer.stop();
+     std::cout << "AL time for incremental update: " << timer.getLastAbsolute() << std::endl;
+     //alternatively, we could call the destructor of semseg, and create it again, which does the same thing 
+     // (add new features, save the classifier, re-read it after initialization)
+     //BUT this would not setup the forbidden and known classes properly!!! We should fix that!
+     
+     const Examples * novelExamples = semseg->getNovelExamples(); 
+//      std::cerr << " ==================================== " << std::endl;
+//      std::cerr << "new examples to be added: " << std::endl;
+//      for ( uint i = 0 ; i < novelExamples->size() ; i++ )
+//      {
+//         std::cerr << (*novelExamples)[i].first << " "; (*novelExamples)[i].second.store(std::cerr);
+//      }
+//      std::cerr << " ==================================== " << std::endl;
+     
+    //check which classes will be added using the features from the novel region
+    std::set<int> newClassNumbers;
+    newClassNumbers.clear(); //just to be sure  
+    for ( uint i = 0 ; i < novelExamples->size() ; i++ )
+    {
+      if (newClassNumbers.find( (*novelExamples)[i].first /* classNumber*/) == newClassNumbers.end() )
+      {
+        newClassNumbers.insert( (*novelExamples)[i].first );
+      }
+    }
+
+    //accept the new classes as valid information
+    for (std::set<int>::const_iterator clNoIt = newClassNumbers.begin(); clNoIt != newClassNumbers.end(); clNoIt++)
+    {
+      if ( forbidden_classes.find ( *clNoIt ) != forbidden_classes.end() )
+      {
+        forbidden_classes.erase(*clNoIt);
+      }
+    }       
+      
+    //NOTE Below comes the old version:
+    // it is not needed anymore, since we store everything within SemSegNovelty
+    //However, it is still needed if we use the NN-classifier for the feature learning approach      
+//     //  ----------------------------------------------------
+//     //  therefore, we first recompute the features for the whole image and
+//     //take the one which we desire
+//       
+//     //this is NOT efficient, but a nice and easy first step
+//       
+//     NICE::ColorImage img ( (*maxValInfoIt)->img() );
+//     
+//     MultiChannelImageT<double> feats;
+// 
+//     // extract features
+//     LFColorWeijer * featExtract = new LFColorWeijer ( &conf );
+//     featExtract->getFeats ( img, feats );
+//     int featdim = feats.channels();
+//     feats.addChannel(featdim);
+// 
+//     for (int c = 0; c < featdim; c++)
+//     {
+//       ImageT<double> tmp = feats[c];
+//       ImageT<double> tmp2 = feats[c+featdim];
+// 
+//       NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
+//     }
+//     featdim += featdim;
+// 
+//     // compute integral images
+//     for ( int c = 0; c < featdim; c++ )
+//     {
+//       feats.calcIntegral ( c );
+//     }    
+//     
+//     //  ----------------------------------------------------
+//     //now take the feature
+//     NICE::Vector newFeature(featdim);
+//     for ( int f = 0; f < featdim; f++ )
+//     {
+//       double val = feats.getIntegralValue ( maxValX - testWSize, maxValY - testWSize, maxValX + testWSize, maxValY + testWSize, f );
+//       newFeature[f] = val;
+//     }
+//     newFeature.normalizeL1();    
+//     
+//     NICE::Image lm_gt;
+//     // take the gt class number as well    
+//     if ( (*maxValInfoIt)->hasLocalizationInfo() )
+//     {
+//       const LocalizationResult *l_gt = (*maxValInfoIt)->localization();
+// 
+//       lm_gt.resize( l_gt->xsize, l_gt->ysize );
+//       lm_gt.set( 0 );
+// 
+//       l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+//     }
+//     int classNoGT = lm_gt(maxValX, maxValY);
+//     std::cerr << "class number GT: " << classNoGT << std::endl;
+//     
+//     
+//     semseg->addNewExample(newFeature, classNoGT);
+//     
+//     //accept the new class as valid information
+//     if ( forbidden_classes.find ( classNoGT ) != forbidden_classes.end() )
+//     {
+//       forbidden_classes.erase(classNoGT);
+//     }    
+    
+    std::cerr << "iteration finished - start the next round" << std::endl;
+    
+  } //iterationCount
+
+  delete semseg;
+
+  return 0;
+}

+ 284 - 0
progs/testActiveSemanticSegmentationBinary.cpp

@@ -0,0 +1,284 @@
+// Beispielhafter Aufruf: BUILD_x86_64/progs/testActiveSemanticSegmentationBinary -config <CONFIGFILE>
+
+/**
+* @file testActiveSemanticSegmentationBinary.cpp
+* @brief test semantic segmentation routines with actively selecting regions for labeling
+* @author Alexander Freytag
+* @date 27-02-2013
+*/
+
+#ifdef NICE_USELIB_OPENMP
+#include <omp.h>
+#endif
+
+#include "core/basics/Config.h"
+#include "core/basics/StringTools.h"
+#include <vislearning/baselib/ICETools.h>
+
+#include <semseg/semseg/SemanticSegmentation.h>
+#include <semseg/semseg/SemSegLocal.h>
+#include <semseg/semseg/SemSegCsurka.h>
+#include <semseg/semseg/SemSegNovelty.h>
+#include <semseg/semseg/SemSegNoveltyBinary.h>
+#include <semseg/semseg/SemSegContextTree.h>
+
+#include "core/image/FilterT.h"
+
+#include <core/basics/ResourceStatistics.h>
+
+#include <fstream>
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+/**
+ test semantic segmentation routines
+*/
+// Binary variant of the active-learning segmentation loop: always uses
+// SemSegNoveltyBinary, evaluates each round with an AUC score instead of a
+// confusion matrix, and incrementally updates the classifier with actively
+// queried novel examples via addNovelExamples().
+int main( int argc, char **argv )
+{
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+  
+  ResourceStatistics rs;
+  
+  // NOTE(review): debug output only — prints an empty matrix; consider removing
+  NICE::MatrixT<double> matTemp;
+  std::cerr << "foo " << std::endl;
+  std::cerr << matTemp << std::endl;
+  
+  bool show_result = conf.gB( "debug", "show_results", false );
+
+  bool write_results = conf.gB( "debug", "write_results", false );
+
+  // NOTE(review): read but never used in this program
+  bool write_results_pascal = conf.gB( "debug", "write_results_pascal", false );
+
+  std::string resultdir = conf.gS( "debug", "resultdir", "." );
+  
+  //how often do we want to iterate between sem-seg and active query?
+  int activeIterations = conf.gI("main", "activeIterations", 1 );
+    
+  if ( write_results )
+  {
+    cerr << "Writing Results to " << resultdir << endl;
+  }
+
+  MultiDataset md( &conf );
+
+  const ClassNames & classNames = md.getClassNames( "train" );
+
+  // NOTE(review): 'method' is read but ignored — the binary variant below
+  // is instantiated unconditionally
+  string method = conf.gS( "main", "method", "SSCsurka" );
+
+  //currently, we only allow SemSegNoveltyBinary, because it implements addNovelExamples()
+  SemSegNoveltyBinary *semseg = NULL;
+  
+  Timer timer;
+  timer.start();
+
+  semseg = new SemSegNoveltyBinary( &conf, &md );
+
+  timer.stop();
+  
+  std::cerr << "AL time for training: " << timer.getLast() << std::endl;
+
+  const LabeledSet *testFiles = md["test"];
+
+  std::set<int> forbidden_classes;
+  std::string forbidden_classes_s = conf.gS( "analysis", "forbidden_classesTrain", "" );
+  classNames.getSelection( forbidden_classes_s, forbidden_classes );
+  
+  std::set<int> forbidden_classesForActiveLearning;
+  std::string forbidden_classesForActiveLearning_s = conf.gS( "analysis", "forbidden_classesForActiveLearning", "" );
+  classNames.getSelection( forbidden_classesForActiveLearning_s, forbidden_classesForActiveLearning );
+  
+  
+  // class id treated as the "positive" class for the binary AUC evaluation
+  int positiveClass;
+  
+  //check whether we have a single positive class
+  std::string positiveClass_s = conf.gS ( "SemSegNoveltyBinary", "positiveClass", "" );
+  std::set<int> positiveClassNumberTmp;
+  classNames.getSelection ( positiveClass_s, positiveClassNumberTmp );  
+
+  switch ( positiveClassNumberTmp.size() )
+  {
+    case 0:
+    {
+      positiveClass = 0;
+//       std::cerr << "no positive class given, assume 0 as positive class" << std::endl;
+      break;
+    }
+    case 1:
+    {
+      positiveClass = *(positiveClassNumberTmp.begin());
+//       std::cerr << "positive class will be number" << positiveClass << " with the name: " << positiveClass_s << std::endl;
+      break;
+    }
+    default:
+    {
+      //we specified more than a single positive class. right now, this is not what we are interested in, but 
+      //in theory we could also accept this and convert positiveClass into a set of ints of possible positive classes
+      positiveClass = 0;
+//       std::cerr << "no positive class given, assume 0 as positive class" << std::endl;
+      break;
+    }
+  }  
+ 
+  
+  std::cerr << "number of AL iterations: " << activeIterations << std::endl;
+  for (int iterationCount = 0; iterationCount < activeIterations; iterationCount++)
+  {    
+    std::cerr << "SemSeg AL Iteration: " << iterationCount << std::endl;
+    semseg->setIterationCountSuffix(iterationCount);
+
+    int fileno = 0;
+
+    std::cerr << "start looping over all files" << std::endl;
+    LOOP_ALL_S( *testFiles )
+    {
+      EACH_INFO( classno, info );
+      std::string file = info.img();
+
+      NICE::Image lm;
+      NICE::MultiChannelImageT<double> probabilities;
+
+      if ( info.hasLocalizationInfo() )
+      {
+        const LocalizationResult *l_gt = info.localization();
+
+        lm.resize( l_gt->xsize, l_gt->ysize );
+        //lm.set( 0 );
+        l_gt->calcLabeledImage( lm, classNames.getBackgroundClass() );
+      }
+
+      // run segmentation via the base-class interface; lm becomes the prediction
+      ((SemanticSegmentation*)semseg)->semanticseg( file, lm, probabilities );
+
+      fprintf( stderr, "testSemanticSegmentation: Segmentation finished !\n" );
+
+      //ground truth image, needed for updating the confusion matrix
+      //TODO check whether this is really needed, since we computed such a label image already within SemSegNovelty
+      NICE::Image lm_gt;
+
+      if ( info.hasLocalizationInfo() )
+      {
+        const LocalizationResult *l_gt = info.localization();
+
+        lm_gt.resize( l_gt->xsize, l_gt->ysize );
+        lm_gt.set( 0 );
+
+        fprintf( stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n" );
+        l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+      }
+
+      // optionally visualize and/or dump the prediction and ground truth
+      if ( show_result || write_results )
+      {
+        NICE::ColorImage orig( file );
+        NICE::ColorImage rgb;
+        NICE::ColorImage rgb_gt;
+
+        classNames.labelToRGB( lm, rgb );
+
+        classNames.labelToRGB( lm_gt, rgb_gt );
+
+        if ( write_results )
+        {
+          
+          std::stringstream out;       
+          std::vector< std::string > myList;
+          StringTools::split ( Globals::getCurrentImgFN (), '/', myList );
+          out << resultdir << "/" << myList.back();
+          cerr << "Writing to file " << resultdir << "/"<< myList.back() << endl;
+          
+          std::string noveltyMethodString = conf.gS( "SemSegNoveltyBinary",  "noveltyMethod", "gp-variance");
+          orig.write ( out.str() + "_orig.ppm" );
+          rgb.write ( out.str() + "_" + noveltyMethodString + "_result_run_" + NICE::intToString(iterationCount) + ".ppm" );
+          rgb_gt.write ( out.str() + "_groundtruth.ppm" );
+        }
+
+        if ( show_result )
+        {
+  #ifndef NOVISUAL
+          showImage( rgb, "Result" );
+          showImage( rgb_gt, "Groundtruth" );
+          showImage( orig, "Input" );
+  #endif
+        }
+      }
+
+
+      fileno++;
+
+    } //Loop over all test images
+
+
+    //**********************************************
+    //                  EVALUATION 
+    //   COMPUTE CONFUSION MAT AND FINAL SCORES
+    //**********************************************
+    timer.start();
+    
+    double score = semseg->getAUCPerformance();
+    std::cerr << "auc scores of run : " << iterationCount << " : " << score << std::endl;
+    
+    long maxMemory;
+    rs.getMaximumMemory(maxMemory);
+    cerr << "Maximum memory used: " << maxMemory << " KB" << endl;
+
+    
+    timer.stop();
+    std::cout << "AL time for evaluation: " << timer.getLastAbsolute() << std::endl;
+    
+
+    
+    //**********************************************
+    //          INCLUDE THE NEW INFORMATION
+    //           AND UPDATE THE CLASSIFIER
+    //**********************************************    
+      
+     timer.start();
+     semseg->addNovelExamples(); 
+     
+     timer.stop();
+     std::cout << "AL time for incremental update: " << timer.getLastAbsolute() << std::endl;
+     //alternatively, we could call the destructor of semseg, and create it again, which does the same thing 
+     // (add new features, save the classifier, re-read it after initialization)
+     //BUT this would not setup the forbidden and known classes properly!!! We should fix that!
+     
+     const Examples * novelExamples = semseg->getNovelExamples(); 
+//      std::cerr << " ==================================== " << std::endl;
+//      std::cerr << "new examples to be added: " << std::endl;
+//      for ( uint i = 0 ; i < novelExamples->size() ; i++ )
+//      {
+//         std::cerr << (*novelExamples)[i].first << " "; (*novelExamples)[i].second.store(std::cerr);
+//      }
+//      std::cerr << " ==================================== " << std::endl;
+     
+    //check which classes will be added using the features from the novel region
+    std::set<int> newClassNumbers;
+    newClassNumbers.clear(); //just to be sure  
+    for ( uint i = 0 ; i < novelExamples->size() ; i++ )
+    {
+      if (newClassNumbers.find( (*novelExamples)[i].first /* classNumber*/) == newClassNumbers.end() )
+      {
+        newClassNumbers.insert( (*novelExamples)[i].first );
+      }
+    }
+
+    //accept the new classes as valid information
+    for (std::set<int>::const_iterator clNoIt = newClassNumbers.begin(); clNoIt != newClassNumbers.end(); clNoIt++)
+    {
+      if ( forbidden_classes.find ( *clNoIt ) != forbidden_classes.end() )
+      {
+        forbidden_classes.erase(*clNoIt);
+      }
+    }       
+
+    std::cerr << "iteration finished - start the next round" << std::endl;
+    
+  } //iterationCount
+
+  delete semseg;
+
+  return 0;
+}

+ 332 - 0
progs/testClassifier.cpp

@@ -0,0 +1,332 @@
+/**
+* @file testClassifier.cpp
+* @brief main program for classifier evaluation
+* @author Erik Rodner
+* @date 2007-10-12
+*/
+
+#include <fstream>
+#include <iostream>
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include "vislearning/classifier/genericClassifierSelection.h"
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/cbaselib/MutualInformation.h>
+
+#include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
+#include <vislearning/classifier/fpclassifier/randomforest/FPCRandomForestTransfer.h>
+#include <vislearning/classifier/classifierinterfaces/VCFeaturePool.h>
+
+#include "core/basics/Config.h"
+#include <vislearning/baselib/Preprocess.h>
+#include <core/basics/StringTools.h>
+
+#undef DEBUG
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+void binarizeVector( NICE::Vector & xout, const NICE::Vector & x, const NICE::Vector & thresholds )
+{
+  xout.resize( x.size() );
+
+  for ( size_t i = 0 ; i < x.size() ; i++ )
+    if ( fabs( x[i] ) > thresholds[i] )
+      xout[i] = 1.0;
+    else
+      xout[i] = 0.0;
+}
+
+void binarizeSet( LabeledSetVector & dst, const LabeledSetVector & src, const NICE::Vector & thresholds )
+{
+  LOOP_ALL( src )
+  {
+    EACH( classno, x );
+    NICE::Vector dstv;
+    binarizeVector( dstv, x, thresholds );
+    dst.add( classno, dstv );
+  }
+}
+
+int main( int argc, char **argv )
+{
+  fprintf( stderr, "testClassifier: init\n" );
+
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+
+  string wekafile = conf.gS( "main", "weka", "" );
+  string trainfn = conf.gS( "main", "train", "train.vec" );
+  string testfn = conf.gS( "main", "test", "test.vec" );
+  int format = conf.gI( "main", "format", 0 );
+  bool binarize = conf.gB( "main", "binarize", false );
+  int wekaclass = conf.gI( "main", "wekaclass", 1 );
+  string classifier_cache = conf.gS( "main", "classifiercache", "" );
+  string classifier_cache_in = conf.gS( "main", "classifierin", "" );
+  int numRuns = conf.gI( "main", "runs", 1 );
+  string writeImgNet = conf.gS( "main", "imgnet", "" );
+
+  // classno:text,classno:text,...
+  string classes = conf.gS( "main", "classes", "" );
+  int classesnb = conf.gI( "main", "classes", 0 );
+  string classesconf = conf.gS( "main", "classesconf", "" );
+
+  fprintf( stderr, "testClassifier: reading config\n" );
+  Preprocess::Init( &conf );
+
+  fprintf( stderr, "testClassifier: reading multi dataset\n" );
+  int testMaxClassNo;
+  int trainMaxClassNo;
+
+  ClassNames *classNames;
+
+  if ( classes.size() == 0 && classesnb != 0 )
+  {
+    classNames = new ClassNames();
+
+    for ( int classno = 0 ; classno < classesnb ; classno++ )
+    {
+      classNames->addClass( classno, StringTools::convertToString<int> ( classno ), StringTools::convertToString<int> ( classno ) );
+    }
+
+    trainMaxClassNo = classNames->getMaxClassno();
+
+    testMaxClassNo = trainMaxClassNo;
+  }
+  else
+    if ( classes.size() > 0 )
+    {
+      classNames = new ClassNames();
+
+      vector<string> classes_sub;
+      StringTools::split( string( classes ), ',', classes_sub );
+
+      for ( vector<string>::const_iterator i = classes_sub.begin();
+            i != classes_sub.end(); i++ )
+      {
+        vector<string> desc;
+        StringTools::split( *i, ':', desc );
+
+        if ( desc.size() != 2 )
+          break;
+
+        int classno = StringTools::convert<int> ( desc[0] );
+
+        classNames->addClass( classno, desc[1], desc[1] );
+      }
+
+      trainMaxClassNo = classNames->getMaxClassno();
+
+      testMaxClassNo = trainMaxClassNo;
+
+      classNames->store( cout );
+    }
+    else if ( classesconf.size() > 0 ) {
+      classNames = new ClassNames();
+      Config cConf( classesconf );
+      classNames->readFromConfig( cConf, "*" );
+      trainMaxClassNo = classNames->getMaxClassno();
+      testMaxClassNo = trainMaxClassNo;
+    }
+    else
+    {
+      MultiDataset md( &conf );
+      classNames = new ClassNames( md.getClassNames( "train" ), "*" );
+      testMaxClassNo = md.getClassNames( "test" ).getMaxClassno();
+      trainMaxClassNo = md.getClassNames( "train" ).getMaxClassno();
+    }
+
+  LabeledSetVector train;
+
+  if ( classifier_cache_in.size() <= 0 )
+  {
+    fprintf( stderr, "testClassifier: Reading training dataset from %s\n", trainfn.c_str() );
+    train.read( trainfn, format );
+    train.printInformation();
+  } else {
+    fprintf( stderr, "testClassifier: skipping training set %s\n", trainfn.c_str() );
+  }
+
+  LabeledSetVector test;
+
+  fprintf( stderr, "testClassifier: Reading test dataset from %s\n", testfn.c_str() );
+  test.read( testfn, format );
+
+  ClassificationResults cresults;
+
+  ofstream outinet;
+
+  if ( writeImgNet.length() > 0 )
+  {
+    outinet.open( writeImgNet.c_str() );
+  }
+
+  for ( int runs = 0 ; runs < numRuns ; runs++ ) {
+    VecClassifier *vec_classifier = NULL;
+
+    if ( conf.gS( "main", "classifier" ) == "random_forest_transfer" )
+    {
+      FeaturePoolClassifier *fpc = new FPCRandomForestTransfer( &conf, classNames );
+      vec_classifier = new VCFeaturePool( &conf, fpc );
+    } else {
+      string classifierselection = conf.gS("main","classifier");
+      vec_classifier = GenericClassifierSelection::selectVecClassifier( &conf, classifierselection );
+    }
+
+    NICE::Vector thresholds;
+
+    if ( classifier_cache_in.size() <= 0 )
+    {
+      if ( binarize ) {
+        LabeledSetVector trainbin;
+        NICE::Vector mis;
+        MutualInformation mi;
+        fprintf( stderr, "testClassifier: computing mutual information\n" );
+        mi.computeThresholdsOverall( train, thresholds, mis );
+        fprintf( stderr, "testClassifier: done!\n" );
+        binarizeSet( trainbin, train, thresholds );
+        vec_classifier->teach( trainbin );
+      } else {
+
+        vec_classifier->teach( train );
+
+      }
+
+      vec_classifier->finishTeaching();
+
+      if ( classifier_cache.size() > 0 )
+        vec_classifier->save( classifier_cache );
+    } else {
+      vec_classifier->setMaxClassNo( classNames->getMaxClassno() );
+      vec_classifier->read( classifier_cache_in );
+    }
+
+    ProgressBar pb( "Classification" );
+
+    pb.show();
+
+    std::vector<int> count( testMaxClassNo + 1, 0 );
+
+    std::vector<int> correct( testMaxClassNo + 1, 0 );
+
+    MatrixT<int> confusionMatrix( testMaxClassNo + 1, trainMaxClassNo + 1, 0 );
+
+    int n = test.count();
+    LOOP_ALL( test )
+    {
+      EACH( classno, v );
+      pb.update( n );
+#ifdef DEBUG
+      fprintf( stderr, "\tclassification\n" );
+#endif
+      ClassificationResult r;
+
+      if ( binarize )
+      {
+        NICE::Vector vout;
+        binarizeVector( vout, v, thresholds );
+        r = vec_classifier->classify( vout );
+      } else {
+        r = vec_classifier->classify( v );
+      }
+
+      r.classno_groundtruth = classno;
+
+      r.classname = classNames->text( r.classno );
+
+#ifdef DEBUG
+
+      if ( r.classno == classno )
+        fprintf( stderr, "+ classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+      else
+        fprintf( stderr, "- classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+
+      r.scores.store( cerr );
+
+#endif
+
+      if ( writeImgNet.length() > 0 )
+      {
+        for ( int z = 1; z < r.scores.size() - 1; z++ )
+        {
+          outinet << r.scores[z] << " ";
+        }
+
+        outinet << r.scores[r.scores.size()-1] << endl;
+      }
+
+      if ( r.classno >= 0 )
+      {
+        if ( classno == r.classno ) correct[classno]++;
+
+        count[classno]++;
+
+        if ( r.ok() ) {
+          confusionMatrix( classno, r.classno )++;
+        }
+
+        cresults.push_back( r );
+      }
+    }
+
+    pb.hide();
+
+    if ( wekafile.size() > 0 )
+    {
+      string wekafile_s = wekafile;
+
+      if ( numRuns > 1 )
+        wekafile_s = wekafile_s + "." + StringTools::convertToString<int>( runs ) + ".txt";
+
+      cresults.writeWEKA( wekafile_s, wekaclass );
+    }
+
+    int count_total = 0;
+
+    int correct_total = 0;
+    int classes_tested = 0;
+    double avg_recognition = 0.0;
+
+    for ( size_t classno = 0; classno < correct.size(); classno++ )
+    {
+      if ( count[classno] == 0 ) {
+        fprintf( stdout, "class %d not tested !!\n", ( int )classno );
+      } else {
+        fprintf( stdout, "classification result class %d (\"%s\") : %5.2f %%\n",
+                 ( int )classno, classNames->text( classno ).c_str(), correct[classno]*100.0 / count[classno] );
+        avg_recognition += correct[classno] / ( double )count[classno];
+        classes_tested++;
+      }
+
+      count_total += count[classno];
+
+      correct_total += correct[classno];
+    }
+
+    avg_recognition /= classes_tested;
+
+
+    fprintf( stdout, "overall recognition rate : %-5.3f %%\n", correct_total*100.0 / count_total );
+    fprintf( stdout, "average recognition rate : %-5.3f %%\n", avg_recognition*100 );
+    fprintf( stdout, "total:%d misclassified:%d\n", count_total, count_total - correct_total );
+
+    int max_count = *( max_element( count.begin(), count.end() ) );
+    fprintf( stdout, "no of classes : %d\n", classNames->numClasses() );
+    fprintf( stdout, "lower bound 1 : %f\n", 100.0 / ( classNames->numClasses() ) );
+    fprintf( stdout, "lower bound 2 : %f\n", max_count * 100.0 / ( double ) count_total );
+
+    cout << confusionMatrix << endl;
+
+    delete vec_classifier;
+  }
+
+  delete classNames;
+
+  return 0;
+}

+ 346 - 0
progs/testClassifierGMM.cpp

@@ -0,0 +1,346 @@
+/**
+* @file testClassifier.cpp
+* @brief main program for classifier evaluation
+* @author Erik Rodner
+* @date 2007-10-12
+*/
+
+#include <fstream>
+#include <iostream>
+
+#include <vislearning/cbaselib/MultiDataset.h>
+#include "vislearning/classifier/genericClassifierSelection.h"
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/cbaselib/MutualInformation.h>
+
+#include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
+#include <vislearning/classifier/fpclassifier/randomforest/FPCRandomForestTransfer.h>
+#include <vislearning/classifier/classifierinterfaces/VCFeaturePool.h>
+
+#include "core/basics/Config.h"
+#include <vislearning/baselib/Preprocess.h>
+#include <core/basics/StringTools.h>
+
+#include "vislearning/math/cluster/GMM.h"
+
+#undef DEBUG
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+void binarizeVector( NICE::Vector & xout, const NICE::Vector & x, const NICE::Vector & thresholds )
+{
+  xout.resize( x.size() );
+
+  for ( size_t i = 0 ; i < x.size() ; i++ )
+    if ( fabs( x[i] ) > thresholds[i] )
+      xout[i] = 1.0;
+    else
+      xout[i] = 0.0;
+}
+
+void binarizeSet( LabeledSetVector & dst, const LabeledSetVector & src, const NICE::Vector & thresholds )
+{
+  LOOP_ALL( src )
+  {
+    EACH( classno, x );
+    NICE::Vector dstv;
+    binarizeVector( dstv, x, thresholds );
+    dst.add( classno, dstv );
+  }
+}
+
+int main( int argc, char **argv )
+{
+  fprintf( stderr, "testClassifier: init\n" );
+
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+
+  string wekafile = conf.gS( "main", "weka", "" );
+  string trainfn = conf.gS( "main", "train", "train.vec" );
+  string testfn = conf.gS( "main", "test", "test.vec" );
+  int format = conf.gI( "main", "format", 0 );
+  bool binarize = conf.gB( "main", "binarize", false );
+  int wekaclass = conf.gI( "main", "wekaclass", 1 );
+  string classifier_cache = conf.gS( "main", "classifiercache", "" );
+  string classifier_cache_in = conf.gS( "main", "classifierin", "" );
+  int numRuns = conf.gI( "main", "runs", 1 );
+
+  // classno:text,classno:text,...
+  string classes = conf.gS( "main", "classes", "" );
+  int classesnb = conf.gI( "main", "classes", 0 );
+  string classesconf = conf.gS( "main", "classesconf", "" );
+
+  fprintf( stderr, "testClassifier: reading config\n" );
+  Preprocess::Init( &conf );
+
+  fprintf( stderr, "testClassifier: reading multi dataset\n" );
+  int testMaxClassNo;
+  int trainMaxClassNo;
+
+
+  ClassNames *classNames;
+
+  if ( classes.size() == 0 && classesnb != 0 )
+  {
+    classNames = new ClassNames();
+
+    for ( int classno = 0 ; classno < classesnb ; classno++ )
+    {
+      classNames->addClass( classno, StringTools::convertToString<int> ( classno ), StringTools::convertToString<int> ( classno ) );
+    }
+
+    trainMaxClassNo = classNames->getMaxClassno();
+
+    testMaxClassNo = trainMaxClassNo;
+  }
+  else
+    if ( classes.size() > 0 )
+    {
+      classNames = new ClassNames();
+
+      vector<string> classes_sub;
+      StringTools::split( string( classes ), ',', classes_sub );
+
+      for ( vector<string>::const_iterator i = classes_sub.begin();
+            i != classes_sub.end(); i++ )
+      {
+        vector<string> desc;
+        StringTools::split( *i, ':', desc );
+
+        if ( desc.size() != 2 )
+          break;
+
+        int classno = StringTools::convert<int> ( desc[0] );
+
+        classNames->addClass( classno, desc[1], desc[1] );
+      }
+
+      trainMaxClassNo = classNames->getMaxClassno();
+
+      testMaxClassNo = trainMaxClassNo;
+
+      classNames->store( cout );
+    }
+    else if ( classesconf.size() > 0 ) {
+      classNames = new ClassNames();
+      Config cConf( classesconf );
+      classNames->readFromConfig( cConf, "*" );
+      trainMaxClassNo = classNames->getMaxClassno();
+      testMaxClassNo = trainMaxClassNo;
+    }
+    else
+    {
+      MultiDataset md( &conf );
+      classNames = new ClassNames( md.getClassNames( "train" ), "*" );
+      testMaxClassNo = md.getClassNames( "test" ).getMaxClassno();
+      trainMaxClassNo = md.getClassNames( "train" ).getMaxClassno();
+    }
+
+  LabeledSetVector train;
+
+  if ( classifier_cache_in.size() <= 0 )
+  {
+    fprintf( stderr, "testClassifier: Reading training dataset from %s\n", trainfn.c_str() );
+    train.read( trainfn, format );
+    train.printInformation();
+  } else {
+    fprintf( stderr, "testClassifier: skipping training set %s\n", trainfn.c_str() );
+  }
+
+  LabeledSetVector test;
+
+  fprintf( stderr, "testClassifier: Reading test dataset from %s\n", testfn.c_str() );
+  test.read( testfn, format );
+
+  GMM *gmm = NULL;
+  int nbgmm = conf.gI( "main", "gmm", 0 );
+
+  if ( nbgmm > 0 )
+  {
+    gmm = new GMM( &conf, nbgmm );
+    VVector vset;
+    Vector l;
+    train.getFlatRepresentation( vset, l );
+    gmm->computeMixture( vset );
+
+    map<int, vector<NICE::Vector *> >::iterator iter;
+
+    for ( iter = train.begin(); iter != train.end(); ++iter )
+    {
+      for ( uint i = 0; i < iter->second.size(); ++i )
+      {
+        gmm->getProbs( *( iter->second[i] ), *( iter->second[i] ) );
+      }
+    }
+
+    for ( iter = test.begin(); iter != test.end(); ++iter )
+    {
+      for ( uint i = 0; i < iter->second.size(); ++i )
+      {
+        gmm->getProbs( *( iter->second[i] ), *( iter->second[i] ) );
+      }
+    }
+  }
+
+  ClassificationResults cresults;
+
+
+  for ( int runs = 0 ; runs < numRuns ; runs++ ) {
+    VecClassifier *vec_classifier = NULL;
+
+    if ( conf.gS( "main", "classifier" ) == "random_forest_transfer" )
+    {
+      FeaturePoolClassifier *fpc = new FPCRandomForestTransfer( &conf, classNames );
+      vec_classifier = new VCFeaturePool( &conf, fpc );
+    } else {
+      vec_classifier = GenericClassifierSelection::selectVecClassifier( &conf, "main" );
+    }
+
+    NICE::Vector thresholds;
+
+    if ( classifier_cache_in.size() <= 0 )
+    {
+      if ( binarize ) {
+        LabeledSetVector trainbin;
+        NICE::Vector mis;
+        MutualInformation mi;
+        fprintf( stderr, "testClassifier: computing mutual information\n" );
+        mi.computeThresholdsOverall( train, thresholds, mis );
+        fprintf( stderr, "testClassifier: done!\n" );
+        binarizeSet( trainbin, train, thresholds );
+        vec_classifier->teach( trainbin );
+      } else {
+
+        vec_classifier->teach( train );
+
+      }
+
+      vec_classifier->finishTeaching();
+
+      if ( classifier_cache.size() > 0 )
+        vec_classifier->save( classifier_cache );
+    } else {
+      vec_classifier->setMaxClassNo( classNames->getMaxClassno() );
+      vec_classifier->read( classifier_cache_in );
+    }
+
+    ProgressBar pb( "Classification" );
+
+    pb.show();
+
+    std::vector<int> count( testMaxClassNo + 1, 0 );
+
+    std::vector<int> correct( testMaxClassNo + 1, 0 );
+
+    MatrixT<int> confusionMatrix( testMaxClassNo + 1, trainMaxClassNo + 1, 0 );
+
+    int n = test.count();
+    LOOP_ALL( test )
+    {
+      EACH( classno, v );
+      pb.update( n );
+
+      fprintf( stderr, "\tclassification\n" );
+      ClassificationResult r;
+
+      if ( binarize )
+      {
+        NICE::Vector vout;
+        binarizeVector( vout, v, thresholds );
+        r = vec_classifier->classify( vout );
+      } else {
+        r = vec_classifier->classify( v );
+      }
+
+      r.classno_groundtruth = classno;
+
+      r.classname = classNames->text( r.classno );
+
+#ifdef DEBUG
+
+      if ( r.classno == classno )
+        fprintf( stderr, "+ classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+      else
+        fprintf( stderr, "- classification %d (\"%s\") <-> %d (\"%s\") score=%f\n", classno,
+                 classNames->text( classno ).c_str(), r.classno, r.classname.c_str(), r.scores[r.classno] );
+
+#endif
+
+      r.scores.store( cerr );
+
+      if ( r.classno >= 0 )
+      {
+        if ( classno == r.classno ) correct[classno]++;
+
+        count[classno]++;
+
+        if ( r.ok() ) {
+          confusionMatrix( classno, r.classno )++;
+        }
+
+        cresults.push_back( r );
+      }
+    }
+
+    pb.hide();
+
+    if ( wekafile.size() > 0 )
+    {
+      string wekafile_s = wekafile;
+
+      if ( numRuns > 1 )
+        wekafile_s = wekafile_s + "." + StringTools::convertToString<int>( runs ) + ".txt";
+
+      cresults.writeWEKA( wekafile_s, wekaclass );
+    }
+
+    int count_total = 0;
+
+    int correct_total = 0;
+    int classes_tested = 0;
+    double avg_recognition = 0.0;
+
+    for ( size_t classno = 0; classno < correct.size(); classno++ )
+    {
+      if ( count[classno] == 0 ) {
+        fprintf( stdout, "class %d not tested !!\n", ( int )classno );
+      } else {
+        fprintf( stdout, "classification result class %d (\"%s\") : %5.2f %%\n",
+                 ( int )classno, classNames->text( classno ).c_str(), correct[classno]*100.0 / count[classno] );
+        avg_recognition += correct[classno] / ( double )count[classno];
+        classes_tested++;
+      }
+
+      count_total += count[classno];
+
+      correct_total += correct[classno];
+    }
+
+    avg_recognition /= classes_tested;
+
+
+    fprintf( stdout, "overall recognition rate : %-5.3f %%\n", correct_total*100.0 / count_total );
+    fprintf( stdout, "average recognition rate : %-5.3f %%\n", avg_recognition*100 );
+    fprintf( stdout, "total:%d misclassified:%d\n", count_total, count_total - correct_total );
+
+    int max_count = *( max_element( count.begin(), count.end() ) );
+    fprintf( stdout, "no of classes : %d\n", classNames->numClasses() );
+    fprintf( stdout, "lower bound 1 : %f\n", 100.0 / ( classNames->numClasses() ) );
+    fprintf( stdout, "lower bound 2 : %f\n", max_count * 100.0 / ( double ) count_total );
+
+    cout << confusionMatrix << endl;
+
+    delete vec_classifier;
+  }
+
+  delete classNames;
+
+  return 0;
+}

+ 29 - 0
progs/testNICE.cpp

@@ -0,0 +1,29 @@
+#include <core/image/ImageT.h>
+#include <core/imagedisplay/ImageDisplay.h>
+#include "unistd.h"
+#undef DEBUG
+
+using namespace NICE;
+using namespace std;
+
+int main ( int argc, char **argv )
+{
+  std::set_terminate ( __gnu_cxx::__verbose_terminate_handler );
+  Image tmp ( 500, 1000 );
+  tmp.set ( 0 );
+
+  //showImage(tmp);
+  displayImage ( tmp );
+
+  for ( int i = 0; i < 256; i++ )
+  {
+    tmp.set ( i );
+    sleep ( 10 );
+  }
+
+  cout << "fin" << endl;
+  getchar();
+
+
+  return 0;
+}

+ 51 - 0
progs/testRF.cpp

@@ -0,0 +1,51 @@
+/**
+* @file testRF.cpp
+* @brief test random forest implementation
+* @author Björn Fröhlich
+* @date 06/08/2010
+*/
+#include "core/basics/Config.h"
+
+#include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
+
+#include "vislearning/baselib/Globals.h"
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+int main( int argc, char **argv )
+{
+  if ( argc < 1 )
+  {
+    cerr << "Bitte Datei angeben" << endl;
+    return -1;
+  }
+
+  string filename;
+
+  filename += argv[1];
+  cout << "file: " << filename << endl;
+
+  Config *conf = new Config();
+
+  FeaturePoolClassifier *fpcrfCs = new FPCRandomForests( conf, "CsurkaForest" );
+
+  //Vector *vec = new Vector(384);
+  //Example ex(vec);
+
+  fpcrfCs->setMaxClassNo( 8 );
+  fpcrfCs->read( filename );
+  /*
+  ClassificationResult r;
+
+  if(fpcrfCs != NULL)
+  {
+   r = fpcrfCs->classify ( ex );
+  }
+  */
+  return 0;
+}

+ 337 - 0
progs/testSemanticSegmentation.cpp

@@ -0,0 +1,337 @@
+// Beispielhafter Aufruf: BUILD_x86_64/progs/testSemanticSegmentation -config <CONFIGFILE>
+
+/**
+* @file testSemanticSegmentation.cpp
+* @brief test semantic segmentation routines
+* @author Erik Rodner
+* @date 03/20/2008
+*/
+
+#ifdef NICE_USELIB_OPENMP
+#include <omp.h>
+#endif
+
+// STL includes
+#include <fstream>
+
+// nice-core includes
+#include <core/basics/Config.h>
+#include <core/basics/StringTools.h>
+#include <core/basics/ResourceStatistics.h>
+
+// nice-vislearning includes
+#include <vislearning/baselib/ICETools.h>
+
+// nice-semseg includes
+#include <semseg/semseg/SemanticSegmentation.h>
+#include <semseg/semseg/SemSegLocal.h>
+#include <semseg/semseg/SemSegCsurka.h>
+#include <semseg/semseg/SemSegNovelty.h>
+#include <semseg/semseg/SemSegContextTree.h>
+
+
+
+
+
+using namespace OBJREC;
+
+using namespace NICE;
+
+using namespace std;
+
+void updateMatrix( const NICE::Image & img, const NICE::Image & gt,
+                   NICE::Matrix & M, const set<int> & forbidden_classes )
+{
+  double subsamplex = gt.width() / ( double )img.width();
+  double subsampley = gt.height() / ( double )img.height();
+
+  for ( int y = 0 ; y < gt.height() ; y++ )
+    for ( int x = 0 ; x < gt.width() ; x++ )
+    {
+      int xx = ( int )( x / subsamplex );
+      int yy = ( int )( y / subsampley );
+
+      if ( xx < 0 ) xx = 0;
+
+      if ( yy < 0 ) yy = 0;
+
+      if ( xx > img.width() - 1 ) xx = img.width() - 1;
+
+      if ( yy > img.height() - 1 ) yy = img.height() - 1;
+
+      int cimg = img.getPixel( xx, yy );
+
+      int gimg = gt.getPixel( x, y );
+
+      if ( forbidden_classes.find( gimg ) == forbidden_classes.end() )
+      {
+        M( gimg, cimg )++;
+      }
+    }
+}
+
+/**
+ test semantic segmentation routines
+*/
+int main( int argc, char **argv )
+{
+  std::set_terminate( __gnu_cxx::__verbose_terminate_handler );
+
+  Config conf( argc, argv );
+  
+  ResourceStatistics rs;
+  
+  bool show_result = conf.gB( "debug", "show_results", false );
+
+  bool write_results = conf.gB( "debug", "write_results", false );
+
+  bool write_results_pascal = conf.gB( "debug", "write_results_pascal", false );
+
+  std::string resultdir = conf.gS( "debug", "resultdir", "." );
+
+  if ( write_results )
+  {
+    cerr << "Writing Results to " << resultdir << endl;
+  }
+
+  MultiDataset md( &conf );
+
+  const ClassNames & classNames = md.getClassNames( "train" );
+
+  string method = conf.gS( "main", "method", "SSCsurka" );
+
+  SemanticSegmentation *semseg = NULL;
+
+  if ( method == "SSCsurka" )
+  {
+    semseg = new SemSegCsurka( &conf, &md );
+  }
+  else if ( method == "SSContext" )
+  {
+    semseg = new SemSegContextTree( &conf, &md );
+  }
+  else if( method == "SSNovelty" )
+  {
+    semseg = new SemSegNovelty( &conf, &md );
+  }
+
+  //SemanticSegmentation *semseg = new SemSegLocal ( &conf, &md );
+  //SemanticSegmentation *semseg = new SemSegSTF ( &conf, &md );
+  //SemanticSegmentation *semseg = new SemSegRegionBased(&conf, &md);
+
+  const LabeledSet *testFiles = md["test"];
+
+  NICE::Matrix M( classNames.getMaxClassno() + 1, classNames.getMaxClassno() + 1 );
+
+  M.set( 0 );
+
+  set<int> forbidden_classes;
+
+  std::string forbidden_classes_s = conf.gS( "analysis", "forbidden_classes", "" );
+
+  classNames.getSelection( forbidden_classes_s, forbidden_classes );
+
+  ProgressBar pb( "Semantic Segmentation Analysis" );
+
+  pb.show();
+
+  int fileno = 0;
+
+  LOOP_ALL_S( *testFiles )
+  {
+    EACH_INFO( classno, info );
+    std::string file = info.img();
+
+    NICE::Image lm;
+    NICE::MultiChannelImageT<double> probabilities;
+
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
+
+      lm.resize( l_gt->xsize, l_gt->ysize );
+      //lm.set( 0 );
+      l_gt->calcLabeledImage( lm, classNames.getBackgroundClass() );
+    }
+
+    semseg->semanticseg( file, lm, probabilities );
+
+    fprintf( stderr, "testSemanticSegmentation: Segmentation finished !\n" );
+
+    NICE::Image lm_gt;
+
+    if ( info.hasLocalizationInfo() )
+    {
+      const LocalizationResult *l_gt = info.localization();
+
+      lm_gt.resize( l_gt->xsize, l_gt->ysize );
+      lm_gt.set( 0 );
+
+      fprintf( stderr, "testSemanticSegmentation: Generating Labeled NICE::Image (Ground-Truth)\n" );
+      l_gt->calcLabeledImage( lm_gt, classNames.getBackgroundClass() );
+    }
+
+    std::string fname = StringTools::baseName( file, false );
+
+    if ( write_results_pascal )
+    {
+
+      NICE::Image pascal_lm( lm.width(), lm.height() );
+      int backgroundClass = classNames.getBackgroundClass();
+
+      for ( int y = 0 ; y < lm.height(); y++ )
+        for ( int x = 0 ; x < lm.width(); x++ )
+        {
+          int v = lm.getPixel( x, y );
+
+          if ( v == backgroundClass )
+            pascal_lm.setPixel( x, y, 255 );
+          else
+            pascal_lm.setPixel( x, y, 255 - v - 1 );
+        }
+
+      char filename[1024];
+
+      char *format = ( char * )"pgm";
+      sprintf( filename, "%s/%s.%s", resultdir.c_str(), fname.c_str(), format );
+
+      pascal_lm.write( filename );
+    }
+
+    if ( show_result || write_results )
+    {
+      NICE::ColorImage orig( file );
+      NICE::ColorImage rgb;
+      NICE::ColorImage rgb_gt;
+
+      classNames.labelToRGB( lm, rgb );
+
+      classNames.labelToRGB( lm_gt, rgb_gt );
+
+      if ( write_results )
+      {
+        std::stringstream out;       
+        std::vector< std::string > myList;
+        StringTools::split ( Globals::getCurrentImgFN (), '/', myList );
+        out << resultdir << "/" << myList.back();
+        cerr << "Writing to file " << resultdir << "/"<< myList.back() << endl;
+        orig.write ( out.str() + "_orig.jpg" );
+        rgb.write ( out.str() + "_result.png" );
+        rgb_gt.write ( out.str() + "_groundtruth.png" );
+      }
+
+      if ( show_result )
+      {
+#ifndef NOVISUAL
+        showImage( rgb, "Result" );
+        showImage( rgb_gt, "Groundtruth" );
+        showImage( orig, "Input" );
+#endif
+      }
+    }
+
+//#pragma omp critical
+    updateMatrix( lm, lm_gt, M, forbidden_classes );
+
+    cerr << M << endl;
+
+    fileno++;
+
+    pb.update( testFiles->count() );
+  }
+
+  pb.hide();
+
+  long maxMemory;
+  rs.getMaximumMemory(maxMemory);
+  cerr << "Maximum memory used: " << maxMemory << " KB" << endl;
+  
+  double overall = 0.0;
+  double sumall = 0.0;
+
+  for ( int r = 0; r < ( int )M.rows(); r++ )
+  {
+    for ( int c = 0; c < ( int )M.cols(); c++ )
+    {
+      if ( r == c )
+        overall += M( r, c );
+
+      sumall += M( r, c );
+    }
+  }
+
+  overall /= sumall;
+
+  // normalizing M using rows
+
+  for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+  {
+    double sum = 0.0;
+
+    for ( int c = 0 ; c < ( int )M.cols() ; c++ )
+      sum += M( r, c );
+
+    if ( fabs( sum ) > 1e-4 )
+      for ( int c = 0 ; c < ( int )M.cols() ; c++ )
+        M( r, c ) /= sum;
+  }
+
+  cerr << M << endl;
+
+  double avg_perf = 0.0;
+  int classes_trained = 0;
+
+  for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+  {
+    if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+    {
+      avg_perf += M( r, r );
+      double lsum = 0.0;
+      for(int r2 = 0; r2 < ( int )M.rows(); r2++)
+      {
+        lsum += M(r,r2);
+      }
+      if(lsum != 0.0)
+      {
+        classes_trained++;
+      }
+    }
+  }
+
+  if ( write_results )
+  {
+    ofstream fout(( resultdir + "/res.txt" ).c_str(), ios::out );
+    fout <<  "overall: " << overall << endl;
+    fout << "Average Performance " << avg_perf / ( classes_trained ) << endl;
+    fout << "Lower Bound " << 1.0  / classes_trained << endl;
+
+    for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+    {
+      if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+      {
+        std::string classname = classNames.text( r );
+        fout << classname.c_str() << ": " << M( r, r ) << endl;
+      }
+    }
+
+    fout.close();
+  }
+
+  fprintf( stderr, "overall: %f\n", overall );
+
+  fprintf( stderr, "Average Performance %f\n", avg_perf / ( classes_trained ) );
+  //fprintf(stderr, "Lower Bound %f\n", 1.0 / classes_trained);
+
+  for ( int r = 0 ; r < ( int )M.rows() ; r++ )
+  {
+    if (( classNames.existsClassno( r ) ) && ( forbidden_classes.find( r ) == forbidden_classes.end() ) )
+    {
+      std::string classname = classNames.text( r );
+      fprintf( stderr, "%s: %f\n", classname.c_str(), M( r, r ) );
+    }
+  }
+
+  delete semseg;
+
+  return 0;
+}

+ 1972 - 0
semseg/SemSegContextTree.cpp

@@ -0,0 +1,1972 @@
+#include "SemSegContextTree.h"
+#include "vislearning/baselib/Globals.h"
+#include "vislearning/baselib/ProgressBar.h"
+#include "core/basics/StringTools.h"
+
+#include "vislearning/cbaselib/CachedExample.h"
+#include "vislearning/cbaselib/PascalResults.h"
+#include "vislearning/baselib/ColorSpace.h"
+#include "segmentation/RSMeanShift.h"
+#include "segmentation/RSGraphBased.h"
+#include "core/basics/numerictools.h"
+#include "core/basics/StringTools.h"
+#include "core/basics/FileName.h"
+#include "vislearning/baselib/ICETools.h"
+
+#include "core/basics/Timer.h"
+#include "core/basics/vectorio.h"
+#include "core/image/FilterT.h"
+
+#include <omp.h>
+#include <iostream>
+
+#define DEBUG
+
+using namespace OBJREC;
+using namespace std;
+using namespace NICE;
+
+/**
+ * Constructor: reads all configuration options, builds the pools of random
+ * feature operations per feature type, sets up the optional segmentation
+ * backend and the optional global categorization classifier, and finally
+ * trains the semantic segmentation context forest (or loads it from disk
+ * when save_load_data is enabled and a data file exists).
+ *
+ * @param conf configuration options
+ * @param md   multi dataset whose "train" part is used for training
+ */
+SemSegContextTree::SemSegContextTree (const Config *conf, const MultiDataset *md)
+    : SemanticSegmentation (conf, & (md->getClassNames ("train")))
+{
+  this->conf = conf;
+  string section = "SSContextTree";
+  lfcw = new LocalFeatureColorWeijer (conf);
+  firstiteration = true;
+
+  // upper bound on the number of pixels sampled when evaluating a split
+  maxSamples = conf->gI (section, "max_samples", 2000);
+
+  // minimum number of pixels in a node before it may be split further
+  minFeats = conf->gI (section, "min_feats", 50);
+
+  maxDepth = conf->gI (section, "max_depth", 10);
+
+  windowSize = conf->gI (section, "window_size", 16);
+
+  featsPerSplit = conf->gI (section, "feats_per_split", 200);
+
+  useShannonEntropy = conf->gB (section, "use_shannon_entropy", true);
+
+  nbTrees = conf->gI (section, "amount_trees", 1);
+
+  string segmentationtype = conf->gS (section, "segmentation_type", "meanshift");
+
+  useCategorization = conf->gB (section, "use_categorization", false);
+
+  cndir = conf->gS ("SSContextTree", "cndir", "");
+
+  // only allocate the GP-HIK classifier when categorization is requested and
+  // no directory with precomputed categorization results is given
+  if(useCategorization && cndir == "")
+  {
+    fasthik = new GPHIKClassifierNICE(conf);
+  }
+  else
+  {
+    fasthik = NULL;
+  }
+
+  // number of random thresholds tried per candidate feature
+  randomTests = conf->gI (section, "random_tests", 10);
+
+  bool saveLoadData = conf->gB ("debug", "save_load_data", false);
+  string fileLocation = conf->gS ("debug", "datafile", "tmp.txt");
+
+  pixelWiseLabeling = false;
+
+  useRegionFeature = conf->gB (section, "use_region_feat", false);
+
+  if (segmentationtype == "meanshift")
+    segmentation = new RSMeanShift (conf);
+  else if (segmentationtype == "none")
+  {
+    // without a segmentation backend every pixel is labeled directly and
+    // region features are unavailable
+    segmentation = NULL;
+    pixelWiseLabeling = true;
+    useRegionFeature = false;
+  }
+  else if (segmentationtype == "felzenszwalb")
+    segmentation = new RSGraphBased (conf);
+  else
+    throw ("no valid segmentation_type\n please choose between none, meanshift and felzenszwalb\n");
+
+  ftypes = conf->gI (section, "features", 100);
+
+  string featsec = "Features";
+
+  // feature type 0: operations on raw channel values
+  vector<Operation*> tops;
+
+  if (conf->gB (featsec, "minus", true))
+    tops.push_back (new Minus());
+  if (conf->gB (featsec, "minus_abs", true))
+    tops.push_back (new MinusAbs());
+  if (conf->gB (featsec, "addition", true))
+    tops.push_back (new Addition());
+  if (conf->gB (featsec, "only1", true))
+    tops.push_back (new Only1());
+  if (conf->gB (featsec, "rel_x", true))
+    tops.push_back (new RelativeXPosition());
+  if (conf->gB (featsec, "rel_y", true))
+    tops.push_back (new RelativeYPosition());
+
+  ops.push_back (tops);
+
+  // feature type 1: region-based feature
+  tops.clear();
+  tops.push_back (new RegionFeat());
+  ops.push_back (tops);
+
+  // feature types 2 and 3: integral-image (Haar-like) operations; the same
+  // pool is deliberately registered twice, once for integral images of raw
+  // channels and once for integral images of class probabilities
+  tops.clear();
+  if (conf->gB (featsec, "int", true))
+    tops.push_back (new IntegralOps());
+  if (conf->gB (featsec, "bi_int_cent", true))
+    tops.push_back (new BiIntegralCenteredOps());
+  if (conf->gB (featsec, "int_cent", true))
+    tops.push_back (new IntegralCenteredOps());
+  if (conf->gB (featsec, "haar_horz", true))
+    tops.push_back (new HaarHorizontal());
+  if (conf->gB (featsec, "haar_vert", true))
+    tops.push_back (new HaarVertical());
+  if (conf->gB (featsec, "haar_diag", true))
+    tops.push_back (new HaarDiag());
+  if (conf->gB (featsec, "haar3_horz", true))
+    tops.push_back (new Haar3Horiz());
+  if (conf->gB (featsec, "haar3_vert", true))
+    tops.push_back (new Haar3Vert());
+
+  ops.push_back (tops);
+  ops.push_back (tops);
+
+  // feature type 4: pixel-pair operations on the classification result
+  tops.clear();
+  if (conf->gB (featsec, "minus", true))
+    tops.push_back (new Minus());
+  if (conf->gB (featsec, "minus_abs", true))
+    tops.push_back (new MinusAbs());
+  if (conf->gB (featsec, "addition", true))
+    tops.push_back (new Addition());
+  if (conf->gB (featsec, "only1", true))
+    tops.push_back (new Only1());
+  if (conf->gB (featsec, "rel_x", true))
+    tops.push_back (new RelativeXPosition());
+  if (conf->gB (featsec, "rel_y", true))
+    tops.push_back (new RelativeYPosition());
+
+  ops.push_back (tops);
+
+  useGradient = conf->gB (featsec, "use_gradient", true);
+
+  useWeijer = conf->gB (featsec, "use_weijer", true);
+
+  // geometric features of hoiem
+  useHoiemFeatures = conf->gB (featsec, "use_hoiem_features", false);
+  if (useHoiemFeatures)
+  {
+    hoiemDirectory = conf->gS (featsec, "hoiem_directory");
+  }
+
+  opOverview = vector<int> (NBOPERATIONS, 0);
+  contextOverview = vector<vector<double> > (maxDepth, vector<double> (2, 0.0));
+
+  // value accessors per feature type (types 0-3 read image channels,
+  // type 4 reads the intermediate classification result)
+  calcVal.push_back (new MCImageAccess());
+  calcVal.push_back (new MCImageAccess());
+  calcVal.push_back (new MCImageAccess());
+  calcVal.push_back (new MCImageAccess());
+  calcVal.push_back (new ClassificationResultAccess());
+
+
+  classnames = md->getClassNames ("train");
+
+  ///////////////////////////////////
+  // Train Segmentation Context Trees
+  ///////////////////////////////////
+
+  if (saveLoadData)
+  {
+    if (FileMgt::fileExists (fileLocation))
+      read (fileLocation);
+    else
+    {
+      train (md);
+      write (fileLocation);
+    }
+  }
+  else
+  {
+    train (md);
+  }
+}
+
+SemSegContextTree::~SemSegContextTree()
+{
+  // Release the helpers allocated with 'new' in the constructor.  Each of
+  // them may legitimately be NULL (fasthik when categorization is disabled,
+  // segmentation for segmentation_type "none"); deleting NULL is a no-op.
+  delete lfcw;
+  delete fasthik;
+  delete segmentation;
+
+  // NOTE(review): the Operation prototypes in 'ops'/'calcVal' and the cloned
+  // split operations stored in the forest nodes are still leaked (see the
+  // FIXME in getBestSplit).  Ownership of TreeNode::feat must be clarified
+  // before those can be freed here -- TODO confirm.
+}
+
+/**
+ * Searches for the best split of tree node 'node' by drawing a balanced
+ * random subsample of the pixels currently assigned to the node, generating
+ * 'featsPerSplit' random feature operations, and trying 'randomTests' random
+ * thresholds per operation.  The (operation, threshold) pair maximizing the
+ * information gain is returned via the output parameters.
+ *
+ * @param feats        raw/integral feature channels per training image
+ * @param currentfeats per-pixel node assignment per tree
+ * @param labels       per-pixel ground-truth class labels per image
+ * @param node         index of the node to split
+ * @param splitop      [out] best split operation (NULL if no split found)
+ * @param splitval     [out] threshold for the best split
+ * @param tree         index of the tree being grown
+ * @param regionProbs  per-image, per-region class probability estimates
+ * @return information gain of the best split (0.0 if the node is not split)
+ */
+double SemSegContextTree::getBestSplit (std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<unsigned short int> > &currentfeats, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree, vector<vector<vector<double> > > &regionProbs)
+{
+  Timer t;
+  t.start();
+  int imgCount = 0;
+
+  // NOTE(review): std::vector::size() does not throw, so this try/catch is
+  // effectively dead code; it also catches the exception by value.
+  try
+  {
+    imgCount = (int)feats.size();
+  }
+  catch (Exception)
+  {
+    cerr << "no features computed?" << endl;
+  }
+
+  double bestig = -numeric_limits< double >::max();
+
+  splitop = NULL;
+  splitval = -1.0;
+
+  // sampled pixels, each stored as (image index, x, y)
+  set<vector<int> >selFeats;
+  // class histogram of the sampled pixels
+  map<int, int> e;
+  int featcounter = forest[tree][node].featcounter;
+
+  // too few pixels in this node -> keep it as a leaf
+  if (featcounter < minFeats)
+  {
+    return 0.0;
+  }
+
+  // per-class sampling probability: inverse-frequency balancing so that
+  // roughly maxSamples pixels are drawn overall, evenly across classes;
+  // forbidden classes are never sampled
+  vector<double> fraction (a.size(), 0.0);
+
+  for (uint i = 0; i < fraction.size(); i++)
+  {
+    if (forbidden_classes.find (labelmapback[i]) != forbidden_classes.end())
+      fraction[i] = 0;
+    else
+      fraction[i] = ((double)maxSamples) / ((double)featcounter * a[i] * a.size());
+  }
+
+  featcounter = 0;
+
+  // randomly subsample pixels currently assigned to this node
+  for (int iCounter = 0; iCounter < imgCount; iCounter++)
+  {
+    int xsize = (int)currentfeats[iCounter].width();
+    int ysize = (int)currentfeats[iCounter].height();
+
+    for (int x = 0; x < xsize; x++)
+    {
+      for (int y = 0; y < ysize; y++)
+      {
+        if (currentfeats[iCounter].get (x, y, tree) == node)
+        {
+          int cn = labels[iCounter] (x, y);
+          double randD = (double)rand() / (double)RAND_MAX;
+
+          if (labelmap.find (cn) == labelmap.end())
+            continue;
+
+          if (randD < fraction[labelmap[cn]])
+          {
+            vector<int> tmp (3, 0);
+            tmp[0] = iCounter;
+            tmp[1] = x;
+            tmp[2] = y;
+            featcounter++;
+            selFeats.insert (tmp);
+            e[cn]++;
+          }
+        }
+      }
+    }
+  }
+
+  map<int, int>::iterator mapit;
+
+  // entropy of the class distribution of the sampled pixels
+  double globent = 0.0;
+
+  for (mapit = e.begin() ; mapit != e.end(); mapit++)
+  {
+    double p = (double)(*mapit).second / (double)featcounter;
+    globent += p * log2 (p);
+  }
+
+  globent = -globent;
+
+  // node is already (nearly) pure -> no split
+  if (globent < 0.5)
+  {
+    return 0.0;
+  }
+
+  /** vector of all possible features */
+  std::vector<Operation*> featsel;
+
+  // generate featsPerSplit random candidate operations: pick a feature type,
+  // two random offsets within the (type-dependent) window, two random
+  // channels of that type and a random operation from the type's pool
+  for (int i = 0; i < featsPerSplit; i++)
+  {
+    int x1, x2, y1, y2;
+    int ft = (int)((double)rand() / (double)RAND_MAX * (double)ftypes);
+
+    int tmpws = windowSize;
+
+    if (firstiteration)
+      ft = 0;
+
+    if (channelsPerType[ft].size() == 0)
+    {
+      ft = 0;
+    }
+
+    if (ft > 1)
+    {
+      //use larger window size for context features
+      tmpws *= 4;
+    }
+
+
+    // region features are only used below a certain depth
+    if(ft == 1)
+    {
+      if(depth < 8)
+      {
+        ft = 0;
+      }
+    }
+
+    x1 = (int)((double)rand() / (double)RAND_MAX * (double)tmpws) - tmpws / 2;
+    x2 = (int)((double)rand() / (double)RAND_MAX * (double)tmpws) - tmpws / 2;
+    y1 = (int)((double)rand() / (double)RAND_MAX * (double)tmpws) - tmpws / 2;
+    y2 = (int)((double)rand() / (double)RAND_MAX * (double)tmpws) - tmpws / 2;
+
+    int f1 = (int)((double)rand() / (double)RAND_MAX * (double)channelsPerType[ft].size());
+    int f2 = f1;
+    if ((double)rand() / (double)RAND_MAX > 0.5)
+      f2 = (int)((double)rand() / (double)RAND_MAX * (double)channelsPerType[ft].size());
+    int o = (int)((double)rand() / (double)RAND_MAX * (double)ops[ft].size());
+
+    f1 = channelsPerType[ft][f1];
+    f2 = channelsPerType[ft][f2];
+    // for region features the second "channel" selects a class index
+    if(ft == 1)
+    {
+      int classes = (int)regionProbs[0][0].size();
+      f2 = (int)((double)rand() / (double)RAND_MAX * (double)classes);
+    }
+
+    Operation *op = ops[ft][o]->clone();
+
+    op->set(x1, y1, x2, y2, f1, f2, calcVal[ft]);
+    op->setFeatType(ft);
+
+    if (ft == 3 || ft == 4)
+      op->setContext(true);
+    else
+      op->setContext(false);
+
+    featsel.push_back (op);
+  }
+
+  // evaluate every candidate: compute its value on all sampled pixels, draw
+  // randomTests thresholds in the observed value range and keep the
+  // (operation, threshold) pair with the highest information gain
+  for (int f = 0; f < featsPerSplit; f++)
+  {
+    double l_bestig = -numeric_limits< double >::max();
+    double l_splitval = -1.0;
+    set<vector<int> >::iterator it;
+    vector<double> vals;
+
+    double maxval = -numeric_limits<double>::max();
+    double minval = numeric_limits<double>::max();
+    for (it = selFeats.begin() ; it != selFeats.end(); it++)
+    {
+      Features feat;
+      feat.feats = &feats[ (*it) [0]];
+      feat.cfeats = &currentfeats[ (*it) [0]];
+      feat.cTree = tree;
+      feat.tree = &forest[tree];
+
+      assert(forest.size() > tree);
+      assert(forest[tree][0].dist.size() > 0);
+
+      feat.rProbs = &regionProbs[(*it) [0]];
+
+      // non-finite feature responses are clamped to 0
+      double val = featsel[f]->getVal (feat, (*it) [1], (*it) [2]);
+      if(!isfinite(val))
+      {
+        val = 0.0;
+        //cerr << "non finite value for " << featsel[f]->writeInfos() <<  endl << (*it) [1] << " " <<  (*it) [2] << endl;
+      }
+      vals.push_back (val);
+      maxval = std::max (val, maxval);
+      minval = std::min (val, minval);
+    }
+
+    // constant feature -> cannot separate anything
+    if (minval == maxval)
+      continue;
+
+    double scale = maxval - minval;
+    vector<double> splits;
+
+    for (int r = 0; r < randomTests; r++)
+    {
+      splits.push_back (((double)rand() / (double)RAND_MAX*scale) + minval);
+    }
+
+    for (int run = 0 ; run < randomTests; run++)
+    {
+      set<vector<int> >::iterator it2;
+      double val = splits[run];
+
+      map<int, int> eL, eR;
+      int counterL = 0, counterR = 0;
+      int counter2 = 0;
+
+      // partition the samples at the threshold and build both histograms
+      for (it2 = selFeats.begin() ; it2 != selFeats.end(); it2++, counter2++)
+      {
+        int cn = labels[ (*it2) [0]] ((*it2) [1], (*it2) [2]);
+        //cout << "vals[counter2] " << vals[counter2] << " val: " <<  val << endl;
+
+        if (vals[counter2] < val)
+        {
+          //left entropie:
+          eL[cn] = eL[cn] + 1;
+          counterL++;
+        }
+        else
+        {
+          //right entropie:
+          eR[cn] = eR[cn] + 1;
+          counterR++;
+        }
+      }
+
+      double leftent = 0.0;
+
+      for (mapit = eL.begin() ; mapit != eL.end(); mapit++)
+      {
+        double p = (double)(*mapit).second / (double)counterL;
+        leftent -= p * log2 (p);
+      }
+
+      double rightent = 0.0;
+
+      for (mapit = eR.begin() ; mapit != eR.end(); mapit++)
+      {
+        double p = (double)(*mapit).second / (double)counterR;
+        rightent -= p * log2 (p);
+      }
+
+      //cout << "rightent: " << rightent << " leftent: " << leftent << endl;
+
+      double pl = (double)counterL / (double)(counterL + counterR);
+
+      // information gain of this threshold
+      double ig = globent - (1.0 - pl) * rightent - pl * leftent;
+
+      //double ig = globent - rightent - leftent;
+
+      // optional normalization penalizing very unbalanced splits
+      if (useShannonEntropy)
+      {
+        double esplit = - (pl * log (pl) + (1 - pl) * log (1 - pl));
+        ig = 2 * ig / (globent + esplit);
+      }
+
+      if (ig > l_bestig)
+      {
+        l_bestig = ig;
+        l_splitval = val;
+      }
+    }
+
+    if (l_bestig > bestig)
+    {
+      bestig = l_bestig;
+      splitop = featsel[f];
+      splitval = l_splitval;
+    }
+  }
+
+  //FIXME: delete all features!
+  /*for(int i = 0; i < featsPerSplit; i++)
+  {
+   if(featsel[i] != splitop)
+    delete featsel[i];
+  }*/
+
+
+#ifdef DEBUG
+  //cout << "globent: " << globent <<  " bestig " << bestig << " splitval: " << splitval << endl;
+#endif
+  return bestig;
+}
+
+/**
+ * Returns the class probability for class 'channel' at pixel (x,y),
+ * averaged over the nodes the pixel currently occupies in all trees.
+ */
+inline double SemSegContextTree::getMeanProb (const int &x, const int &y, const int &channel, const MultiChannelImageT<unsigned short int> &currentfeats)
+{
+  double sum = 0.0;
+
+  for (int t = 0; t < nbTrees; t++)
+  {
+    const int node = currentfeats.get (x, y, t);
+    sum += forest[t][node].dist[channel];
+  }
+
+  return sum / (double)nbTrees;
+}
+
+/**
+ * Updates the integral-image channels of 'feats'.  On the very first
+ * iteration the raw channels are copied into their integral slots (as
+ * recorded in integralMap) and integrated.  Afterwards the per-class mean
+ * leaf probabilities (from getMeanProb) are accumulated into integral images
+ * starting at channel 'firstChannel', so that subsequent context features
+ * can evaluate rectangular sums of class probabilities in O(1).
+ *
+ * @param currentfeats per-pixel node assignment per tree
+ * @param feats        [in,out] feature channels, integral slots are rewritten
+ * @param firstChannel first channel index of the class-probability integrals
+ */
+void SemSegContextTree::computeIntegralImage (const NICE::MultiChannelImageT<unsigned short int> &currentfeats, NICE::MultiChannelImageT<double> &feats, int firstChannel)
+{
+  // NOTE(review): these two assignments are immediately overwritten below;
+  // the sizes taken from currentfeats are never used.
+  int xsize = currentfeats.width();
+  int ysize = currentfeats.height();
+
+  xsize = feats.width();
+  ysize = feats.height();
+
+  if (firstiteration)
+  {
+#pragma omp parallel for
+    for (int it = 0; it < (int)integralMap.size(); it++)
+    {
+      // copy each raw channel into its dedicated integral channel, then
+      // integrate it in place
+      int corg = integralMap[it].first;
+      int cint = integralMap[it].second;
+
+      for (int y = 0; y < ysize; y++)
+      {
+        for (int x = 0; x < xsize; x++)
+        {
+          feats(x, y, cint) = feats(x, y, corg);
+        }
+      }
+      feats.calcIntegral(cint);
+    }
+  }
+
+  int channels = (int)forest[0][0].dist.size();
+
+  // build the integral image of the mean class probabilities with the usual
+  // running-sum recurrence I(x,y) = p(x,y) + I(x,y-1) + I(x-1,y) - I(x-1,y-1)
+#pragma omp parallel for
+  for (int c = 0; c < channels; c++)
+  {
+
+    feats (0, 0, firstChannel + c) = getMeanProb (0, 0, c, currentfeats);
+
+    //first column
+    for (int y = 1; y < ysize; y++)
+    {
+      feats (0, y, firstChannel + c) = getMeanProb (0, y, c, currentfeats)
+                                       + feats (0, y - 1, firstChannel + c);
+    }
+
+    //first row
+    for (int x = 1; x < xsize; x++)
+    {
+      feats (x, 0, firstChannel + c) = getMeanProb (x, 0, c, currentfeats)
+                                       + feats (x - 1, 0, firstChannel + c);
+    }
+
+    //rest
+    for (int y = 1; y < ysize; y++)
+    {
+      for (int x = 1; x < xsize; x++)
+      {
+        feats (x, y, firstChannel + c) = getMeanProb (x, y, c, currentfeats)
+                                         + feats (x, y - 1, firstChannel + c)
+                                         + feats (x - 1, y, firstChannel + c)
+                                         - feats (x - 1, y - 1, firstChannel + c);
+      }
+    }
+  }
+}
+
/**
 * Exponentially decaying depth weight: 2^-(dim - d + 1), i.e. levels close
 * to the maximum depth 'dim' contribute more than shallow levels.
 */
inline double computeWeight (const double &d, const double &dim)
{
  const double levels = dim - d + 1.0;
  return 1.0 / pow (2.0, levels);
}
+
+void SemSegContextTree::train (const MultiDataset *md)
+{
+  int shortsize = numeric_limits<short>::max();
+  
+  Timer timer;
+  timer.start();
+  const LabeledSet train = * (*md) ["train"];
+  const LabeledSet *trainp = &train;
+
+  ProgressBar pb ("compute feats");
+  pb.show();
+
+  //TODO: Speichefresser!, lohnt sich sparse?
+  vector<MultiChannelImageT<double> > allfeats;
+  vector<MultiChannelImageT<unsigned short int> > currentfeats;
+  vector<MatrixT<int> > labels;
+
+  vector<SparseVector*> globalCategorFeats;
+  vector<map<int,int> > classesPerImage;
+
+  std::string forbidden_classes_s = conf->gS ("analysis", "donttrain", "");
+
+  vector<vector<vector<double> > > regionProbs;
+  vector<vector<int> > rSize;
+  vector<int> amountRegionpI;
+
+  if (forbidden_classes_s == "")
+  {
+    forbidden_classes_s = conf->gS ("analysis", "forbidden_classes", "");
+  }
+
+  classnames.getSelection (forbidden_classes_s, forbidden_classes);
+
+  int imgcounter = 0;
+
+  int amountPixels = 0;
+
+  ////////////////////////////////////////////////////
+  //define which featurextraction methods should be used for each channel
+  rawChannels = 3;
+
+  // how many channels without integral image
+  int shift = 0;
+
+  if (useGradient)
+    rawChannels *= 2;
+
+  if (useWeijer)
+    rawChannels += 11;
+
+  if (useHoiemFeatures)
+    rawChannels += 8;
+
+  // gray value images
+  for (int i = 0; i < rawChannels; i++)
+  {
+    channelType.push_back (0);
+  }
+
+  // regions
+  if (useRegionFeature)
+  {
+    channelType.push_back (1);
+    shift++;
+  }
+
+///////////////////////////////////////////////////////////////////
+
+  LOOP_ALL_S (*trainp)
+  {
+    EACH_INFO (classno, info);
+
+    NICE::ColorImage img;
+
+    std::string currentFile = info.img();
+
+    CachedExample *ce = new CachedExample (currentFile);
+
+    const LocalizationResult *locResult = info.localization();
+
+    if (locResult->size() <= 0)
+    {
+      fprintf (stderr, "WARNING: NO ground truth polygons found for %s !\n",
+               currentFile.c_str());
+      continue;
+    }
+
+    fprintf (stderr, "SSContext: Collecting pixel examples from localization info: %s\n", currentFile.c_str());
+
+    int xsize, ysize;
+    ce->getImageSize (xsize, ysize);
+    amountPixels += xsize * ysize;
+
+    MatrixT<int> tmpMat (xsize, ysize);
+
+    currentfeats.push_back (MultiChannelImageT<unsigned short int> (xsize, ysize, nbTrees));
+    currentfeats[imgcounter].setAll (0);
+
+    labels.push_back (tmpMat);
+
+    try {
+      img = ColorImage (currentFile);
+    } catch (Exception) {
+      cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
+      continue;
+    }
+
+    Globals::setCurrentImgFN (currentFile);
+
+    //TODO: resize image?!
+    MultiChannelImageT<double> feats;
+    allfeats.push_back (feats);
+
+    int amountRegions;
+    // read image and do some simple transformations
+    extractBasicFeatures (allfeats[imgcounter], img, currentFile, amountRegions);
+
+    if (useRegionFeature)
+    {
+      amountRegionpI.push_back(amountRegions);
+      rSize.push_back(vector<int>(amountRegions, 0));
+      for (int y = 0; y < ysize; y++)
+      {
+        for (int x = 0; x < xsize; x++)
+        {
+          rSize[imgcounter][allfeats[imgcounter](x, y, rawChannels)]++;
+        }
+      }
+    }
+
+    // getting groundtruth
+    NICE::Image pixelLabels (xsize, ysize);
+
+    pixelLabels.set (0);
+
+    locResult->calcLabeledImage (pixelLabels, (*classNames).getBackgroundClass());
+
+    for (int x = 0; x < xsize; x++)
+    {
+      for (int y = 0; y < ysize; y++)
+      {
+        classno = pixelLabels.getPixel (x, y);
+        labels[imgcounter] (x, y) = classno;
+
+        if (forbidden_classes.find (classno) != forbidden_classes.end())
+          continue;
+
+        labelcounter[classno]++;
+
+      }
+    }
+    
+    if(useCategorization)
+    {
+      globalCategorFeats.push_back(new SparseVector());
+      classesPerImage.push_back(map<int,int>());
+      
+      for (int x = 0; x < xsize; x++)
+      {
+        for (int y = 0; y < ysize; y++)
+        {
+          classno = pixelLabels.getPixel (x, y);
+
+          if (forbidden_classes.find (classno) != forbidden_classes.end())
+            continue;
+
+          classesPerImage[imgcounter][classno] = 1;
+        }
+      }
+    }
+
+    imgcounter++;
+
+    pb.update (trainp->count());
+    delete ce;
+  }
+
+  pb.hide();
+
+  map<int, int>::iterator mapit;
+  int classes = 0;
+
+  for (mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++)
+  {
+    labelmap[mapit->first] = classes;
+    labelmapback[classes] = mapit->first;
+    classes++;
+  }
+
+///////////////////////////////////////////////////////////////////
+  for (int i = 0; i < rawChannels; i++)
+  {
+    channelType.push_back (2);
+  }
+
+  // integral images
+  for (int i = 0; i < classes; i++)
+  {
+    channelType.push_back (3);
+  }
+
+  integralMap.clear();
+  int integralImageAmount = rawChannels;
+  for (int ii = 0; ii < integralImageAmount; ii++)
+  {
+    integralMap.push_back (pair<int, int> (ii, ii + integralImageAmount + shift));
+  }
+
+  int amountTypes = 5;
+
+  channelsPerType = vector<vector<int> > (amountTypes, vector<int>());
+
+  for (int i = 0; i < (int)channelType.size(); i++)
+  {
+    channelsPerType[channelType[i]].push_back (i);
+  }
+
+  for (int i = 0; i < classes; i++)
+  {
+    channelsPerType[channelsPerType.size()-1].push_back (i);
+  }
+
+  ftypes = std::min (amountTypes, ftypes);
+
+////////////////////////////////////////////////////
+
+  if (useRegionFeature)
+  {
+    for (int a = 0; a < (int)amountRegionpI.size(); a++)
+    {
+      regionProbs.push_back(vector<vector<double> > (amountRegionpI[a], vector<double> (classes, 0.0)));
+    }
+  }
+
+  //balancing
+  int featcounter = 0;
+
+  a = vector<double> (classes, 0.0);
+
+  for (int iCounter = 0; iCounter < imgcounter; iCounter++)
+  {
+    int xsize = (int)currentfeats[iCounter].width();
+    int ysize = (int)currentfeats[iCounter].height();
+
+    for (int x = 0; x < xsize; x++)
+    {
+      for (int y = 0; y < ysize; y++)
+      {
+        featcounter++;
+        int cn = labels[iCounter] (x, y);
+        if (labelmap.find (cn) == labelmap.end())
+          continue;
+        a[labelmap[cn]] ++;
+      }
+    }
+  }
+
+  for (int i = 0; i < (int)a.size(); i++)
+  {
+    a[i] /= (double)featcounter;
+  }
+
+#ifdef DEBUG
+  for (int i = 0; i < (int)a.size(); i++)
+  {
+    cout << "a[" << i << "]: " << a[i] << endl;
+  }
+
+  cout << "a.size: " << a.size() << endl;
+
+#endif
+
+  depth = 0;
+
+  uniquenumber = 0;
+
+  for (int t = 0; t < nbTrees; t++)
+  {
+    vector<TreeNode> singletree;
+    singletree.push_back (TreeNode());
+    singletree[0].dist = vector<double> (classes, 0.0);
+    singletree[0].depth = depth;
+    singletree[0].featcounter = amountPixels;
+    singletree[0].nodeNumber = uniquenumber;
+    uniquenumber++;
+    forest.push_back (singletree);
+  }
+
+  vector<int> startnode (nbTrees, 0);
+
+  bool allleaf = false;
+  //int baseFeatSize = allfeats[0].size();
+
+  timer.stop();
+  cerr << "preprocessing finished in: " << timer.getLastAbsolute() << " seconds" << endl;
+  timer.start();
+
+  while (!allleaf && depth < maxDepth)
+  {
+    depth++;
+#ifdef DEBUG
+    cout << "depth: " << depth << endl;
+#endif
+    allleaf = true;
+    vector<MultiChannelImageT<unsigned short int> > lastfeats = currentfeats;
+    vector<vector<vector<double> > > lastRegionProbs = regionProbs;
+
+    if (useRegionFeature)
+    {
+      int rSize = (int)regionProbs.size();
+      for (int a = 0; a < rSize; a++)
+      {
+        int rSize2 = (int)regionProbs[a].size();
+        for (int b = 0; b < rSize2; b++)
+        {
+          int rSize3 = (int)regionProbs[a][b].size();
+          for (int c = 0; c < rSize3; c++)
+          {
+            regionProbs[a][b][c] = 0.0;
+          }
+        }
+      }
+    }
+
+#if 1
+    Timer timerDepth;
+    timerDepth.start();
+#endif
+
+    double weight = computeWeight (depth, maxDepth) - computeWeight (depth - 1, maxDepth);
+
+    if (depth == 1)
+    {
+      weight = computeWeight (1, maxDepth);
+    }
+
+//   omp_set_dynamic(0);
+#pragma omp parallel for
+    for (int tree = 0; tree < nbTrees; tree++)
+    {
+      const int t = (int)forest[tree].size();
+      const int s = startnode[tree];
+      startnode[tree] = t;
+//the following line seems to be a problem, since it produces many segmentation faults
+//#pragma omp parallel for
+      for (int i = s; i < t; i++)
+      {
+        if (!forest[tree][i].isleaf && forest[tree][i].left < 0)
+        {
+          Operation *splitfeat = NULL;
+          double splitval;
+          double bestig = getBestSplit (allfeats, lastfeats, labels, i, splitfeat, splitval, tree, lastRegionProbs);
+
+          for (int ii = 0; ii < (int)lastfeats.size(); ii++)
+          {
+            for (int c = 0; c < lastfeats[ii].channels(); c++)
+            {
+              short unsigned int minv, maxv;
+              lastfeats[ii].statistics (minv, maxv, c);
+            }
+          }
+
+          forest[tree][i].feat = splitfeat;
+          
+          forest[tree][i].decision = splitval;
+
+          if (splitfeat != NULL)
+          {
+            allleaf = false;
+            int left;
+#pragma omp critical
+            {
+              left = forest[tree].size();
+              forest[tree].push_back (TreeNode());
+              forest[tree].push_back (TreeNode());
+            }
+            int right = left + 1;
+            forest[tree][i].left = left;
+            forest[tree][i].right = right;
+            forest[tree][left].dist = vector<double> (classes, 0.0);
+            forest[tree][right].dist = vector<double> (classes, 0.0);
+            forest[tree][left].depth = depth;
+            forest[tree][right].depth = depth;
+            forest[tree][left].featcounter = 0;
+            forest[tree][right].featcounter = 0;
+            forest[tree][left].nodeNumber = uniquenumber;
+            int leftu = uniquenumber;
+            uniquenumber++;
+            forest[tree][right].nodeNumber = uniquenumber;
+            int rightu = uniquenumber;
+            uniquenumber++;
+            forest[tree][right].featcounter = 0;
+
+#pragma omp parallel for
+            for (int iCounter = 0; iCounter < imgcounter; iCounter++)
+            {
+              int xsize = currentfeats[iCounter].width();
+              int ysize = currentfeats[iCounter].height();
+
+              for (int x = 0; x < xsize; x++)
+              {
+                for (int y = 0; y < ysize; y++)
+                {
+                  if (currentfeats[iCounter].get (x, y, tree) == i)
+                  {
+                    Features feat;
+                    feat.feats = &allfeats[iCounter];
+                    feat.cfeats = &lastfeats[iCounter];
+                    feat.cTree = tree;
+                    feat.tree = &forest[tree];
+                    feat.rProbs = &lastRegionProbs[iCounter];
+                    double val = splitfeat->getVal (feat, x, y);
+                    if(!isfinite(val))
+                    {
+                      val = 0.0;
+                    }
+
+#pragma omp critical
+                    if (val < splitval)
+                    {
+                      currentfeats[iCounter].set (x, y, left, tree);
+                      if (labelmap.find (labels[iCounter] (x, y)) != labelmap.end())
+                        forest[tree][left].dist[labelmap[labels[iCounter] (x, y) ]]++;
+                      forest[tree][left].featcounter++;
+                      if(useCategorization && leftu < shortsize)
+                        (*globalCategorFeats[iCounter])[leftu]+=weight;
+                    }
+                    else
+                    {
+                      currentfeats[iCounter].set (x, y, right, tree);
+                      if (labelmap.find (labels[iCounter] (x, y)) != labelmap.end())
+                        forest[tree][right].dist[labelmap[labels[iCounter] (x, y) ]]++;
+                      forest[tree][right].featcounter++;
+                      
+                      if(useCategorization && rightu < shortsize)
+                        (*globalCategorFeats[iCounter])[rightu]+=weight;
+                    }
+                  }
+                }
+              }
+            }
+
+            double lcounter = 0.0, rcounter = 0.0;
+
+            for (uint d = 0; d < forest[tree][left].dist.size(); d++)
+            {
+              if (forbidden_classes.find (labelmapback[d]) != forbidden_classes.end())
+              {
+                forest[tree][left].dist[d] = 0;
+                forest[tree][right].dist[d] = 0;
+              }
+              else
+              {
+                forest[tree][left].dist[d] /= a[d];
+                lcounter += forest[tree][left].dist[d];
+                forest[tree][right].dist[d] /= a[d];
+                rcounter += forest[tree][right].dist[d];
+              }
+            }
+
+            if (lcounter <= 0 || rcounter <= 0)
+            {
+              cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
+              cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
+              cout << "bestig: " << bestig << endl;
+
+              for (int iCounter = 0; iCounter < imgcounter; iCounter++)
+              {
+                int xsize = currentfeats[iCounter].width();
+                int ysize = currentfeats[iCounter].height();
+                int counter = 0;
+
+                for (int x = 0; x < xsize; x++)
+                {
+                  for (int y = 0; y < ysize; y++)
+                  {
+                    if (lastfeats[iCounter].get (x, y, tree) == i)
+                    {
+                      if (++counter > 30)
+                        break;
+
+                      Features feat;
+
+                      feat.feats = &allfeats[iCounter];
+                      feat.cfeats = &lastfeats[iCounter];
+                      feat.cTree = tree;
+                      feat.tree = &forest[tree];
+                      feat.rProbs = &lastRegionProbs[iCounter];
+
+                      double val = splitfeat->getVal (feat, x, y);
+                      if(!isfinite(val))
+                      {
+                        val = 0.0;
+                      }
+
+                      cout << "splitval: " << splitval << " val: " << val << endl;
+                    }
+                  }
+                }
+              }
+
+              assert (lcounter > 0 && rcounter > 0);
+            }
+
+            for (uint d = 0; d < forest[tree][left].dist.size(); d++)
+            {
+              forest[tree][left].dist[d] /= lcounter;
+              forest[tree][right].dist[d] /= rcounter;
+            }
+          }
+          else
+          {
+            forest[tree][i].isleaf = true;
+          }
+        }
+      }
+    }
+
+
+    if (useRegionFeature)
+    {
+      for (int iCounter = 0; iCounter < imgcounter; iCounter++)
+      {
+        int xsize = currentfeats[iCounter].width();
+        int ysize = currentfeats[iCounter].height();
+        int counter = 0;
+
+#pragma omp parallel for
+        for (int x = 0; x < xsize; x++)
+        {
+          for (int y = 0; y < ysize; y++)
+          {
+            for (int tree = 0; tree < nbTrees; tree++)
+            {
+              int node = currentfeats[iCounter].get(x, y, tree);
+              for (uint d = 0; d < forest[tree][node].dist.size(); d++)
+              {
+                regionProbs[iCounter][(int)(allfeats[iCounter](x, y, rawChannels))][d] += forest[tree][node].dist[d];
+              }
+            }
+          }
+        }
+      }
+
+      int rSize1 = (int)regionProbs.size();
+      for (int a = 0; a < rSize1; a++)
+      {
+        int rSize2 = (int)regionProbs[a].size();
+        for (int b = 0; b < rSize2; b++)
+        {
+          int rSize3 = (int)regionProbs[a][b].size();
+          for (int c = 0; c < rSize3; c++)
+          {
+            regionProbs[a][b][c] /= (double)(rSize[a][b]);
+          }
+        }
+      }
+    }
+
+    //compute integral images
+    if (firstiteration)
+    {
+      for (int i = 0; i < imgcounter; i++)
+      {
+        allfeats[i].addChannel ((int)(classes + rawChannels));
+      }
+    }
+
+    for (int i = 0; i < imgcounter; i++)
+    {
+      computeIntegralImage (currentfeats[i], allfeats[i], channelType.size() - classes);
+    }
+
+    if (firstiteration)
+    {
+      firstiteration = false;
+    }
+
+#if 1
+    timerDepth.stop();
+
+    cout << "time for depth " << depth << ": " << timerDepth.getLastAbsolute() << endl;
+#endif
+    
+    lastfeats.clear();
+    lastRegionProbs.clear();
+  }
+
+  timer.stop();
+  cerr << "learning finished in: " << timer.getLastAbsolute() << " seconds" << endl;
+  timer.start();
+  
+  cout << "uniquenumber " << uniquenumber << endl;
+  
+  if(useCategorization && fasthik != NULL)
+  {
+    uniquenumber = std::min(shortsize, uniquenumber);
+    for(uint i = 0; i < globalCategorFeats.size(); i++)
+    {
+      globalCategorFeats[i]->setDim(uniquenumber);
+      globalCategorFeats[i]->normalize();
+    }
+    map<int,Vector> ys;
+    
+    int cCounter = 0;
+    for(map<int,int>::iterator it = labelmap.begin(); it != labelmap.end(); it++, cCounter++)
+    {
+      ys[cCounter] = Vector(globalCategorFeats.size());
+      for(int i = 0; i < imgcounter; i++)
+      {
+        if(classesPerImage[i].find(it->first) != classesPerImage[i].end())
+        {
+          ys[cCounter][i] = 1;
+        }
+        else
+        {
+          ys[cCounter][i] = -1;
+        }
+      }
+    }
+
+    //NOTE
+    // Compiler doesn't know how to automatically convert
+    // std::vector<T*> to std::vector<T const*> because the way
+    // the template system works means that in theory the two may
+    // be specialised differently.  This is an explicit conversion.
+    fasthik->train( reinterpret_cast<vector<const NICE::SparseVector *>&>(globalCategorFeats), ys);
+    
+  }
+  
+#ifdef DEBUG
+  for (int tree = 0; tree < nbTrees; tree++)
+  {
+    int t = (int)forest[tree].size();
+
+    for (int i = 0; i < t; i++)
+    {
+      printf ("tree[%i]: left: %i, right: %i", i, forest[tree][i].left, forest[tree][i].right);
+
+      if (!forest[tree][i].isleaf && forest[tree][i].left != -1)
+      {
+        cout <<  ", feat: " << forest[tree][i].feat->writeInfos() << " ";
+        opOverview[forest[tree][i].feat->getOps() ]++;
+        contextOverview[forest[tree][i].depth][ (int)forest[tree][i].feat->getContext() ]++;
+      }
+
+      for (int d = 0; d < (int)forest[tree][i].dist.size(); d++)
+      {
+        cout << " " << forest[tree][i].dist[d];
+      }
+
+      cout << endl;
+    }
+  }
+
+  std::map<int, int> featTypeCounter;
+
+  for (int tree = 0; tree < nbTrees; tree++)
+  {
+    int t = (int)forest[tree].size();
+
+    for (int i = 0; i < t; i++)
+    {
+      if (!forest[tree][i].isleaf && forest[tree][i].left != -1)
+      {
+        featTypeCounter[forest[tree][i].feat->getFeatType()] += 1;
+      }
+    }
+  }
+
+  cout << "evaluation of featuretypes" << endl;
+  for (map<int, int>::const_iterator it = featTypeCounter.begin(); it != featTypeCounter.end(); it++)
+  {
+    cerr << it->first << ": " << it->second << endl;
+  }
+
+  for (uint c = 0; c < ops.size(); c++)
+  {
+    for (int t = 0; t < ops[c].size(); t++)
+    {
+      cout << ops[c][t]->writeInfos() << ": " << opOverview[ops[c][t]->getOps() ] << endl;
+    }
+  }
+
+  for (int d = 0; d < maxDepth; d++)
+  {
+    double sum =  contextOverview[d][0] + contextOverview[d][1];
+    if(sum == 0)
+      sum = 1;
+
+    contextOverview[d][0] /= sum;
+    contextOverview[d][1] /= sum;
+
+    cout << "depth: " << d << " woContext: " << contextOverview[d][0] << " wContext: " << contextOverview[d][1] << endl;
+  }
+#endif
+
+  timer.stop();
+  cerr << "rest finished in: " << timer.getLastAbsolute() << " seconds" << endl;
+  timer.start();
+}
+
+/**
+ * @brief Reads an image into a multi-channel feature image and appends the
+ *        configured basic feature channels: CIELab color, per-channel gradient
+ *        strength, van-de-Weijer color names, Hoiem geometric-context maps and
+ *        a region-id channel from the segmentation method.
+ * @param feats         output feature image (reinitialized here)
+ * @param img           input color image
+ * @param currentFile   filename of the input image (used to locate the
+ *                      precomputed Hoiem confidence images on disk)
+ * @param amountRegions output: number of segmentation regions, or -1 when the
+ *                      region feature is disabled
+ */
+void SemSegContextTree::extractBasicFeatures (NICE::MultiChannelImageT<double> &feats, const ColorImage &img, const string &currentFile, int &amountRegions)
+{
+  int xsize = img.width();
+  int ysize = img.height();
+  //TODO: resize image?!
+
+  feats.reInit (xsize, ysize, 3);
+
+  // copy the raw RGB values into the first three channels
+  for (int x = 0; x < xsize; x++)
+  {
+    for (int y = 0; y < ysize; y++)
+    {
+      for (int r = 0; r < 3; r++)
+      {
+        feats.set (x, y, img.getPixel (x, y, r), r);
+      }
+    }
+  }
+
+  // all further processing works on the CIELab representation
+  feats = ColorSpace::rgbtolab (feats);
+
+  if (useGradient)
+  {
+    // append one gradient-strength channel per existing channel
+    int currentsize = feats.channels();
+    feats.addChannel (currentsize);
+
+    for (int c = 0; c < currentsize; c++)
+    {
+      ImageT<double> tmp = feats[c];
+      ImageT<double> tmp2 = feats[c+currentsize];
+
+      NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
+    }
+  }
+
+  if (useWeijer)
+  {
+    // color-name features of van de Weijer et al.
+    NICE::MultiChannelImageT<double> cfeats;
+    lfcw->getFeats (img, cfeats);
+    feats.addChannel (cfeats);
+  }
+
+  // read the geometric cues produced by Hoiem et al.
+  if (useHoiemFeatures)
+  {
+    // we could also give the following set as a config option
+    string hoiemClasses_s = "sky 000 090-045 090-090 090-135 090 090-por 090-sol";
+    vector<string> hoiemClasses;
+    StringTools::split (hoiemClasses_s, ' ', hoiemClasses);
+
+    // Original image filename: basel_000083.jpg
+    // hoiem result: basel_000083_c_sky.png
+
+    // FileName supports string handling especially for filenames
+    FileName fn (currentFile);
+    fn.removeExtension();
+    FileName fnBase = fn.extractFileName();
+
+    // counter for the channel index, starts with the current size of the destination multi-channel image
+    int currentChannel = feats.channels();
+
+    // add a channel for each feature in advance
+    feats.addChannel (hoiemClasses.size());
+
+    // loop through all geometric categories and add the images
+    for (vector<string>::const_iterator i = hoiemClasses.begin(); i != hoiemClasses.end(); i++, currentChannel++)
+    {
+      string hoiemClass = *i;
+      FileName fnConfidenceImage (hoiemDirectory + fnBase.str() + "_c_" + hoiemClass + ".png");
+      if (! fnConfidenceImage.fileExists())
+      {
+        fthrow (Exception, "Unable to read the Hoiem geometric confidence image: " << fnConfidenceImage.str() << " (original image is " << currentFile << ")");
+      } else {
+        Image confidenceImage (fnConfidenceImage.str());
+        // check whether the image size is consistent
+        if (confidenceImage.width() != feats.width() || confidenceImage.height() != feats.height())
+        {
+          fthrow (Exception, "The size of the geometric confidence image does not match with the original image size: " << fnConfidenceImage.str());
+        }
+
+        // copy standard image to double image
+        // (fix: removed the unused local copy "ImageT<double> dst = feats[currentChannel];"
+        //  -- the loop below writes to feats directly, dst was never read)
+        for (uint y = 0 ; y < (uint) confidenceImage.height(); y++)
+          for (uint x = 0 ; x < (uint) confidenceImage.width(); x++)
+            feats (x, y, currentChannel) = (double)confidenceImage (x, y);
+      }
+    }
+  }
+
+  if (useRegionFeature)
+  {
+    //using segmentation: store the region id of every pixel in one extra channel
+    Matrix regions;
+    amountRegions = segmentation->segRegions (img, regions);
+
+    int cchannel = feats.channels();
+    feats.addChannel(1);
+
+    for (int y = 0; y < regions.cols(); y++)
+    {
+      for (int x = 0; x < regions.rows(); x++)
+      {
+        feats(x, y, cchannel) = regions(x, y);
+      }
+    }
+  }
+  else
+  {
+    amountRegions = -1;
+  }
+}
+
+/**
+ * @brief Classifies a single image with the trained context forest.
+ *
+ * Extracts the basic features, pushes every pixel through all trees level by
+ * level (recomputing the context integral images after each level), optionally
+ * restricts the label set via image categorization, and finally produces either
+ * a pixel-wise or a region-wise labeling.
+ *
+ * @param ce            input data (provides the image size; the image itself is
+ *                      re-read from Globals::getCurrentImgFN())
+ * @param segresult     output label image
+ * @param probabilities output per-pixel class probabilities
+ */
+void SemSegContextTree::semanticseg (CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities)
+{
+  int xsize;
+  int ysize;
+  ce->getImageSize (xsize, ysize);
+  firstiteration = true;
+
+  int classes = labelmapback.size();
+
+  int numClasses = classNames->numClasses();
+
+  fprintf (stderr, "ContextTree classification !\n");
+
+  probabilities.reInit (xsize, ysize, numClasses);
+  probabilities.setAll (0);
+
+  // NOTE(review): this allocation is never freed -- it leaks on every call,
+  // including the early "return" in the catch block below. Consider a stack
+  // object (or deleting it before every exit path).
+  SparseVector *globalCategorFeat = new SparseVector();
+
+  std::string currentFile = Globals::getCurrentImgFN();
+  MultiChannelImageT<double> feats;
+
+  NICE::ColorImage img;
+  if(xsize != segresult.width() || ysize != segresult.height())
+  {
+    cout << currentFile << " " << xsize << " =? " << segresult.width() << "; " << ysize << " =? " << segresult.height() << endl;
+    exit(-1);
+  }
+  try {
+    img = ColorImage (currentFile);
+  } catch (Exception) {
+    // NOTE(review): catches by value (slicing risk) -- prefer
+    // "catch (const Exception &)"; this path also leaks globalCategorFeat.
+    cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
+    return;
+  }
+
+  //TODO add to features!
+  int amountRegions;
+  extractBasicFeatures (feats, img, currentFile, amountRegions); //read image and do some simple transformations
+
+  // number of pixels per region (for normalizing the region probabilities)
+  vector<int> rSize;
+  if (useRegionFeature)
+  {
+    rSize = vector<int>(amountRegions, 0);
+    for (int y = 0; y < ysize; y++)
+    {
+      for (int x = 0; x < xsize; x++)
+      {
+        rSize[feats(x, y, rawChannels)]++;
+      }
+    }
+  }
+
+  bool allleaf = false;
+
+  // current tree node of every pixel, one channel per tree
+  MultiChannelImageT<unsigned short int> currentfeats (xsize, ysize, nbTrees);
+
+  currentfeats.setAll (0);
+
+  depth = 0;
+
+  vector<vector<double> > regionProbs;
+  if (useRegionFeature)
+  {
+    regionProbs = vector<vector<double> > (amountRegions, vector<double> (classes, 0.0));
+  }
+
+  // descend the forest one level per iteration, mirroring the training loop
+  for (int d = 0; d < maxDepth && !allleaf; d++)
+  {
+    depth++;
+    vector<vector<double> > lastRegionProbs = regionProbs;
+    if (useRegionFeature)
+    {
+      int rSize2 = (int)regionProbs.size();
+      for (int b = 0; b < rSize2; b++)
+      {
+        int rSize3 = (int)regionProbs[b].size();
+        for (int c = 0; c < rSize3; c++)
+        {
+          regionProbs[b][c] = 0.0;
+        }
+      }
+    }
+
+    // weight of this level's contribution to the categorization feature
+    double weight = computeWeight (depth, maxDepth) - computeWeight (depth - 1, maxDepth);
+
+    if (depth == 1)
+    {
+      weight = computeWeight (1, maxDepth);
+    }
+
+    allleaf = true;
+
+    MultiChannelImageT<unsigned short int> lastfeats = currentfeats;
+
+    int tree;
+#pragma omp parallel for private(tree)
+    for (tree = 0; tree < nbTrees; tree++)
+    {
+      for (int x = 0; x < xsize; x++)
+      {
+        for (int y = 0; y < ysize; y++)
+        {
+          int t = currentfeats.get (x, y, tree);
+
+          if (forest[tree][t].left > 0)
+          {
+            allleaf = false;
+            Features feat;
+            feat.feats = &feats;
+            feat.cfeats = &lastfeats;
+            feat.cTree = tree;
+            feat.tree = &forest[tree];
+            feat.rProbs = &lastRegionProbs;
+
+            double val = forest[tree][t].feat->getVal (feat, x, y);
+            if(!isfinite(val))
+            {
+              val = 0.0;
+            }
+
+            // descend left/right and accumulate the node weight into the
+            // global categorization feature
+            if (val < forest[tree][t].decision)
+            {
+              currentfeats.set (x, y, forest[tree][t].left, tree);
+#pragma omp critical
+              {
+                if(fasthik != NULL && useCategorization && forest[tree][forest[tree][t].left].nodeNumber < uniquenumber)
+                  (*globalCategorFeat)[forest[tree][forest[tree][t].left].nodeNumber] += weight;
+              }
+            }
+            else
+            {
+              currentfeats.set (x, y, forest[tree][t].right, tree);
+#pragma omp critical
+              {
+                if(fasthik != NULL && useCategorization && forest[tree][forest[tree][t].right].nodeNumber < uniquenumber)
+                  (*globalCategorFeat)[forest[tree][forest[tree][t].right].nodeNumber] += weight;
+              }
+            }
+          }
+        }
+      }
+    }
+
+    // accumulate and normalize the per-region class distributions
+    if (useRegionFeature)
+    {
+      int xsize = currentfeats.width();
+      int ysize = currentfeats.height();
+
+#pragma omp parallel for
+      for (int x = 0; x < xsize; x++)
+      {
+        for (int y = 0; y < ysize; y++)
+        {
+          for (int tree = 0; tree < nbTrees; tree++)
+          {
+            int node = currentfeats.get(x, y, tree);
+            for (uint d = 0; d < forest[tree][node].dist.size(); d++)
+            {
+              regionProbs[(int)(feats(x, y, rawChannels))][d] += forest[tree][node].dist[d];
+            }
+          }
+        }
+      }
+
+
+      int rSize2 = (int)regionProbs.size();
+      for (int b = 0; b < rSize2; b++)
+      {
+        int rSize3 = (int)regionProbs[b].size();
+        for (int c = 0; c < rSize3; c++)
+        {
+          regionProbs[b][c] /= (double)(rSize[b]);
+        }
+      }
+    }
+
+    if (depth < maxDepth)
+    {
+      //compute integral images
+      if (firstiteration)
+      {
+        feats.addChannel (classes + rawChannels);
+      }
+      computeIntegralImage (currentfeats, feats, channelType.size() - classes);
+      if (firstiteration)
+      {
+        firstiteration = false;
+      }
+    }
+  }
+
+
+
+  int allClasses = (int)probabilities.channels();
+  vector<int> useclass (allClasses, 1);
+
+  vector<int> classesInImg;
+  
+  // optionally restrict the class set via external or learned categorization
+  if(useCategorization)
+  {
+    if(cndir != "")
+    {
+      
+      std::vector< std::string > list;
+      StringTools::split (currentFile, '/', list);
+      string orgname = list.back();
+      
+      ifstream infile ((cndir + "/" + orgname + ".dat").c_str());
+      while (!infile.eof() && infile.good())
+      {
+        int tmp;
+        infile >> tmp;
+        assert (tmp >= 0 && tmp < allClasses);
+        classesInImg.push_back(tmp);
+      }
+    }
+    else
+    {
+      globalCategorFeat->setDim(uniquenumber);
+      globalCategorFeat->normalize();
+      ClassificationResult cr = fasthik->classify(globalCategorFeat);
+      // NOTE(review): signed/unsigned comparison -- "classes" is int
+      for (uint i = 0; i < classes; i++)
+      {
+        cerr << cr.scores[i] << " ";
+        if(cr.scores[i] > 0.0/*-0.3*/)
+        {
+          classesInImg.push_back(i);
+        }
+      }
+    }
+    cerr << "amount of classes: " << classes << " used classes: " << classesInImg.size() << endl;
+  }
+  
+  // fall back to the full label set if categorization selected nothing
+  if(classesInImg.size() == 0)
+  {
+    for (uint i = 0; i < classes; i++)
+    {
+      classesInImg.push_back(i);
+    }
+  }
+
+  if (pixelWiseLabeling)
+  {
+    // final labeling: per pixel arg-max over the mean tree probabilities
+    //long int offset = 0;
+
+    if(segresult.width() == 0)
+    {
+      segresult.resize(xsize,ysize);
+      segresult.set(0);
+    }
+
+    for (int x = 0; x < xsize; x++)
+    {
+      for (int y = 0; y < ysize; y++)
+      {
+        double maxvalue = - numeric_limits<double>::max(); //TODO: das kann auch nur pro knoten gemacht werden, nicht pro pixel
+        int maxindex = 0;
+
+        for (uint c = 0; c < classesInImg.size(); c++)
+        {
+          int i = classesInImg[c];
+          int currentclass = labelmapback[i];
+          if (useclass[currentclass])
+          {
+            probabilities (x, y, currentclass) = getMeanProb (x, y, i, currentfeats);
+
+            if (probabilities (x, y, currentclass) > maxvalue)
+            {
+              maxvalue = probabilities (x, y, currentclass);
+              maxindex = currentclass;
+            }
+          }
+        }
+        if(x >= segresult.width() || y >= segresult.height())
+        {
+          cerr << x << " >= " << segresult.width() << " "<<  y << " >= " << segresult.height() << endl;
+        }
+        segresult.setPixel (x, y, maxindex);
+        if (maxvalue > 1)
+          cout << "maxvalue: " << maxvalue << endl;
+      }
+    }
+#undef VISUALIZE
+#ifdef VISUALIZE
+    // NOTE(review): dead debug code (VISUALIZE is #undef'd above); it also
+    // accesses "probabilities.numChannels" which may not match the current
+    // MultiChannelImageT interface -- verify before re-enabling.
+    for (int j = 0 ; j < (int)probabilities.numChannels; j++)
+    {
+      //cout << "class: " << j << endl;//" " << cn.text (j) << endl;
+
+      NICE::Matrix tmp (probabilities.height(), probabilities.width());
+      double maxval = -numeric_limits<double>::max();
+      double minval = numeric_limits<double>::max();
+
+
+      for (int y = 0; y < probabilities.height(); y++)
+        for (int x = 0; x < probabilities.width(); x++)
+        {
+          double val = probabilities (x, y, j);
+          tmp (y, x) = val;
+          maxval = std::max (val, maxval);
+          minval = std::min (val, minval);
+        }
+      tmp (0, 0) = 1.0;
+      tmp (0, 1) = 0.0;
+
+      NICE::ColorImage imgrgb (probabilities.width(), probabilities.height());
+      ICETools::convertToRGB (tmp, imgrgb);
+
+      cout << "maxval = " << maxval << " minval: " << minval << " for class " << j << endl; //cn.text (j) << endl;
+
+      std::string s;
+      std::stringstream out;
+      out << "tmpprebmap" << j << ".ppm";
+      s = out.str();
+      imgrgb.write (s);
+      //showImage(imgrgb, "Ergebnis");
+      //getchar();
+    }
+    cout << "fertsch" << endl;
+    getchar();
+    cout << "weiter gehtsch" << endl;
+#endif
+  }
+  else
+  {
+    //using segmentation: label whole regions by their accumulated probability
+    Matrix regions;
+
+    if (useRegionFeature)
+    {
+      // reuse the region-id channel computed in extractBasicFeatures
+      int rchannel = -1;
+      for (uint i = 0; i < channelType.size(); i++)
+      {
+        if (channelType[i] == 1)
+        {
+          rchannel = i;
+          break;
+        }
+      }
+
+      assert(rchannel > -1);
+
+      int xsize = feats.width();
+      int ysize = feats.height();
+      regions.resize(xsize, ysize);
+      for (int y = 0; y < ysize; y++)
+      {
+        for (int x = 0; x < xsize; x++)
+        {
+          regions(x, y) = feats(x, y, rchannel);
+        }
+      }
+    }
+    else
+    {
+      amountRegions = segmentation->segRegions (img, regions);
+    }
+
+    regionProbs.clear();
+    regionProbs = vector<vector<double> >(amountRegions, vector<double> (classes, 0.0));
+
+    vector<int> bestlabels (amountRegions, labelmapback[classesInImg[0]]);
+
+    for (int y = 0; y < img.height(); y++)
+    {
+      for (int x = 0; x < img.width(); x++)
+      {
+        int cregion = regions (x, y);
+
+        for (uint c = 0; c < classesInImg.size(); c++)
+        {
+          int d = classesInImg[c];
+          regionProbs[cregion][d] += getMeanProb (x, y, d, currentfeats);
+        }
+      }
+    }
+
+    // arg-max per region, then map internal indices back to class labels
+    for (int r = 0; r < amountRegions; r++)
+    {
+      double maxval = regionProbs[r][classesInImg[0]];
+      bestlabels[r] = classesInImg[0];
+
+      for (int d = 1; d < classes; d++)
+      {
+        if (maxval < regionProbs[r][d])
+        {
+          maxval = regionProbs[r][d];
+          bestlabels[r] = d;
+        }
+      }
+
+      bestlabels[r] = labelmapback[bestlabels[r]];
+    }
+
+    for (int y = 0; y < img.height(); y++)
+    {
+      for (int x = 0; x < img.width(); x++)
+      {
+
+        segresult.setPixel (x, y, bestlabels[regions (x,y) ]);
+      }
+    }
+
+#undef WRITEREGIONS
+#ifdef WRITEREGIONS
+    RegionGraph rg;
+    segmentation->getGraphRepresentation (img, regions,  rg);
+    for (uint pos = 0; pos < regionProbs.size(); pos++)
+    {
+      rg[pos]->setProbs (regionProbs[pos]);
+    }
+
+    std::string s;
+    std::stringstream out;
+    std::vector< std::string > list;
+    StringTools::split (Globals::getCurrentImgFN (), '/', list);
+
+    out << "rgout/" << list.back() << ".graph";
+    string writefile = out.str();
+    rg.write (writefile);
+#endif
+  }
+
+  cout << "segmentation finished" << endl;
+}
+
+/**
+ * @brief Serializes the complete classifier state (class names, label maps,
+ *        forest, channel layout) as whitespace-separated text.
+ *
+ * The format written here must stay in sync with restore().
+ *
+ * @param os     output stream
+ * @param format unused
+ */
+void SemSegContextTree::store (std::ostream & os, int format) const
+{
+  // full double precision so decision thresholds survive a round trip
+  os.precision (numeric_limits<double>::digits10 + 1);
+  os << nbTrees << endl;
+  classnames.store (os);
+
+  map<int, int>::const_iterator it;
+
+  // label <-> internal index mappings, each prefixed by its size
+  os << labelmap.size() << endl;
+  for (it = labelmap.begin() ; it != labelmap.end(); it++)
+    os << (*it).first << " " << (*it).second << endl;
+
+  os << labelmapback.size() << endl;
+  for (it = labelmapback.begin() ; it != labelmapback.end(); it++)
+    os << (*it).first << " " << (*it).second << endl;
+
+  int trees = forest.size();
+  os << trees << endl;
+
+  for (int t = 0; t < trees; t++)
+  {
+    int nodes = forest[t].size();
+    os << nodes << endl;
+    for (int n = 0; n < nodes; n++)
+    {
+      os << forest[t][n].left << " " << forest[t][n].right << " " << forest[t][n].decision << " " << forest[t][n].isleaf << " " << forest[t][n].depth << " " << forest[t][n].featcounter << " " << forest[t][n].nodeNumber << endl;
+      os << forest[t][n].dist << endl;
+
+      // -1 marks a node without a split operation (leaf)
+      if (forest[t][n].feat == NULL)
+        os << -1 << endl;
+      else
+      {
+        os << forest[t][n].feat->getOps() << endl;
+        forest[t][n].feat->store (os);
+      }
+    }
+  }
+
+  os << channelType.size() << endl;
+  for (int i = 0; i < (int)channelType.size(); i++)
+  {
+    os << channelType[i] << " ";
+  }
+  os << endl;
+
+  os << integralMap.size() << endl;
+  for (int i = 0; i < (int)integralMap.size(); i++)
+  {
+    os << integralMap[i].first << " " << integralMap[i].second << endl;
+  }
+
+  os << rawChannels << endl;
+  
+  os << uniquenumber << endl;
+}
+
+/**
+ * @brief Restores the classifier state previously written by store().
+ *
+ * Reads class names, the label maps, the forest (node topology, distributions
+ * and split operations) and the channel layout. Split operations are
+ * reconstructed by cloning the registered prototype whose type id matches the
+ * serialized one.
+ *
+ * @param is     input stream
+ * @param format unused
+ */
+void SemSegContextTree::restore (std::istream & is, int format)
+{
+  is >> nbTrees;
+
+  classnames.restore (is);
+
+  // label <-> internal index mappings, each prefixed by its size
+  int lsize;
+  is >> lsize;
+
+  labelmap.clear();
+  for (int l = 0; l < lsize; l++)
+  {
+    int first, second;
+    is >> first;
+    is >> second;
+    labelmap[first] = second;
+  }
+
+  is >> lsize;
+  labelmapback.clear();
+  for (int l = 0; l < lsize; l++)
+  {
+    int first, second;
+    is >> first;
+    is >> second;
+    labelmapback[first] = second;
+  }
+
+  int trees;
+  is >> trees;
+  forest.clear();
+
+  for (int t = 0; t < trees; t++)
+  {
+    vector<TreeNode> tmptree;
+    forest.push_back (tmptree);
+    int nodes;
+    is >> nodes;
+    
+    for (int n = 0; n < nodes; n++)
+    {
+      TreeNode tmpnode;
+      forest[t].push_back (tmpnode);
+      is >> forest[t][n].left;
+      is >> forest[t][n].right;
+      is >> forest[t][n].decision;
+      is >> forest[t][n].isleaf;
+      is >> forest[t][n].depth;
+      is >> forest[t][n].featcounter;
+      is >> forest[t][n].nodeNumber;
+
+      is >> forest[t][n].dist;
+      
+      // -1 marks a node without a split operation (leaf)
+      int feattype;
+      is >> feattype;
+      assert (feattype < NBOPERATIONS);
+      forest[t][n].feat = NULL;
+
+      if (feattype >= 0)
+      {
+        // clone the first registered prototype operation with a matching type
+        // id (fix: the original version contained a redundant third nested
+        // loop whose index was never used and whose "break" only left that
+        // innermost loop; the first-match semantics are unchanged)
+        for (uint o = 0; o < ops.size() && forest[t][n].feat == NULL; o++)
+        {
+          for (uint o2 = 0; o2 < ops[o].size(); o2++)
+          {
+            if (ops[o][o2]->getOps() == feattype)
+            {
+              forest[t][n].feat = ops[o][o2]->clone();
+              break;
+            }
+          }
+        }
+
+        assert (forest[t][n].feat != NULL);
+        forest[t][n].feat->restore (is);
+        
+      }
+    }
+  }
+
+  channelType.clear();
+  int ctsize;
+  is >> ctsize;
+  for (int i = 0; i < ctsize; i++)
+  {
+    int tmp;
+    is >> tmp;
+    channelType.push_back (tmp);
+  }
+
+  integralMap.clear();
+  int iMapSize;
+  is >> iMapSize;
+  for (int i = 0; i < iMapSize; i++)
+  {
+    int first;
+    int second;
+    is >> first;
+    is >> second;
+    integralMap.push_back (pair<int, int> (first, second));
+  }
+
+  is >> rawChannels;
+  
+  is >> uniquenumber;
+}
+
+

+ 248 - 0
semseg/SemSegContextTree.h

@@ -0,0 +1,248 @@
+/**
+* @file SemSegContextTree.h
+* @brief Context Trees -> Combination of decision tree and context information
+* @author Björn Fröhlich
+* @date 29.11.2011
+
+*/
+#ifndef SemSegContextTreeINCLUDE
+#define SemSegContextTreeINCLUDE
+
+// nice-core includes
+#include <core/vector/VVector.h>
+
+// nice-gphik-core includes
+#include <gp-hik-exp/GPHIKClassifierNICE.h>
+
+// nice-vislearning includes
+#include <vislearning/features/localfeatures/LocalFeatureColorWeijer.h>
+
+// nice-segmentation includes
+#include <segmentation/RegionSegmentationMethod.h>
+
+
+// nice-semseg includes
+#include "semseg/semseg/operations/Operations.h"
+#include "SemanticSegmentation.h"
+
+
+
+namespace OBJREC {
+
+/** Semantic segmentation with a forest of context trees: decision trees whose
+ *  split features may additionally use context information (intermediate
+ *  classification results) computed level by level. */
+
+class SemSegContextTree : public SemanticSegmentation
+{
+    /** Segmentation Method */
+    RegionSegmentationMethod *segmentation;
+
+    /** tree -> saved as vector of nodes */
+    std::vector<std::vector<TreeNode> > forest;
+
+    /** local features */
+    LocalFeatureColorWeijer *lfcw;
+
+    /** number of featuretype -> currently: local and context features = 2 */
+    int ftypes;
+
+    /** maximum samples for tree  */
+    int maxSamples;
+
+    /** size for neighbourhood */
+    int windowSize;
+    
+    /** how many feats should be considered for a split */
+    int featsPerSplit;
+
+    /** count samples per label */
+    std::map<int, int> labelcounter;
+
+    /** map of labels */
+    std::map<int, int> labelmap;
+
+    /** map of labels inverse*/
+    std::map<int, int> labelmapback;
+
+    /** scalefactor for balancing for each class */
+    std::vector<double> a;
+
+    /** counter for used operations */
+    std::vector<int> opOverview;
+
+    /** relative use of context vs raw features per tree level*/
+    std::vector<std::vector<double> > contextOverview;
+
+    /** the minimum number of features allowed in a leaf */
+    int minFeats;
+
+    /** maximal depth of tree */
+    int maxDepth;
+
+    /** current depth for training */
+    int depth;
+
+    /** how many splittests */
+    int randomTests;
+
+    /** operations for pairwise features */
+    std::vector<std::vector<Operation*> > ops;
+
+    /** value access objects for reading feature values
+     *  (NOTE(review): exact role not evident from this header -- confirm) */
+    std::vector<ValueAccess*> calcVal;
+
+    /** use alternative calculation for information gain */
+    bool useShannonEntropy;
+
+    /** Classnames */
+    ClassNames classnames;
+
+    /** train selection */
+    std::set<int> forbidden_classes;
+
+    /** Configfile */
+    const NICE::Config *conf;
+
+    /** use pixelwise labeling or regionlabeling with additional segmentation */
+    bool pixelWiseLabeling;
+
+    /** Number of trees used for the forest */
+    int nbTrees;
+    
+    /** use Gradient image or not */
+    bool useGradient;
+    
+    /** use Color features from van de Weijer or not */
+    bool useWeijer;
+    
+    /** use Regions as extra feature channel or not */
+    bool useRegionFeature;
+    
+    /** use external image categorization to avoid some classes */
+    bool useCategorization;
+    
+    /** categorization information for external categorization */
+    std::string cndir;
+
+    /** how to handle each channel
+     * 0: simple grayvalue features
+     * 1: which pixel belongs to which region
+     * 2: graycolor integral images
+     * 3: context integral images
+     * 4: context features (not in MultiChannelImageT encoded)
+     */
+    std::vector<int> channelType;
+
+    /** list of channels per feature type */
+    std::vector<std::vector<int> > channelsPerType;
+    
+    /** whether we should use the geometric features of Hoiem (only offline computation with MATLAB supported) */
+    bool useHoiemFeatures;
+
+    /** directory of the geometric features */
+    std::string hoiemDirectory;
+    
+    /** first iteration or not */
+    bool firstiteration;
+    
+    /** which IntegralImage channel belongs to which raw value channel */
+    std::vector<std::pair<int, int> > integralMap;
+    
+    /** amount of grayvalue Channels */
+    int rawChannels;
+    
+    /** classifier for categorization */
+    OBJREC::GPHIKClassifierNICE *fasthik;
+    
+    /** unique numbers for nodes */
+    int uniquenumber;
+
+  public:
+    /** simple constructor */
+    SemSegContextTree ( const NICE::Config *conf, const MultiDataset *md );
+
+    /** simple destructor */
+    virtual ~SemSegContextTree();
+
+    /**
+     * test a single image
+     * @param ce input data
+     * @param segresult segmentation results
+     * @param probabilities probabilities for each pixel
+     */
+    void semanticseg ( CachedExample *ce,   NICE::Image & segresult,  NICE::MultiChannelImageT<double> & probabilities );
+
+    /**
+     * the main training method
+     * @param md training data
+     */
+    void train ( const MultiDataset *md );
+
+
+    /**
+     * @brief computes integral image of given feats
+     *
+     * @param currentfeats input features
+     * @param integralImage output image (must be initialized)
+     * @return void
+     **/
+    void computeIntegralImage ( const NICE::MultiChannelImageT<unsigned short int> &currentfeats, NICE::MultiChannelImageT<double> &lfeats,int firstChannel );
+
+    /**
+     * @brief reads image and does some simple convertions
+     *
+     * @param feats output image
+     * @param currentFile image filename
+     * @return void
+     **/
+    void extractBasicFeatures ( NICE::MultiChannelImageT<double> &feats, const NICE::ColorImage &img, const std::string &currentFile, int &amountRegions);
+
+    /**
+     * compute best split for current settings
+     * @param feats features
+     * @param currentfeats matrix with current node for each feature
+     * @param labels labels for each feature
+     * @param node current node
+     * @param splitop output split operation
+     * @param splitval output split threshold
+     * @return best information gain
+     */
+    double getBestSplit ( std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<unsigned short int> > &currentfeats, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree, std::vector<std::vector<std::vector<double> > > &regionProbs );
+
+    /**
+     * @brief computes the mean probability for a given class over all trees
+     * @param x x position
+     * @param y y position
+     * @param channel current class
+     * @param currentfeats information about the nodes
+     * @return double mean value
+     **/
+    inline double getMeanProb ( const int &x, const int &y, const int &channel, const NICE::MultiChannelImageT<unsigned short int> &currentfeats );
+
+    /**
+     * @brief load all data to is stream
+     *
+     * @param is input stream
+     * @param format has no influence
+     * @return void
+     **/
+    virtual void restore ( std::istream & is, int format = 0 );
+
+    /**
+     * @brief save all data to is stream
+     *
+     * @param os output stream
+     * @param format has no influence
+     * @return void
+     **/
+    virtual void store ( std::ostream & os, int format = 0 ) const;
+
+    /**
+     * @brief clean up
+     *
+     * @return void
+     **/
+    virtual void clear () {}
+};
+
+} // namespace
+
+#endif

+ 2368 - 0
semseg/SemSegCsurka.cpp

@@ -0,0 +1,2368 @@
+#include <sstream>
+#include <iostream>
+
+#include "SemSegCsurka.h"
+#include "vislearning/baselib/ICETools.h"
+#include "core/image/Filter.h"
+#include "semseg/semseg/postsegmentation/PSSImageLevelPrior.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+#undef DEBUG_CSURK
+
+#undef UNCERTAINTY
+// #define UNCERTAINTY
+
+/**
+ * @brief Set up the Csurka semantic segmentation pipeline from the config.
+ *
+ * Reads all descriptor / PCA / GMM / k-means / classifier / post-processing
+ * options, instantiates the region segmentation method and the chosen
+ * classifier, and then either loads previously trained state from the cache
+ * (read_cache) or trains everything from scratch via train().
+ *
+ * @param conf configuration with all pipeline options
+ * @param md   multi-dataset providing the "train" set and the class names
+ */
+SemSegCsurka::SemSegCsurka ( const Config *conf,
+                             const MultiDataset *md )
+    : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
+{
+  this->conf = conf;
+
+  opSiftImpl = conf->gS ( "Descriptor", "implementation", "VANDESANDE" );
+  readfeat = conf->gB ( "Descriptor", "read", true );
+  writefeat = conf->gB ( "Descriptor", "write", true );
+#ifdef DEBUG_CSURK
+  clog << "[log] SemSegCsurka::SemSegCsurka: OppenentSift implemenation: " << opSiftImpl << endl;
+#endif
+
+  save_cache = conf->gB ( "FPCPixel", "save_cache", true );
+  read_cache = conf->gB ( "FPCPixel", "read_cache", false );
+  cache = conf->gS ( "cache", "root", "" );
+  sigmaweight = conf->gD ( "SemSegCsurka", "sigmaweight", 0.6 );
+
+  dim = conf->gI ( "SemSegCsurka", "pcadim", 50 );
+
+  usepca = conf->gB ( "SemSegCsurka", "usepca", true );
+  calcpca = conf->gB ( "SemSegCsurka", "calcpca", false );
+
+  usegmm = conf->gB ( "SemSegCsurka", "usegmm", false );
+  norm = conf->gB ( "SemSegCsurka", "normalize", false );
+  usefisher = conf->gB ( "SemSegCsurka", "usefisher", false );
+  dogmm = conf->gB ( "SemSegCsurka", "dogmm", false );
+  gaussians = conf->gI ( "SemSegCsurka", "gaussians", 50 );
+
+  usekmeans = conf->gB ( "SemSegCsurka", "usekmeans", false );
+  kmeansfeat = conf->gI ( "SemSegCsurka", "kmeansfeat", 50 );
+  kmeanshard = conf->gB ( "SemSegCsurka", "kmeanshard", false );
+
+  cname = conf->gS ( "SemSegCsurka", "classifier", "RandomForests" );
+  anteil = conf->gD ( "SemSegCsurka", "anteil", 1.0 );
+  userellocprior = conf->gB ( "SemSegCsurka", "rellocfeat", false );
+  bool usesrg = conf->gB ( "SemSegCsurka", "usesrg", false );
+
+  useregions = conf->gB ( "SemSegCsurka", "useregions", true );
+  savesteps = conf->gB ( "SemSegCsurka", "savesteps", true );
+  bool usegcopt = conf->gB ( "SemSegCsurka", "usegcopt", false );
+
+  bestclasses = conf->gI ( "SemSegCsurka", "bestclasses", 0 );
+
+  smoothhl = conf->gB ( "SemSegCsurka", "smoothhl", false );
+  smoothfactor = conf->gD ( "SemSegCsurka", "smoothfactor", 1.0 );
+
+  usecolorfeats = conf->gB ( "SemSegCsurka", "usecolorfeats", false );
+
+  string rsMethod = conf->gS ( "SemSegCsurka", "segmentation", "meanshift" );
+
+  g = NULL;
+  k = NULL;
+  relloc = NULL;
+  srg = NULL;
+  gcopt = NULL;
+
+  // relative location priors and super region growing both operate on
+  // regions, so they cannot be combined with a pixel-wise setup
+  if ( !useregions && ( userellocprior || usesrg ) )
+  {
+    cerr << "relative location priors and super region growing are just supported in combination with useregions" << endl;
+    exit ( 1 );
+  }
+
+  if ( usepca )
+    pca = PCA ( dim );
+
+  RegionSegmentationMethod * tmpseg;
+  if ( rsMethod == "meanshift" )
+    tmpseg = new RSMeanShift ( conf );
+  else
+    tmpseg = new RSGraphBased ( conf );
+
+  // RSCache wraps (and takes over) the actual segmentation method
+  if ( save_cache )
+    seg = new RSCache ( conf, tmpseg );
+  else
+    seg = tmpseg;
+
+  if ( userellocprior )
+    relloc = new RelativeLocationPrior ( conf );
+  else
+    relloc = NULL;
+
+#ifdef NICE_USELIB_ICE
+  if ( usesrg )
+    srg = new PPSuperregion ( conf );
+  else
+    srg = NULL;
+#else
+  srg = NULL;
+#endif
+
+  if ( usegcopt )
+    gcopt = new PPGraphCut ( conf );
+  else
+    gcopt = NULL;
+
+  // either a feature-pool classifier (classifier) or a vector classifier
+  // (vclassifier) is used; exactly one of the two stays non-NULL
+  classifier = NULL;
+  vclassifier = NULL;
+  if ( cname == "RandomForests" )
+    classifier = new FPCRandomForests ( conf, "ClassifierForest" );
+  else if ( cname == "SMLR" )
+    classifier = new FPCSMLR ( conf, "ClassifierSMLR" );
+  else if ( cname == "GPHIK" )
+    classifier = new GPHIKClassifierNICE ( conf, "ClassiferGPHIK" );
+  else
+    vclassifier = GenericClassifierSelection::selectVecClassifier ( conf, "main" );
+  //classifier = new FPCSparseMultinomialLogisticRegression(conf, "ClassifierSMLR");
+
+  if ( classifier != NULL )
+    classifier->setMaxClassNo ( classNames->getMaxClassno() );
+  else
+    vclassifier->setMaxClassNo ( classNames->getMaxClassno() );
+
+  cn = md->getClassNames ( "train" );
+
+  if ( read_cache )
+  {
+    fprintf ( stderr, "SemSegCsurka:: Reading classifier data from %s\n", ( cache + "/fpcrf.data" ).c_str() );
+
+    if ( classifier != NULL )
+      classifier->read ( cache + "/fpcrf.data" );
+    else
+      vclassifier->read ( cache + "/veccl.data" );
+
+    if ( usepca )
+    {
+      std::string filename = cache + "/pca";
+      pca.read ( filename );
+    }
+
+    if ( usegmm )
+    {
+      g = new GMM ( conf, gaussians );
+
+      if ( !g->loadData ( cache + "/gmm" ) )
+      {
+        cerr << "SemSegCsurka:: no gmm file found" << endl;
+        exit ( -1 );
+      }
+    }
+    else {
+      g = NULL;
+    }
+
+    if ( usekmeans )
+    {
+      k = new KMeansOnline ( gaussians );
+    }
+
+    fprintf ( stderr, "SemSegCsurka:: successfully read\n" );
+
+    // probe whether cached relative-location-prior data exists;
+    // retrain the post-processing stage if the cache file is missing
+    std::string filename = cache + "/rlp";
+
+    FILE *value;
+    value = fopen ( filename.c_str(), "r" );
+
+    if ( value == NULL )
+    {
+      trainpostprocess ( md );
+    }
+    else
+    {
+      fclose ( value ); // fix: the probe handle was leaked before
+      if ( userellocprior )
+      {
+        relloc->read ( filename );
+      }
+    }
+
+    // same probe for cached super-region-growing data
+    filename = cache + "/srg";
+
+    value = fopen ( filename.c_str(), "r" );
+
+    if ( value == NULL )
+    {
+      trainpostprocess ( md );
+    }
+    else
+    {
+      fclose ( value ); // fix: the probe handle was leaked before
+      if ( srg != NULL )
+      {
+        srg->read ( filename );
+      }
+    }
+  }
+  else
+  {
+    train ( md );
+  }
+}
+
+/**
+ * @brief Release the classifier, the segmentation method and the GMM
+ *        owned by this object.
+ */
+SemSegCsurka::~SemSegCsurka()
+{
+  // clean-up
+  if ( classifier != NULL )
+    delete classifier;
+  if ( vclassifier != NULL )
+    delete vclassifier;
+  if ( seg != NULL )
+    delete seg;
+
+  // fix: the pointer was set to NULL *before* the delete check, so the
+  // GMM allocated in the constructor / train() was never released
+  if ( g != NULL )
+    delete g;
+  g = NULL;
+
+  // NOTE(review): relloc, srg, gcopt and k are also allocated in the
+  // constructor but not freed here -- verify ownership before adding
+  // deletes for them as well.
+}
+
+/**
+ * @brief Scale every feature dimension of the given examples to [0,1].
+ *
+ * On the first call the per-dimension minima/maxima are computed from the
+ * examples and cached in vecmin/vecmax; subsequent calls reuse these ranges
+ * so that test data is normalized consistently with the training data.
+ *
+ * @param ex examples with dense feature vectors (modified in place)
+ */
+void SemSegCsurka::normalize ( Examples &ex )
+{
+  assert ( ex.size() > 0 );
+  if ( vecmin.size() == 0 )
+  {
+    // first call: determine the value range of every dimension
+    for ( int j = 0; j < ( int ) ex[0].second.vec->size(); j++ )
+    {
+      // fix: use double limits instead of int limits so feature values
+      // outside the int range are handled correctly
+      double maxv = -numeric_limits<double>::max();
+      double minv = numeric_limits<double>::max();
+      for ( int i = 0; i < ( int ) ex.size(); i++ )
+      {
+        maxv = std::max ( maxv, ( *ex[i].second.vec ) [j] );
+        minv = std::min ( minv, ( *ex[i].second.vec ) [j] );
+      }
+      vecmin.push_back ( minv );
+      vecmax.push_back ( maxv );
+    }
+  }
+  for ( int i = 0; i < ( int ) ex.size(); i++ )
+  {
+    for ( int j = 0; j < ( int ) ex[i].second.vec->size(); j++ )
+    {
+      double range = vecmax[j] - vecmin[j];
+      // fix: guard against a constant dimension, which previously caused a
+      // division by zero and produced inf/NaN values
+      if ( range > 0.0 )
+        ( *ex[i].second.vec ) [j] = ( ( *ex[i].second.vec ) [j] - vecmin[j] ) / range;
+      else
+        ( *ex[i].second.vec ) [j] = 0.0;
+    }
+  }
+  return;
+}
+
+/**
+ * @brief Replace the dense low-level feature vectors of the examples by
+ *        sparse high-level descriptors (k-means assignments, GMM posteriors
+ *        or Fisher vectors, depending on the configuration flags).
+ *
+ * @param ex     examples whose .vec members are consumed and replaced by .svec
+ * @param reduce fraction in (0,1] of examples to keep; if < 1.0 the examples
+ *               are randomly subsampled and the dropped ones are erased
+ */
+void SemSegCsurka::convertLowToHigh ( Examples &ex, double reduce )
+{
+  cout << "converting low-level features to high-level features" << endl;
+
+  if ( reduce >= 1.0 )
+  {
+    // keep all examples: convert each dense vector in place
+    for ( int i = 0; i < ( int ) ex.size(); i++ )
+    {
+      SparseVector *f = new SparseVector();
+
+      if ( usekmeans )
+      {
+        k->getDist ( *ex[i].second.vec, *f, kmeansfeat, kmeanshard );
+      }
+      else
+      {
+        if ( usefisher )
+          g->getFisher ( *ex[i].second.vec, *f );
+        else
+          g->getProbs ( *ex[i].second.vec, *f );
+      }
+      // the dense vector is no longer needed; the example now owns svec
+      delete ex[i].second.vec;
+
+      ex[i].second.vec = NULL;
+      ex[i].second.svec = f;
+    }
+  }
+  else
+  {
+    // subsample: keep each example independently with probability 'reduce'
+    // NOTE(review): re-seeding with time(NULL) on every call makes repeated
+    // calls within the same second draw identical subsets -- confirm intended
+    srand ( time ( NULL ) );
+
+    vector<bool> del ( ex.size(), false );
+    cout << "Example size old " << ex.size() << endl;
+
+//#pragma omp parallel for
+    for ( int i = 0; i < ( int ) ex.size(); i++ )
+    {
+      double rval = ( double ) rand() / ( double ) RAND_MAX;
+      if ( rval < reduce )
+      {
+        SparseVector *f = new SparseVector();
+
+        if ( usekmeans )
+          k->getDist ( *ex[i].second.vec, *f, kmeansfeat, kmeanshard );
+        else
+        {
+          if ( usefisher )
+            g->getFisher ( *ex[i].second.vec, *f );
+          else
+            g->getProbs ( *ex[i].second.vec, *f );
+        }
+
+        delete ex[i].second.vec;
+        ex[i].second.vec = NULL;
+        ex[i].second.svec = f;
+      }
+      else
+      {
+        // mark for deletion; erasing inside this loop would shift indices
+        del[i] = true;
+      }
+    }
+    // erase back-to-front so the remaining indices stay valid
+    for ( int i = ( int ) del.size() - 1; i >= 0; i-- )
+    {
+      if ( del[i] )
+      {
+        ex.erase ( ex.begin() + i );
+      }
+    }
+    cerr << "Example size new " << ex.size() << endl;
+  }
+  cerr << "converting low-level features to high-level features finished" << endl;
+}
+
+/**
+ * @brief Spatially smooth the sparse high-level descriptors of the examples.
+ *
+ * The example positions are rasterized (per detected scale) into a regular
+ * grid image for every sparse dimension, that image is blurred with an
+ * approximated Gaussian, and the blurred values are written back into the
+ * examples' sparse vectors. Does nothing unless smoothhl is enabled.
+ *
+ * @param ex examples with valid .svec, .x, .y and .scale members
+ */
+void SemSegCsurka::smoothHL ( Examples ex )
+{
+
+  if ( !smoothhl )
+    return;
+  assert ( ex.size() > 1 );
+
+  long long int minx = numeric_limits<long long int>::max();
+  long long int miny = numeric_limits<long long int>::max();
+  long long int maxx = -numeric_limits<long long int>::max();
+  long long int maxy = -numeric_limits<long long int>::max();
+  long long int distx = numeric_limits<long long int>::max();
+  long long int disty = numeric_limits<long long int>::max();
+
+  // collect the distinct scales; one grid image is built per scale
+  set<double> scales;
+  for ( int i = 0; i < ( int ) ex.size(); i++ )
+  {
+    scales.insert ( ex[i].second.scale );
+  }
+
+  map<double, int> scalepos;
+  int it = 0;
+
+  for ( set<double>::const_iterator iter = scales.begin(); iter != scales.end();    ++iter, ++it )
+  {
+    scalepos.insert ( make_pair ( *iter, it ) );
+  }
+
+  // bounding box of all positions and the minimal positive grid spacing
+  // NOTE(review): minx/miny are long long but compared against
+  // numeric_limits<int>::max() here -- verify this mixed-width check
+  for ( int i = 0; i < ( int ) ex.size(); i++ )
+  {
+    if ( minx < numeric_limits<int>::max() && ex[i].second.x - minx > 0 )
+      distx = std::min ( distx, ex[i].second.x - minx );
+    if ( miny < numeric_limits<int>::max() && ex[i].second.y - miny > 0 )
+      disty = std::min ( disty, ex[i].second.y - miny );
+    minx = std::min ( ( long long int ) ex[i].second.x, minx );
+    maxx = std::max ( ( long long int ) ex[i].second.x, maxx );
+    miny = std::min ( ( long long int ) ex[i].second.y, miny );
+    maxy = std::max ( ( long long int ) ex[i].second.y, maxy );
+  }
+
+  // NOTE(review): only distx is passed through abs(); disty is used as-is
+  // below -- confirm that disty can never stay negative/unset here
+  distx = abs ( distx );
+
+  int xsize = ( maxx - minx ) / distx + 1;
+  int ysize = ( maxy - miny ) / disty + 1;
+  // factors mapping image coordinates onto grid indices
+  double valx = ( ( double ) xsize - 1 ) / ( double ) ( maxx - minx );
+  double valy = ( ( double ) ysize - 1 ) / ( double ) ( maxy - miny );
+
+  //double sigma = smoothfactor;
+  // smoothing strength scales with the larger grid dimension
+  double sigma = std::max ( xsize, ysize ) * smoothfactor;
+  //double sigma = 0.2;
+  cout << "sigma1: " << sigma << endl;
+
+  vector<NICE::FloatImage> imgv;
+  vector<NICE::FloatImage> gaussImgv;
+  for ( int i = 0; i < ( int ) scalepos.size(); i++ )
+  {
+    NICE::FloatImage img ( xsize, ysize );
+    NICE::FloatImage gaussImg ( xsize, ysize );
+    imgv.push_back ( img );
+    gaussImgv.push_back ( gaussImg );
+  }
+
+  // smooth every sparse dimension independently
+  for ( int d = 0; d < ex[0].second.svec->getDim(); d++ )
+  {
+    // TODO: determine max and min dynamically
+
+    for ( int i = 0; i < ( int ) scalepos.size(); i++ )
+    {
+      imgv[i].set ( 0.0 );
+      gaussImgv[i].set ( 0.0 );
+    }
+
+    // scatter the sparse values of dimension d into the per-scale grids
+    for ( int i = 0; i < ( int ) ex.size(); i++ )
+    {
+      int xpos = ( ex[i].second.x - minx ) * valx;
+      int ypos = ( ex[i].second.y - miny ) * valy;
+
+      double val = ex[i].second.svec->get ( d );
+      imgv[scalepos[ex[i].second.scale]].setPixel ( xpos, ypos, val );
+    }
+
+    /*
+    for(int y = 0; y < ysize; y++)
+    {
+     for(int x = 0; x < xsize; x++)
+     {
+      // refactor-nice.pl: check this substitution
+      // old: double val = GetValD(img,x,y);
+      double val = img.getPixel(x,y);
+      double  c = 0.0;
+      if(val == 0.0)
+      {
+       if(x > 0)
+       {
+        // refactor-nice.pl: check this substitution
+        // old: val+=GetValD(img,x-1,y);
+        val+=img.getPixel(x-1,y);
+        c+=1.0;
+       }
+       if(y > 0)
+       {
+        // refactor-nice.pl: check this substitution
+        // old: val+=GetValD(img,x,y-1);
+        val+=img.getPixel(x,y-1);
+        c+=1.0;
+       }
+       if(x < xsize-1)
+       {
+        // refactor-nice.pl: check this substitution
+        // old: val+=GetValD(img,x+1,y);
+        val+=img.getPixel(x+1,y);
+        c+=1.0;
+       }
+       if(y < ysize-1)
+       {
+        // refactor-nice.pl: check this substitution
+        // old: val+=GetValD(img,x,y+1);
+        val+=img.getPixel(x,y+1);
+        c+=1.0;
+       }
+       // refactor-nice.pl: check this substitution
+       // old: PutValD(img,x,y,val/c);
+       img.setPixel(x,y,val/c);
+      }
+     }
+    }*/
+
+    for ( int i = 0; i < ( int ) imgv.size(); i++ )
+      filterGaussSigmaApproximate<float, float, float> ( imgv[i], sigma, &gaussImgv[i] );
+
+    // gather the blurred values back into the sparse vectors; near-zero
+    // results are erased to keep the vectors sparse
+    for ( int i = 0; i < ( int ) ex.size(); i++ )
+    {
+      int xpos = ( ex[i].second.x - minx ) * valx;
+      int ypos = ( ex[i].second.y - miny ) * valy;
+      // refactor-nice.pl: check this substitution
+      // old: double val = GetValD ( gaussImgv[scalepos[ex[i].second.scale]], xpos, ypos );
+      double val = gaussImgv[scalepos[ex[i].second.scale]].getPixel ( xpos, ypos );
+
+      if ( fabs ( val ) < 1e-7 )
+      {
+        if ( ex[i].second.svec->get ( d ) != 0.0 )
+        {
+          ex[i].second.svec->erase ( d );
+        }
+      }
+      else
+      {
+        ( *ex[i].second.svec ) [d] = val;
+      }
+    }
+  }
+}
+
+/**
+ * @brief Compute (or load) the PCA basis used to reduce the local features.
+ *
+ * If no cached basis exists (or recomputation is forced via calcpca), a
+ * random subset of at most dim*10 feature vectors is drawn from the examples
+ * and used to estimate the basis, which is then optionally written to the
+ * cache. Otherwise the basis is simply read back from the cache file.
+ *
+ * @param ex training examples supplying the dense feature vectors
+ */
+void SemSegCsurka::initializePCA ( Examples &ex )
+{
+#ifdef DEBUG
+  cerr << "start computing pca" << endl;
+#endif
+  std::string filename = cache + "/pca";
+  // probe whether a cached PCA basis already exists
+  FILE *value;
+  value = fopen ( filename.c_str(), "r" );
+  bool cacheAvailable = ( value != NULL );
+  if ( cacheAvailable )
+    fclose ( value ); // fix: the probe handle was leaked before
+
+  if ( !cacheAvailable || calcpca )
+  {
+    srand ( time ( NULL ) );
+
+    int featsize = ( int ) ex.size();
+    int maxfeatures = dim * 10;
+    int olddim = ex[0].second.vec->size();
+
+    maxfeatures = std::min ( maxfeatures, featsize );
+
+    NICE::Matrix features ( maxfeatures, olddim );
+
+    // draw a random subset of feature vectors for the basis estimation
+    for ( int i = 0; i < maxfeatures; i++ )
+    {
+      int k = rand() % featsize;
+
+      int vsize = ( int ) ex[k].second.vec->size();
+      for ( int j = 0; j < vsize; j++ )
+      {
+        features ( i, j ) = ( * ( ex[k].second.vec ) ) [j];
+      }
+    }
+    pca.calculateBasis ( features, dim );
+
+    if ( save_cache )
+      pca.save ( filename );
+
+  }
+  else
+  {
+    cout << "readpca: " << filename << endl;
+    pca.read ( filename );
+    cout << "end" << endl;
+  }
+#ifdef DEBUG
+  cerr << "finished computing pca" << endl;
+#endif
+}
+
+/**
+ * @brief Project all example feature vectors into the PCA subspace.
+ *
+ * The projected vectors are cached on disk per input file: if no cache entry
+ * exists (or calcpca forces recomputation) the vectors are transformed and
+ * written out in binary form; otherwise they are read back from the cache.
+ * The dense vectors of the examples are replaced in place.
+ *
+ * @param ex examples to transform; ex.filename selects the cache entry
+ */
+void SemSegCsurka::doPCA ( Examples &ex )
+{
+  cout << "converting features using pca starts" << endl;
+
+  // fix: the original chained assignment "savedir = cname = ..." silently
+  // overwrote the classifier-name member 'cname' with the cache directory
+  std::string savedir = conf->gS ( "cache", "root", "/dev/null/" );
+  std::string shortf = ex.filename;
+  if ( string::npos != ex.filename.rfind ( "/" ) )
+    shortf = ex.filename.substr ( ex.filename.rfind ( "/" ) );
+  std::string filename = savedir + "/pcasave/" + shortf;
+  std::string syscall = "mkdir " + savedir + "/pcasave";
+  system ( syscall.c_str() );
+  cout << "filename: " << filename << endl;
+
+  if ( !FileMgt::fileExists ( filename ) || calcpca )
+  {
+    ofstream ofStream;
+
+    //Opens the file binary
+    ofStream.open ( filename.c_str(), fstream::out | fstream::binary );
+
+    // transform every vector and stream the result to the cache file
+    for ( int k = 0; k < ( int ) ex.size(); k++ )
+    {
+      NICE::Vector tmp = pca.getFeatureVector ( * ( ex[k].second.vec ), true );
+      delete ex[k].second.vec;
+      for ( int d = 0; d < ( int ) tmp.size(); d++ )
+        ofStream.write ( ( char* ) &tmp[d], sizeof ( double ) );
+      ex[k].second.vec = new NICE::Vector ( tmp );
+    }
+    ofStream.close();
+    cout << endl;
+  }
+  else
+  {
+    // read the previously transformed vectors back from the cache
+    ifstream ifStream;
+    ifStream.open ( filename.c_str(), std::fstream::in | std::fstream::binary );
+    for ( int k = 0; k < ( int ) ex.size(); k++ )
+    {
+      NICE::Vector tmp = NICE::Vector ( dim );
+      delete ex[k].second.vec;
+      for ( int d = 0; d < dim; d++ )
+        ifStream.read ( ( char* ) &tmp[d], sizeof ( double ) );
+      ex[k].second.vec = new NICE::Vector ( tmp );
+    }
+
+    ifStream.close();
+  }
+  cout << "converting features using pca finished" << endl;
+}
+
+/**
+ * @brief Train the whole Csurka pipeline on the "train" set of the dataset:
+ *        local feature extraction, optional PCA, optional GMM/k-means
+ *        high-level conversion, classifier training, caching of the learned
+ *        state and finally training of the post-processing stages.
+ *
+ * @param md multi-dataset providing the "train" set
+ */
+void SemSegCsurka::train ( const MultiDataset *md )
+{
+
+  /* the individual training steps
+  1. extract SIFT features at the grid points of all training images at all resolutions
+  2. apply PCA
+  3. build a GMM from these features
+  4. for every SIFT feature build a vector that holds at position i the probability for component i of the GMM; currently done with the BoV alternative by Moosmann06
+  5. train a discriminative classifier (e.g. SLR or randomized forests) on these vectors together with their class labels
+  */
+#ifdef DEBUG
+  cerr << "SemSegCsurka:: training starts" << endl;
+#endif
+
+  Examples examples;
+  examples.filename = "training";
+
+
+  // which OpponentSIFT implementation should be used?
+  LocalFeatureRepresentation *cSIFT = NULL;
+  LocalFeatureRepresentation *writeFeats = NULL;
+  LocalFeatureRepresentation *readFeats = NULL;
+  LocalFeatureRepresentation *getFeats = NULL;
+
+  if ( opSiftImpl == "NICE" )
+  {
+    cSIFT = new LFonHSG ( conf, "HSGtrain" );
+  }
+  else if ( opSiftImpl == "VANDESANDE" )
+  {
+    // the used features
+    cSIFT = new LFColorSande ( conf, "LFColorSandeTrain" );
+  }
+  else
+  {
+    // NOTE(review): the "%s" is never substituted here -- fthrow streams
+    // opSiftImpl via operator<<; confirm the intended message format
+    fthrow ( Exception, "feattype: %s not yet supported" << opSiftImpl );
+  }
+
+  getFeats = cSIFT;
+
+  if ( writefeat )
+  {
+    // write the features to a file, if there isn't any to read
+    writeFeats = new LFWriteCache ( conf, cSIFT );
+    getFeats = writeFeats;
+  }
+
+  if ( readfeat )
+  {
+    // read the features from a file
+    if ( writefeat )
+    {
+      readFeats = new LFReadCache ( conf, writeFeats, -1 );
+    }
+    else
+    {
+      readFeats = new LFReadCache ( conf, cSIFT, -1 );
+    }
+    getFeats = readFeats;
+  }
+
+  // additional Colorfeatures
+  LocalFeatureColorWeijer lcw ( conf );
+
+  // dimensionality of the low-level features, determined from the first one
+  int lfdimension = -1;
+
+  const LabeledSet train = * ( *md ) ["train"];
+  const LabeledSet *trainp = &train;
+
+  ////////////////////////
+  // compute features   //
+  ////////////////////////
+
+  std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
+  if ( forbidden_classes_s == "" )
+  {
+    forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
+  }
+  cn.getSelection ( forbidden_classes_s, forbidden_classes );
+  cerr << "forbidden: " << forbidden_classes_s << endl;
+
+  ProgressBar pb ( "Local Feature Extraction" );
+  pb.show();
+
+  int imgnb = 0;
+
+  // first pass over the training set: extract local features and collect
+  // one labeled example per feature position
+  LOOP_ALL_S ( *trainp )
+  {
+    //EACH_S(classno, currentFile);
+    EACH_INFO ( classno, info );
+
+    pb.update ( trainp->count() );
+
+    NICE::ColorImage img;
+
+    std::string currentFile = info.img();
+    
+    CachedExample *ce = new CachedExample ( currentFile );
+
+    const LocalizationResult *locResult = info.localization();
+    if ( locResult->size() <= 0 )
+    {
+      fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+                currentFile.c_str() );
+      continue;
+    }
+
+    fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+              currentFile.c_str() );
+
+    int xsize, ysize;
+    ce->getImageSize ( xsize, ysize );
+
+    NICE::Image pixelLabels ( xsize, ysize );
+    pixelLabels.set ( 0 );
+    locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+    try {
+      img = ColorImage ( currentFile );
+    } catch ( Exception ) {
+      cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+      continue;
+    }
+
+    Globals::setCurrentImgFN ( currentFile );
+
+    VVector features;
+    VVector cfeatures;
+    VVector positions;
+
+    NICE::ColorImage cimg ( currentFile );
+
+    getFeats->extractFeatures ( img, features, positions );
+
+#ifdef DEBUG_CSURK
+    cout << "[log] SemSegCsruka::train -> " << currentFile << " an " << positions.size() << " Positionen wurden Features (Anz = " << features.size() << ") " << endl;
+    cout << "mit einer Dimension von " << features[ 0].size() << " extrahiert." << endl;
+#endif
+
+    if ( usecolorfeats )
+      lcw.getDescriptors ( cimg, cfeatures, positions );
+
+    int j = 0;
+
+    // turn every local feature into a labeled example, skipping pixels
+    // whose ground-truth class is in the forbidden set
+    for ( VVector::const_iterator i = features.begin();
+          i != features.end();
+          i++, j++ )
+    {
+      const NICE::Vector & x = *i;
+      classno = pixelLabels.getPixel ( ( int ) positions[j][0], ( int ) positions[j][1] );
+
+      if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+        continue;
+
+      if ( lfdimension < 0 )
+        lfdimension = ( int ) x.size();
+      else
+        assert ( lfdimension == ( int ) x.size() );
+
+      NICE::Vector *v = new NICE::Vector ( x );
+
+      if ( usecolorfeats && !usepca )
+        v->append ( cfeatures[j] );
+
+      Example example ( v );
+      example.position = imgnb;
+      examples.push_back (
+        pair<int, Example> ( classno, example ) );
+    }
+    features.clear();
+    positions.clear();
+    delete ce;
+    imgnb++;
+  }
+
+  pb.hide();
+
+  //////////////////
+  // apply PCA    //
+  //////////////////
+
+  if ( usepca )
+  {
+    if ( !read_cache )
+    {
+      initializePCA ( examples );
+    }
+    doPCA ( examples );
+    lfdimension = dim;
+  }
+
+  /////////////////////////////////////////////////////
+  // transform low-level into high-level features    //
+  /////////////////////////////////////////////////////
+
+  int hlfdimension = lfdimension;
+
+  if ( norm )
+    normalize ( examples );
+
+  if ( usegmm )
+  {
+    if ( !usepca && !norm )
+      normalize ( examples );
+    g = new GMM ( conf, gaussians );
+
+    // compute the mixture unless a cached one can be loaded
+    if ( dogmm || !g->loadData ( cache + "/gmm" ) )
+    {
+      g->computeMixture ( examples );
+      if ( save_cache )
+        g->saveData ( cache + "/gmm" );
+    }
+
+    hlfdimension = gaussians;
+
+    if ( usefisher )
+      hlfdimension = gaussians * 2 * dim;
+  }
+
+  if ( usekmeans )
+  {
+    if ( !usepca || norm )
+      normalize ( examples );
+    k = new KMeansOnline ( gaussians );
+
+    k->cluster ( examples );
+
+    hlfdimension = gaussians;
+  }
+
+  // second pass: with the codebook (GMM/k-means) in place, re-extract the
+  // features per image and convert them to high-level descriptors
+  if ( usekmeans || usegmm )
+  {
+    examples.clear();
+    pb.reset ( "Local Feature Extraction" );
+    lfdimension = -1;
+    pb.update ( trainp->count() );
+    LOOP_ALL_S ( *trainp )
+    {
+      EACH_INFO ( classno, info );
+
+      pb.update ( trainp->count() );
+
+      NICE::ColorImage img;
+
+      std::string currentFile = info.img();
+
+      CachedExample *ce = new CachedExample ( currentFile );
+
+      const LocalizationResult *locResult = info.localization();
+      if ( locResult->size() <= 0 )
+      {
+        fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+                  currentFile.c_str() );
+        continue;
+      }
+
+      fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+                currentFile.c_str() );
+
+      int xsize, ysize;
+      ce->getImageSize ( xsize, ysize );
+
+      NICE::Image pixelLabels ( xsize, ysize );
+      pixelLabels.set ( 0 );
+      locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+      try {
+        img = ColorImage ( currentFile );
+      }
+      catch ( Exception ) {
+        cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+        continue;
+      }
+
+      Globals::setCurrentImgFN ( currentFile );
+
+      VVector features;
+      VVector cfeatures;
+      VVector positions;
+
+      NICE::ColorImage cimg ( currentFile );
+
+      getFeats->extractFeatures ( img, features, positions );
+
+      if ( usecolorfeats )
+        lcw.getDescriptors ( cimg, cfeatures, positions );
+
+      int j = 0;
+
+      Examples tmpex;
+
+      for ( VVector::const_iterator i = features.begin();
+            i != features.end();
+            i++, j++ )
+      {
+
+        const NICE::Vector & x = *i;
+
+        classno = pixelLabels.getPixel ( ( int ) positions[j][0], ( int ) positions[j][1] );
+
+        if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+          continue;
+
+        if ( lfdimension < 0 )
+          lfdimension = ( int ) x.size();
+        else
+          assert ( lfdimension == ( int ) x.size() );
+
+        NICE::Vector *v = new NICE::Vector ( x );
+        if ( usecolorfeats )
+          v->append ( cfeatures[j] );
+
+        Example example ( v );
+        example.position = imgnb;
+        example.x = ( int ) positions[j][0];
+        example.y = ( int ) positions[j][1];
+        example.scale = positions[j][2];
+
+        tmpex.push_back ( pair<int, Example> ( classno, example ) );
+      }
+      tmpex.filename = currentFile;
+      if ( usepca )
+      {
+        doPCA ( tmpex );
+      }
+
+      // convert to high-level descriptors (with optional subsampling) and
+      // smooth them spatially before merging into the global example set
+      convertLowToHigh ( tmpex, anteil );
+
+      smoothHL ( tmpex );
+
+      for ( int i = 0; i < ( int ) tmpex.size(); i++ )
+      {
+        examples.push_back ( pair<int, Example> ( tmpex[i].first, tmpex[i].second ) );
+      }
+
+      tmpex.clear();
+
+      features.clear();
+      positions.clear();
+      delete ce;
+      imgnb++;
+
+    }
+
+    pb.hide();
+  }
+  ////////////////////////////
+  // train the classifier   //
+  ////////////////////////////
+  FeaturePool fp;
+
+  Feature *f;
+
+  // the feature pool mirrors the descriptor layout: sparse for GMM/k-means
+  // codebook descriptors, dense otherwise
+  if ( usegmm || usekmeans )
+    f = new SparseVectorFeature ( hlfdimension );
+  else
+    f = new VectorFeature ( hlfdimension );
+
+  f->explode ( fp );
+  delete f;
+
+  // append the 11 color-name dimensions when they were not already folded
+  // into the codebook descriptors
+  if ( usecolorfeats && ! ( usekmeans || usegmm ) )
+  {
+    int dimension = hlfdimension + 11;
+    for ( int i = hlfdimension ; i < dimension ; i++ )
+    {
+      VectorFeature *f = new VectorFeature ( dimension );
+      f->feature_index = i;
+      fp.addFeature ( f, 1.0 / dimension );
+    }
+  }
+  /*
+  cout << "train classifier" << endl;
+  fp.store(cout);
+  getchar();
+  for(int z = 0; z < examples.size(); z++)
+  {
+  cout << "examples.size() " << examples.size() << endl;
+  cout << "class: " << examples[z].first << endl;
+   cout << *examples[z].second.vec << endl;
+   getchar();
+  }*/
+
+  if ( classifier != NULL )
+    classifier->train ( fp, examples );
+  else
+  {
+    // vector classifiers need the examples converted to a LabeledSetVector
+    // and back (sparse or dense, matching the descriptor type)
+    LabeledSetVector lvec;
+    convertExamplesToLSet ( examples, lvec );
+    vclassifier->teach ( lvec );
+    if ( usegmm )
+      convertLSetToSparseExamples ( examples, lvec );
+    else
+      convertLSetToExamples ( examples, lvec );
+    vclassifier->finishTeaching();
+  }
+
+  fp.destroy();
+
+  if ( save_cache )
+  {
+    if ( classifier != NULL )
+      classifier->save ( cache + "/fpcrf.data" );
+    else
+      vclassifier->save ( cache + "/veccl.data" );
+  }
+
+  ////////////
+  //clean up//
+  ////////////
+  for ( int i = 0; i < ( int ) examples.size(); i++ )
+  {
+    examples[i].second.clean();
+  }
+  examples.clear();
+
+  if ( cSIFT != NULL )
+    delete cSIFT;
+  if ( writeFeats != NULL )
+    delete writeFeats;
+  if ( readFeats != NULL )
+    delete readFeats;
+  getFeats = NULL;
+
+  trainpostprocess ( md );
+
+  cerr << "SemSeg training finished" << endl;
+}
+
+void SemSegCsurka::trainpostprocess ( const MultiDataset *md )
+{
+  cout << "start postprocess" << endl;
+  ////////////////////////////
+  // Postprocess trainieren //
+  ////////////////////////////
+  const LabeledSet train = * ( *md ) ["train"];
+  const LabeledSet *trainp = &train;
+
+  if ( userellocprior || srg != NULL || gcopt != NULL )
+  {
+    clog << "[log] SemSegCsurka::trainpostprocess: if ( userellocprior || srg != NULL || gcopt !=NULL )" << endl;
+    if ( userellocprior )
+      relloc->setClassNo ( cn.numClasses() );
+
+    if ( gcopt != NULL )
+    {
+      gcopt->setClassNo ( cn.numClasses() );
+    }
+
+    ProgressBar pb ( "learn relative location prior maps" );
+    pb.show();
+    LOOP_ALL_S ( *trainp ) // für alle Bilder den ersten Klassifikationsschritt durchführen um den zweiten Klassifikator anzutrainieren
+    {
+      EACH_INFO ( classno, info );
+
+      pb.update ( trainp->count() );
+
+      NICE::ColorImage img;
+
+      std::string currentFile = info.img();
+      Globals::setCurrentImgFN ( currentFile );
+      CachedExample *ce = new CachedExample ( currentFile );
+
+      const LocalizationResult *locResult = info.localization();
+      if ( locResult->size() <= 0 )
+      {
+        fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+                  currentFile.c_str() );
+        continue;
+      }
+
+      fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+                currentFile.c_str() );
+
+      int xsize, ysize;
+      ce->getImageSize ( xsize, ysize );
+
+      NICE::Image pixelLabels ( xsize, ysize );
+      pixelLabels.set ( 0 );
+      locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+      try {
+        img = ColorImage ( currentFile );
+      }
+      catch ( Exception )
+      {
+        cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+        continue;
+      }
+
+      //Regionen ermitteln
+      NICE::Matrix mask;
+
+      int regionsize = seg->segRegions ( img, mask );
+#ifdef DEBUG_CSURK
+      Image overlay ( img.width(), img.height() );
+
+      double maxval = -numeric_limits<double>::max();
+
+      for ( int y = 0; y < img.height(); y++ )
+      {
+        for ( int x = 0; x < img.width(); x++ )
+        {
+          int val = ( ( int ) mask ( x, y ) + 1 ) % 256;
+          overlay.setPixel ( x, y, val );
+          maxval = std::max ( mask ( x, y ), maxval );
+        }
+      }
+
+      cout << maxval << " different regions found" << endl;
+
+      NICE::showImageOverlay ( img, overlay, "Segmentation Result" );
+#endif
+
+      Examples regions;
+
+      vector<vector<int> > hists;
+
+      for ( int i = 0; i < regionsize; i++ )
+      {
+        Example tmp;
+        regions.push_back ( pair<int, Example> ( 0, tmp ) );
+        vector<int> hist ( cn.numClasses(), 0 );
+        hists.push_back ( hist );
+      }
+
+      for ( int x = 0; x < xsize; x++ )
+      {
+        for ( int y = 0; y < ysize; y++ )
+        {
+          int numb = mask ( x, y );
+          regions[numb].second.x += x;
+          regions[numb].second.y += y;
+          regions[numb].second.weight += 1.0;
+          hists[numb][pixelLabels.getPixel ( x,y ) ]++;
+        }
+      }
+
+      for ( int i = 0; i < regionsize; i++ )
+      {
+        regions[i].second.x /= ( int ) regions[i].second.weight;
+        regions[i].second.y /= ( int ) regions[i].second.weight;
+
+        int maxval = -numeric_limits<int>::max();
+        int maxpos = -1;
+        int secondpos = -1;
+        for ( int k = 0; k < ( int ) hists[i].size(); k++ )
+        {
+          if ( maxval < hists[i][k] )
+          {
+            maxval = hists[i][k];
+            secondpos = maxpos;
+            maxpos = k;
+          }
+        }
+
+        if ( cn.text ( maxpos ) == "various" )
+          regions[i].first = secondpos;
+        else
+          regions[i].first = maxpos;
+
+      }
+      if ( userellocprior )
+        relloc->trainPriorsMaps ( regions, xsize, ysize );
+
+      if ( srg != NULL )
+        srg->trainShape ( regions, mask );
+
+      if ( gcopt != NULL )
+        gcopt->trainImage ( regions, mask );
+
+      delete ce;
+
+    }
+    pb.hide();
+    if ( userellocprior )
+      relloc->finishPriorsMaps ( cn );
+
+    if ( srg != NULL )
+      srg->finishShape ( cn );
+
+    if ( gcopt != NULL )
+      gcopt->finishPP ( cn );
+  }
+  if ( userellocprior )
+  {
+    clog << "[log] SemSegCsurka::trainpostprocess: if ( userellocprior )" << endl;
+    ProgressBar pb ( "learn relative location classifier" );
+    pb.show();
+
+    int nummer = 0;
+    LOOP_ALL_S ( *trainp ) // für alle Bilder den ersten Klassifikationsschritt durchführen um den zweiten Klassifikator anzutrainieren
+    {
+      //EACH_S(classno, currentFile);
+      EACH_INFO ( classno, info );
+      nummer++;
+      pb.update ( trainp->count() );
+
+      NICE::Image img;
+      std::string currentFile = info.img();
+
+      CachedExample *ce = new CachedExample ( currentFile );
+
+      const LocalizationResult *locResult = info.localization();
+      if ( locResult->size() <= 0 )
+      {
+        fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+                  currentFile.c_str() );
+        continue;
+      }
+
+      fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n",
+                currentFile.c_str() );
+
+      int xsize, ysize;
+      ce->getImageSize ( xsize, ysize );
+
+      NICE::Image pixelLabels ( xsize, ysize );
+      pixelLabels.set ( 0 );
+      locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
+
+      try {
+        img = Preprocess::ReadImgAdv ( currentFile.c_str() );
+      }
+      catch ( Exception )
+      {
+        cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+        continue;
+      }
+      Globals::setCurrentImgFN ( currentFile );
+
+      NICE::Image segresult;
+
+      NICE::MultiChannelImageT<double> probabilities ( xsize, ysize, classno );
+
+      Examples regions;
+
+      NICE::Matrix mask;
+
+      if ( savesteps )
+      {
+        std::ostringstream s1;
+        s1 << cache << "/rlpsave/" << nummer;
+
+        std::string filename = s1.str();
+        s1 << ".probs";
+
+        std::string fn2 = s1.str();
+
+        FILE *file;
+        file = fopen ( filename.c_str(), "r" );
+
+        if ( file == NULL )
+        {
+          //berechnen
+          classifyregions ( ce, segresult, probabilities, regions, mask );
+          //schreiben
+          ofstream fout ( filename.c_str(), ios::app );
+          fout << regions.size() << endl;
+          for ( int i = 0; i < ( int ) regions.size(); i++ )
+          {
+            regions[i].second.store ( fout );
+            fout << regions[i].first << endl;
+          }
+          fout.close();
+          probabilities.store ( fn2 );
+        }
+        else
+        {
+          //lesen
+          ifstream fin ( filename.c_str() );
+          int size;
+          fin >> size;
+
+          for ( int i = 0; i < size; i++ )
+          {
+            Example ex;
+            ex.restore ( fin );
+            int tmp;
+            fin >> tmp;
+            regions.push_back ( pair<int, Example> ( tmp, ex ) );
+          }
+
+          fin.close();
+
+          probabilities.restore ( fn2 );
+        }
+      }
+      else
+      {
+        classifyregions ( ce, segresult, probabilities, regions, mask );
+      }
+
+      relloc->trainClassifier ( regions, probabilities );
+
+      delete ce;
+
+    }
+    relloc->finishClassifier();
+    pb.hide();
+
+    relloc->save ( cache + "/rlp" );
+  }
+  cout << "finished postprocess" << endl;
+}
+
+void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities, Examples &Regionen, NICE::Matrix & mask )
+{
+  /* die einzelnen Testschritte:
+  1.x  auf dem Testbild alle SIFT Merkmale an den Gitterpunkten bei allen Auflösungen bestimmen
+  2.x  für jedes SIFT-Merkmal einen Vektor erstellen, der an der Stelle i die Wahrscheinlichkeit enthällt zur Verteilung i des GMM
+  3.x diese Vektoren klassifizieren, so dass für jede Klasse die Wahrscheinlichkeit gespeichert wird
+  4.x für jeden Pixel die Wahrscheinlichkeiten mitteln aus allen Patches, in denen der Pixel vorkommt
+  5.x das Originalbild in homogene Bereiche segmentieren
+  6.x die homogenen Bereiche bekommen die gemittelten Wahrscheinlichkeiten ihrer Pixel
+  7. (einzelne Klassen mit einem globalen Klassifikator ausschließen)
+  8.x jeder Pixel bekommt die Klasse seiner Region zugeordnet
+  */
+
+  clog << "[log] SemSegCsruka::classifyregions" << endl;
+  int xsize, ysize;
+
+  ce->getImageSize ( xsize, ysize );
+
+  probabilities.reInit ( xsize, ysize, classNames->getMaxClassno() + 1 );
+  clog << "[log] SemSegCsruka::classifyregions: probabilities.channels() = " << probabilities.channels() << endl;
+
+  segresult.resize ( xsize, ysize );
+
+  Examples pce;
+
+  // Welche Opponentsift Implementierung soll genutzt werden ?
+  LocalFeatureRepresentation *cSIFT = NULL;
+  LocalFeatureRepresentation *writeFeats = NULL;
+  LocalFeatureRepresentation *readFeats = NULL;
+  LocalFeatureRepresentation *getFeats = NULL;
+
+
+  if ( opSiftImpl == "NICE" )
+  {
+    cSIFT = new LFonHSG ( conf, "HSGtest" );
+  }
+  else if ( opSiftImpl == "VANDESANDE" )
+  {
+    // the used features
+    cSIFT = new LFColorSande ( conf, "LFColorSandeTest" );
+  }
+  else
+  {
+    fthrow ( Exception, "feattype: %s not yet supported" << opSiftImpl );
+  }
+
+  getFeats = cSIFT;
+
+  if ( writefeat )
+  {
+    // write the features to a file, if there isn't any to read
+    writeFeats = new LFWriteCache ( conf, cSIFT );
+    getFeats = writeFeats;
+  }
+
+  if ( readfeat )
+  {
+    // read the features from a file
+    if ( writefeat )
+    {
+      readFeats = new LFReadCache ( conf, writeFeats, -1 );
+    }
+    else
+    {
+      readFeats = new LFReadCache ( conf, cSIFT, -1 );
+    }
+    getFeats = readFeats;
+  }
+
+
+  // additional Colorfeatures
+  LocalFeatureColorWeijer lcw ( conf );
+
+  NICE::ColorImage img;
+
+  std::string currentFile = Globals::getCurrentImgFN();
+
+  try
+  {
+    img = ColorImage ( currentFile );
+  }
+  catch ( Exception )
+  {
+    cerr << "SemSegCsurka: error opening image file <" << currentFile << ">" << endl;
+  }
+
+  VVector features;
+  VVector cfeatures;
+  VVector positions;
+
+  getFeats->extractFeatures ( img, features, positions );
+
+  if ( usecolorfeats )
+    lcw.getDescriptors ( img, cfeatures, positions );
+
+  set<double> scales;
+
+  int j = 0;
+  int lfdimension = -1;
+  for ( VVector::const_iterator i = features.begin();
+        i != features.end();
+        i++, j++ )
+  {
+    const NICE::Vector & x = *i;
+
+    if ( lfdimension < 0 ) lfdimension = ( int ) x.size();
+    else assert ( lfdimension == ( int ) x.size() );
+
+    NICE::Vector *v = new NICE::Vector ( x );
+
+    if ( usecolorfeats )
+      v->append ( cfeatures[j] );
+
+    Example tmp = Example ( v );
+    tmp.x = ( int ) positions[j][0];
+    tmp.y = ( int ) positions[j][1];
+    tmp.width = ( int ) ( 16.0 * positions[j][2] );
+    tmp.height = tmp.width;
+    tmp.scale = positions[j][2];
+    scales.insert ( positions[j][2] );
+    pce.push_back ( pair<int, Example> ( 0, tmp ) );
+  }
+
+  //////////////////
+  // PCA anwenden //
+  //////////////////
+  pce.filename = currentFile;
+  if ( usepca )
+  {
+    doPCA ( pce );
+    lfdimension = dim;
+  }
+
+  //////////////////
+  // BoV anwenden //
+  //////////////////
+  if ( norm )
+    normalize ( pce );
+  if ( usegmm || usekmeans )
+  {
+    if ( !usepca && !norm )
+      normalize ( pce );
+    convertLowToHigh ( pce );
+    smoothHL ( pce );
+    lfdimension = gaussians;
+  }
+
+  /////////////////////////////////////////
+  // Wahrscheinlichkeitskarten erstellen //
+  /////////////////////////////////////////
+  int klassen = probabilities.channels();
+  NICE::MultiChannelImageT<double> preMap ( xsize, ysize, klassen*scales.size() );
+
+  // initialisieren
+  for ( int y = 0 ; y < ysize ; y++ )
+    for ( int x = 0 ; x < xsize ; x++ )
+    {
+      // alles zum Hintergrund machen
+      segresult.setPixel ( x, y, 0 );
+      // Die Wahrscheinlichkeitsmaps auf 0 initialisieren
+      for ( int i = 0 ; i < ( int ) probabilities.channels(); i++ )
+      {
+        probabilities[i](x,y) = 0.0;
+      }
+      for ( int j = 0; j < ( int ) preMap.channels(); j++ )
+      {
+        preMap[j](x,y) = 0.0;
+      }
+    }
+
+  // Die Wahrscheinlichkeitsmaps mit den einzelnen Wahrscheinlichkeiten je Skalierung füllen
+  int scalesize = scales.size();
+
+  // Globale Häufigkeiten akkumulieren
+  FullVector fV ( ( int ) probabilities.channels() );
+
+  for ( int i = 0; i < fV.size(); i++ )
+    fV[i] = 0.0;
+
+  // read allowed classes
+
+  string cndir = conf->gS ( "SemSegCsurka", "cndir", "" );
+  int classes = ( int ) probabilities.channels();
+  vector<int> useclass ( classes, 1 );
+
+  std::vector< std::string > list;
+  StringTools::split ( currentFile, '/', list );
+
+  string orgname = list.back();
+  if ( cndir != "" )
+  {
+    useclass = vector<int> ( classes, 0 );
+    ifstream infile ( ( cndir + "/" + orgname + ".dat" ).c_str() );
+    while ( !infile.eof() && infile.good() )
+    {
+      int tmp;
+      infile >> tmp;
+      if ( tmp >= 0 && tmp < classes )
+      {
+        useclass[tmp] = 1;
+      }
+    }
+  }
+
+#ifdef UNCERTAINTY
+  std::vector<FloatImage> uncert;
+  std::vector<FloatImage> gpUncertainty;
+  std::vector<FloatImage> gpMean;    
+  std::vector<FloatImage> gpMeanRatio;  
+  std::vector<FloatImage> gpWeightAll;
+  std::vector<FloatImage> gpWeightRatio;
+//   std::vector<FloatImage> gpImpactAll;
+//   std::vector<FloatImage> gpImpactRatio;
+  
+  //pre-allocate storage -- one image per scale and method
+  for(int s = 0; s < scalesize; s++)
+  {
+    uncert.push_back(FloatImage(xsize, ysize));
+    uncert[s].set(0.0);
+    
+    gpUncertainty.push_back(FloatImage(xsize, ysize));
+    gpMean.push_back(FloatImage(xsize, ysize));
+    gpMeanRatio.push_back(FloatImage(xsize, ysize));
+    gpWeightAll.push_back(FloatImage(xsize, ysize));
+    gpWeightRatio.push_back(FloatImage(xsize, ysize));
+/*    gpImpactAll.push_back(FloatImage(xsize, ysize));    
+    gpImpactRatio.push_back(FloatImage(xsize, ysize));   */  
+   
+    gpUncertainty[s].set(0.0);
+    gpMean[s].set(0.0);
+    gpMeanRatio[s].set(0.0);
+    gpWeightAll[s].set(0.0);
+    gpWeightRatio[s].set(0.0);
+//     gpImpactAll[s].set(0.0); 
+//     gpImpactRatio[s].set(0.0);   
+  }
+  
+  ColorImage imgrgb ( xsize, ysize );
+  std::string s;
+  std::stringstream out;
+  std::vector< std::string > list2;
+  StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
+  out << "uncertainty/" << list2.back();
+  
+  double maxu = -numeric_limits<double>::max();
+  double minu = numeric_limits<double>::max();
+  
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);
+  
+#endif
+
+  #ifdef UNCERTAINTY
+  std::cerr << "compute values for uncertainty stuff as well" << std::endl;
+  #endif
+  
+  if ( classifier != NULL )
+  {
+    clog << "[log] SemSegCsruka::classifyregions: Wahrscheinlichkeitskarten erstellen: classifier != NULL" << endl;
+#pragma omp parallel for
+    for ( int s = 0; s < scalesize; s++ )
+    {
+#pragma omp parallel for
+      for ( int i = s; i < ( int ) pce.size(); i += scalesize )
+      {
+        ClassificationResult r = classifier->classify ( pce[i].second );
+
+        #ifdef UNCERTAINTY
+        //we need this if we want to compute GP-AL-measure lateron
+        double minMeanAbs ( numeric_limits<double>::max() );
+        double maxMeanAbs ( 0.0 );
+        double sndMaxMeanAbs ( 0.0 );
+        #endif
+        
+        for ( int j = 0 ; j < r.scores.size(); j++ )
+        {
+          if ( useclass[j] == 0 )
+            continue;
+
+          fV[j] += r.scores[j];
+          preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
+          
+         #ifdef UNCERTAINTY 
+          //check whether we found a class with higher smaller abs mean than the current minimum
+         if (abs(r.scores[j]) < minMeanAbs)  
+           minMeanAbs = abs(r.scores[j]);
+         //check for larger abs mean as well
+         if (abs(r.scores[j]) > maxMeanAbs)
+         {
+           sndMaxMeanAbs = maxMeanAbs;
+           maxMeanAbs = abs(r.scores[j]);
+         }
+         // and also for the second highest mean of all classes
+         else if (abs(r.scores[j]) > sndMaxMeanAbs)
+         {
+           sndMaxMeanAbs = abs(r.scores[j]);
+         }
+         #endif          
+        }
+
+        /*if(r.uncertainty < 0.0)
+        {
+          cerr << "uncertainty: " << r.uncertainty << endl;
+          pce[i].second.svec->store(cerr);
+          cerr << endl;
+          exit(-1);
+        }*/
+#ifdef UNCERTAINTY
+        uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
+        maxu = std::max ( r.uncertainty, maxu );
+        minu = std::min ( r.uncertainty, minu );
+        
+        
+        double firstTerm (1.0 / sqrt(r.uncertainty+gpNoise));
+        
+        //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+        // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
+        gpUncertainty[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))
+        
+        // compute results when we take the lowest mean value of all classes
+        gpMean[s] ( pce[i].second.x, pce[i].second.y ) = minMeanAbs;
+        
+        //look at the difference in the absolut mean values for the most plausible class
+        // and the second most plausible class
+        gpMeanRatio[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs - sndMaxMeanAbs;
+        
+
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < r.scores.size(); j++ )
+        {
+          if ( useclass[j] == 0 )
+            continue;
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(r.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(r.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(r.scores[j] - 1);
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm;        
+        
+        //now look on the ratio of the resulting weights for the most plausible
+        // against the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, look for this feature how it would affect to whole model (summarized by weight-vector alpha), if we would 
+        //use it as an additional training example
+        //TODO this would be REALLY computational demanding. Do we really want to do this?
+//         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+//         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;      
+#endif
+      }
+    }
+  }
+  else
+  {
+//#pragma omp parallel for
+    for ( int s = 0; s < scalesize; s++ )
+    {
+//#pragma omp parallel for
+      for ( int i = s; i < ( int ) pce.size(); i += scalesize )
+      {
+        ClassificationResult r = vclassifier->classify ( * ( pce[i].second.vec ) );
+        
+        #ifdef UNCERTAINTY
+        //we need this if we want to compute GP-AL-measure lateron
+        double minMeanAbs ( numeric_limits<double>::max() );
+        double maxMeanAbs ( 0.0 );
+        double sndMaxMeanAbs ( 0.0 );
+        #endif        
+        
+        for ( int j = 0 ; j < ( int ) r.scores.size(); j++ )
+        {
+          if ( useclass[j] == 0 )
+            continue;
+          fV[j] += r.scores[j];
+          preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
+          
+         #ifdef UNCERTAINTY 
+          //check whether we found a class with higher smaller abs mean than the current minimum
+         if (abs(r.scores[j]) < minMeanAbs)  
+           minMeanAbs = abs(r.scores[j]);
+         //check for larger abs mean as well
+         if (abs(r.scores[j]) > maxMeanAbs)
+         {
+           sndMaxMeanAbs = maxMeanAbs;
+           maxMeanAbs = abs(r.scores[j]);
+         }
+         // and also for the second highest mean of all classes
+         else if (abs(r.scores[j]) > sndMaxMeanAbs)
+         {
+           sndMaxMeanAbs = abs(r.scores[j]);
+         }
+         #endif            
+        }
+#ifdef UNCERTAINTY
+        uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
+        maxu = std::max ( r.uncertainty, maxu );
+        minu = std::min ( r.uncertainty, minu );
+        
+        
+        double firstTerm (1.0 / sqrt(r.uncertainty+gpNoise));
+        
+        //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+        // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
+        gpUncertainty[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))
+        
+        // compute results when we take the lowest mean value of all classes
+        gpMean[s] ( pce[i].second.x, pce[i].second.y ) = minMeanAbs;
+        
+        //look at the difference in the absolut mean values for the most plausible class
+        // and the second most plausible class
+        gpMeanRatio[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs - sndMaxMeanAbs;
+        
+
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < fV.size(); j++ )
+        {
+          if ( useclass[j] == 0 )
+            continue;
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(r.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(r.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(r.scores[j] - 1);
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm;        
+        
+        //now look on the ratio of the resulting weights for the most plausible
+        // against the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, look for this feature how it would affect to whole model (summarized by weight-vector alpha), if we would 
+        //use it as an additional training example
+        //TODO this would be REALLY computational demanding. Do we really want to do this?
+//         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+//         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;      
+#endif
+      }
+    }
+  }
+
+  #ifdef UNCERTAINTY
+  std::cerr << "uncertainty values and derived scores successfully computed" << std::endl;
+  #endif
+
+#ifdef UNCERTAINTY
+  cout << "maxvdirect: " << maxu << " minvdirect: " << minu << endl;
+  //pre-allocate the image for filtering lateron
+  FloatImage gaussUncert ( xsize, ysize );
+  
+  //just store the first scale
+  ICETools::convertToRGB ( uncert[0], imgrgb );
+  imgrgb.write ( out.str() + "rough.ppm" );
+  
+  //pre-allocate memory for filtering of scales
+  FloatImage gaussGPUncertainty ( xsize, ysize );
+  FloatImage gaussGPMean ( xsize, ysize );
+  FloatImage gaussGPMeanRatio( xsize, ysize );
+  FloatImage gaussGPWeightAll ( xsize, ysize );
+  FloatImage gaussGPWeightRatio ( xsize, ysize );
+   
+  //just store the first scale for every method
+  ICETools::convertToRGB ( gpUncertainty[0], imgrgb );
+  imgrgb.write ( out.str() + "gpUncertainty.ppm" );
+  ICETools::convertToRGB ( gpMean[0], imgrgb );
+  imgrgb.write ( out.str() + "gpMean.ppm" );
+  ICETools::convertToRGB ( gpMeanRatio[0], imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatio.ppm" );
+  ICETools::convertToRGB ( gpWeightAll[0], imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAll.ppm" );
+  ICETools::convertToRGB ( gpWeightRatio[0], imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatio.ppm" );  
+  
+#endif
+
+  vector<double> scalesVec;
+  for ( set<double>::const_iterator iter = scales.begin();
+        iter != scales.end();
+        ++iter )
+  {
+    scalesVec.push_back ( *iter );
+  }
+
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+
+  for ( int j = 0 ; j < ( int ) preMap.channels(); j++ )
+  {
+    cout << "klasse: " << j << endl;//" " << cn.text ( j ) << endl;
+
+    NICE::Matrix tmp ( preMap.ysize, preMap.xsize );
+    double maxval = 0.0;
+    for ( int y = 0; y < preMap.ysize; y++ )
+      for ( int x = 0; x < preMap.xsize; x++ )
+      {
+        double val = preMap.get ( x, y, j );
+        tmp ( y, x ) = val;
+        maxval = std::max ( val, maxval );
+      }
+
+    NICE::ColorImage imgrgb ( preMap.xsize, preMap.ysize );
+    ICETools::convertToRGB ( tmp, imgrgb );
+
+    cout << "maxval = " << maxval << " for class " << j << endl; //cn.text ( j ) << endl;
+
+    //Show ( ON, imgrgb, cn.text ( j ) );
+    //showImage(imgrgb, "Ergebnis");
+
+    std::string s;
+    std::stringstream out;
+    out << "tmpprebmap" << j << ".ppm";
+    s = out.str();
+    imgrgb.writePPM ( s );
+
+    //getchar();
+  }
+#endif
+
+  // Gaußfiltern
+  clog << "[log] SemSegCsruka::classifyregions: Wahrscheinlichkeitskarten erstellen -> Gaussfiltern" << endl;
+  for ( int s = 0; s < scalesize; s++ )
+  {
+    double sigma = sigmaweight * 16.0 * scalesVec[s];
+    cerr << "sigma: " << sigma << endl;
+#pragma omp parallel for
+    for ( int i = 0; i < klassen; i++ )
+    {
+      if ( forbidden_classes.find ( i ) != forbidden_classes.end() )
+      {
+        continue;
+      }
+
+      int pos = i + s * klassen;
+
+      double maxval = preMap[pos](0,0);
+      double minval = maxval;
+
+      for ( int y = 0; y < ysize; y++ )
+      {
+        for ( int x = 0; x < xsize; x++ )
+        {
+          maxval = std::max ( maxval, preMap[pos](x,y) );
+          minval = std::min ( minval, preMap[pos](x,y) );
+        }
+      }
+
+      NICE::FloatImage dblImg ( xsize, ysize );
+      NICE::FloatImage gaussImg ( xsize, ysize );
+
+      for ( int y = 0; y < ysize; y++ )
+      {
+        for ( int x = 0; x < xsize; x++ )
+        {
+          dblImg.setPixel ( x, y, preMap[pos](x,y) );
+        }
+      }
+
+      filterGaussSigmaApproximate<float, float, float> ( dblImg, sigma, &gaussImg );
+
+      for ( int y = 0; y < ysize; y++ )
+      {
+        for ( int x = 0; x < xsize; x++ )
+        {
+          preMap[pos](x,y) = gaussImg.getPixel ( x, y );
+        }
+      }
+    }
+#ifdef UNCERTAINTY
+    filterGaussSigmaApproximate<float, float, float> ( uncert[s], sigma, &gaussUncert );
+    uncert[s] = gaussUncert;
+    
+    //apply the gauss-filtering to all scales of every method
+    filterGaussSigmaApproximate<float, float, float> ( gpUncertainty[s], sigma, &gaussGPUncertainty );
+    filterGaussSigmaApproximate<float, float, float> ( gpMean[s], sigma, &gaussGPMean );
+    filterGaussSigmaApproximate<float, float, float> ( gpMeanRatio[s], sigma, &gaussGPMeanRatio );
+    filterGaussSigmaApproximate<float, float, float> ( gpWeightAll[s], sigma, &gaussGPWeightAll );
+    filterGaussSigmaApproximate<float, float, float> ( gpWeightRatio[s], sigma, &gaussGPWeightRatio );
+    
+    gpUncertainty[s] = gaussGPUncertainty; 
+    gpMean[s] = gaussGPMean; 
+    gpMeanRatio[s] = gaussGPMeanRatio; 
+    gpWeightAll[s] = gaussGPWeightAll;
+    gpWeightRatio[s] = gaussGPWeightRatio;   
+#endif
+  }
+
+  // Zusammenfassen und auswerten
+  clog << "[log] SemSegCsruka::classifyregions: Wahrscheinlichkeitskarten erstellen -> zusammenfassen und auswerten" << endl;
+//#pragma omp parallel for
+  for ( int x = 0; x < xsize; x++ )
+  {
+    for ( int y = 0; y < ysize; y++ )
+    {
+      for ( int j = 0 ; j < ( int ) probabilities.channels(); j++ )
+      {
+        double prob = 0.0;
+        for ( int s = 0; s < ( int ) scalesize; s++ )
+        {
+          prob += preMap.get ( x, y, j + s * klassen );
+        }
+
+        double val = prob / ( double ) ( scalesize );
+        probabilities.set ( x, y, val, j );
+      }
+    }
+  }
+  
+#ifdef UNCERTAINTY
+  for ( int x = 0; x < xsize; x++ )
+  {
+    for ( int y = 0; y < ysize; y++ )
+    {
+      for ( int s = 0; s < ( int ) scalesize; s++ )
+      {
+        gaussUncert(x,y) += uncert[s](x,y);
+        //and for the other methods as well
+        gaussGPUncertainty(x,y) += gpUncertainty[s](x,y);
+        gaussGPMean(x,y) += gpMean[s](x,y);
+        gaussGPMeanRatio(x,y) += gpMeanRatio[s](x,y);
+        gaussGPWeightAll(x,y) += gpWeightAll[s](x,y);
+        gaussGPWeightRatio(x,y) += gpWeightRatio[s](x,y);
+      }
+      gaussUncert(x,y)/=scalesize;
+      //and for the other methods as well
+      gaussGPUncertainty(x,y)/=scalesize;
+      gaussGPMean(x,y)/=scalesize;
+      gaussGPMeanRatio(x,y)/=scalesize;
+      gaussGPWeightAll(x,y)/=scalesize;
+      gaussGPWeightRatio(x,y)/=scalesize;      
+    }
+  }
+
+  maxu = -numeric_limits<double>::max();
+  minu = numeric_limits<double>::max();
+  for ( int y = 0; y < ysize; y++ )
+  {
+    for ( int x = 0; x < xsize; x++ )
+    {
+      double val = uncert[0] ( x, y );
+      maxu = std::max ( val, maxu );
+      minu = std::min ( val, minu );
+    }
+  }
+  cout << "maxvo = " << maxu << " minvo = " << minu << endl;
+
+  maxu = -numeric_limits<float>::max();
+  minu = numeric_limits<float>::max();
+
+  for ( int y = 0; y < ysize; y++ )
+  {
+    for ( int x = 0; x < xsize; x++ )
+    {
+      double val = gaussUncert ( x, y );
+      maxu = std::max ( val, maxu );
+      minu = std::min ( val, minu );
+    }
+  }
+  cout << "maxvf = " << maxu << " minvf = " << minu << endl;
+  
+  gaussUncert(0,0) = 0.0;
+  gaussUncert(0,1) = 0.04;
+  ICETools::convertToRGB ( gaussUncert, imgrgb );
+  imgrgb.write ( out.str() + "filtered.ppm" );
+  
+  ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
+  imgrgb.write ( out.str() + "gpUncertaintyFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPMean, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatioFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAllFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatioFiltered.ppm" );  
+  
+#endif
+
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+
+  std::string s;
+  std::stringstream out;
+  std::vector< std::string > list2;
+  StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
+
+  out << "probmaps/" << list2.back() << ".probs";
+
+  s = out.str();
+
+  probabilities.store ( s );
+
+  for ( int j = 0 ; j < ( int ) probabilities.channels(); j++ )
+  {
+    cout << "klasse: " << j << endl;//" " << cn.text ( j ) << endl;
+
+    NICE::Matrix tmp ( probabilities.ysize, probabilities.xsize );
+    double maxval = 0.0;
+    for ( int y = 0; y < probabilities.ysize; y++ )
+      for ( int x = 0; x < probabilities.xsize; x++ )
+      {
+        double val = probabilities.get ( x, y, j );
+
+        tmp ( y, x ) = val;
+        maxval = std::max ( val, maxval );
+      }
+
+    NICE::ColorImage imgrgb ( probabilities.xsize, probabilities.ysize );
+    ICETools::convertToRGB ( tmp, imgrgb );
+
+    cout << "maxval = " << maxval << " for class " << j << endl; //cn.text ( j ) << endl;
+
+    //Show ( ON, imgrgb, cn.text ( j ) );
+    //showImage(imgrgb, "Ergebnis");
+
+    std::string s;
+    std::stringstream out;
+    out << "tmp" << j << ".ppm";
+    s = out.str();
+    imgrgb.writePPM ( s );
+
+    //getchar();
+  }
+#endif
+  if ( useregions )
+  {
+    if ( bestclasses > 0 )
+    {
+      PSSImageLevelPrior pss ( 0, bestclasses, 0.2 );
+      pss.setPrior ( fV );
+      pss.postprocess ( segresult, probabilities );
+    }
+
+    //Regionen ermitteln
+
+    int regionsize = seg->segRegions ( img, mask );
+
+    Regionen.clear();
+    vector<vector <double> > regionprob;
+
+#ifdef UNCERTAINTY
+    std::vector<double> regionUncert;
+    
+    std::vector<double> regionGPUncertainty;
+    std::vector<double> regionGPMean;
+    std::vector<double> regionGPMeanRatio;
+    std::vector<double> regionGPWeightAll;
+    std::vector<double> regionGPWeightRatio;    
+#endif
+
+    // Wahrscheinlichkeiten für jede Region initialisieren
+    for ( int i = 0; i < regionsize; i++ )
+    {
+      vector<double> tmp;
+      for ( int j = 0; j < ( int ) probabilities.channels(); j++ )
+      {
+        tmp.push_back ( 0.0 );
+      }
+      regionprob.push_back ( tmp );
+      Regionen.push_back ( pair<int, Example> ( 0, Example() ) );
+#ifdef UNCERTAINTY
+      regionUncert.push_back ( 0.0 );
+      
+      regionGPUncertainty.push_back ( 0.0 );
+      regionGPMean.push_back ( 0.0 );
+      regionGPMeanRatio.push_back ( 0.0 );
+      regionGPWeightAll.push_back ( 0.0 );
+      regionGPWeightRatio.push_back ( 0.0 );
+#endif
+    }
+
+    // Wahrscheinlichkeiten für Regionen bestimmen
+    for ( int x = 0; x < xsize; x++ )
+    {
+      for ( int y = 0; y < ysize; y++ )
+      {
+        int pos = mask ( x, y );
+        Regionen[pos].second.weight += 1.0;
+        Regionen[pos].second.x += x;
+        Regionen[pos].second.y += y;
+        for ( int j = 0 ; j < ( int ) probabilities.channels(); j++ )
+        {
+          double val = probabilities.get ( x, y, j );
+          regionprob[pos][j] += val;
+        }
+#ifdef UNCERTAINTY
+        regionUncert[pos] += gaussUncert ( x, y );
+        
+        regionGPUncertainty[pos] += gaussGPUncertainty ( x, y );
+        regionGPMean[pos] += gaussGPMean ( x, y );
+        regionGPMeanRatio[pos] += gaussGPMeanRatio ( x, y );
+        regionGPWeightAll[pos] += gaussGPWeightAll ( x, y );
+        regionGPWeightRatio[pos] += gaussGPWeightRatio ( x, y );
+#endif
+      }
+    }
+
+    /*
+    cout << "regions: " << regionsize << endl;
+    cout << "outfeats: " << endl;
+    for(int j = 0; j < regionprob.size(); j++)
+    {
+     for(int i = 0; i < regionprob[j].size(); i++)
+     {
+      cout << regionprob[j][i] << " ";
+     }
+     cout << endl;
+    }
+    cout << endl;
+    getchar();*/
+
+    // beste Wahrscheinlichkeit je Region wählen
+    for ( int i = 0; i < regionsize; i++ )
+    {
+      if ( Regionen[i].second.weight > 0 )
+      {
+        Regionen[i].second.x /= ( int ) Regionen[i].second.weight;
+        Regionen[i].second.y /= ( int ) Regionen[i].second.weight;
+      }
+      double maxval = -numeric_limits<double>::max();
+      int maxpos = 0;
+
+      for ( int j = 0 ; j < ( int ) regionprob[i].size(); j++ )
+      {
+        if ( forbidden_classes.find ( j ) != forbidden_classes.end() )
+          continue;
+
+        regionprob[i][j] /= Regionen[i].second.weight;
+
+        if ( maxval < regionprob[i][j] )
+        {
+          maxval = regionprob[i][j];
+          maxpos = j;
+        }
+        probabilities.set ( Regionen[i].second.x, Regionen[i].second.y, regionprob[i][j], j );
+      }
+
+      Regionen[i].first = maxpos;
+#ifdef UNCERTAINTY
+      regionUncert[i] /= Regionen[i].second.weight;
+      
+      regionGPUncertainty[i] /= Regionen[i].second.weight;
+      regionGPMean[i] /= Regionen[i].second.weight;
+      regionGPMeanRatio[i] /= Regionen[i].second.weight;
+      regionGPWeightAll[i] /= Regionen[i].second.weight;
+      regionGPWeightRatio[i] /= Regionen[i].second.weight;
+#endif
+    }
+    // Pixel jeder Region labeln
+    for ( int y = 0; y < ( int ) mask.cols(); y++ )
+    {
+      for ( int x = 0; x < ( int ) mask.rows(); x++ )
+      {
+        int pos = mask ( x, y );
+        segresult.setPixel ( x, y, Regionen[pos].first );
+#ifdef UNCERTAINTY
+        gaussUncert ( x, y ) = regionUncert[pos];
+        
+        gaussGPUncertainty ( x, y ) = regionGPUncertainty[pos];
+        gaussGPMean ( x, y ) = regionGPMean[pos];
+        gaussGPMeanRatio ( x, y ) = regionGPMeanRatio[pos];
+        gaussGPWeightAll ( x, y ) = regionGPWeightAll[pos];
+        gaussGPWeightRatio ( x, y ) = regionGPWeightRatio[pos];        
+#endif
+      }
+    }   
+#ifdef UNCERTAINTY
+    maxu = -numeric_limits<float>::max();
+    minu = numeric_limits<float>::max();
+    for ( int y = 0; y < ysize; y++ )
+    {
+      for ( int x = 0; x < xsize; x++ )
+      {
+        //float val = uncert(x,y);
+        double val = gaussUncert ( x, y );
+        maxu = std::max ( val, maxu );
+        minu = std::min ( val, minu );
+      }
+    }
+    cout << "maxvr = " << maxu << " minvr = " << minu << endl;
+//    uncert(0,0) = 1;
+//    uncert(0,1) = 0;
+    ICETools::convertToRGB ( gaussUncert, imgrgb );
+    imgrgb.write ( out.str() + "region.ppm" );
+    
+  ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
+  imgrgb.write ( out.str() + "gpUncertaintyRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPMean, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatioRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAllRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatioRegion.ppm" );      
+#endif
+
+#undef WRITEREGIONS
+#ifdef WRITEREGIONS
+    RegionGraph rg;
+    seg->getGraphRepresentation ( img, mask, rg );
+    for ( uint pos = 0; pos < regionprob.size(); pos++ )
+    {
+      rg[pos]->setProbs ( regionprob[pos] );
+    }
+
+    std::string s;
+    std::stringstream out;
+    std::vector< std::string > list;
+    StringTools::split ( Globals::getCurrentImgFN (), '/', list );
+
+    out << "rgout/" << list.back() << ".graph";
+    string writefile = out.str();
+    rg.write ( writefile );
+#endif
+  }
+  else
+  {
+
+    PSSImageLevelPrior pss ( 1, 4, 0.2 );
+    pss.setPrior ( fV );
+    pss.postprocess ( segresult, probabilities );
+
+  }
+
+  // Saubermachen:
+  clog << "[log] SemSegCsurka::classifyregions: sauber machen" << endl;
+  for ( int i = 0; i < ( int ) pce.size(); i++ )
+  {
+    pce[i].second.clean();
+  }
+  pce.clear();
+
+  if ( cSIFT != NULL )
+    delete cSIFT;
+  if ( writeFeats != NULL )
+    delete writeFeats;
+  if ( readFeats != NULL )
+    delete readFeats;
+  getFeats = NULL;
+}
+
+void SemSegCsurka::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
+{
+  Examples regions;
+  NICE::Matrix regionmask;
+  classifyregions ( ce, segresult, probabilities, regions, regionmask );
+  if ( userellocprior || srg != NULL || gcopt != NULL )
+  {
+    if ( userellocprior )
+      relloc->postprocess ( regions, probabilities );
+
+    if ( srg != NULL )
+      srg->optimizeShape ( regions, regionmask, probabilities );
+
+    if ( gcopt != NULL )
+      gcopt->optimizeImage ( regions, regionmask, probabilities );
+
+    // Pixel jeder Region labeln
+    for ( int y = 0; y < ( int ) regionmask.cols(); y++ )
+    {
+      for ( int x = 0; x < ( int ) regionmask.rows(); x++ )
+      {
+        int pos = regionmask ( x, y );
+        segresult.setPixel ( x, y, regions[pos].first );
+      }
+    }
+  }
+
+#ifndef NOVISUAL
+#undef VISSEMSEG
+#ifdef VISSEMSEG
+//  showImage(img);
+  for ( int j = 0 ; j < ( int ) probabilities.channels(); j++ )
+  {
+    cout << "klasse: " << j << " " << cn.text ( j ) << endl;
+
+    NICE::Matrix tmp ( probabilities.ysize, probabilities.xsize );
+    double maxval = -numeric_limits<double>::max();
+    for ( int y = 0; y < probabilities.ysize; y++ )
+      for ( int x = 0; x < probabilities.xsize; x++ )
+      {
+        double val = probabilities.get ( x, y, j );
+        tmp ( y, x ) = val;
+        maxval = std::max ( val, maxval );
+      }
+
+    NICE::ColorImage imgrgb ( probabilities.xsize, probabilities.ysize );
+    ICETools::convertToRGB ( tmp, imgrgb );
+
+    cout << "maxval = " << maxval << " for class " << cn.text ( j ) << endl;
+
+    Show ( ON, imgrgb, cn.text ( j ) );
+    imgrgb.Write ( "tmp.ppm" );
+
+    getchar();
+  }
+#endif
+#endif
+
+}

+ 249 - 0
semseg/SemSegCsurka.h

@@ -0,0 +1,249 @@
+/**
+ * @file SemSegCsurka.h
+ * @brief semantic segmentation using the method from Csurka08
+ * @author Björn Fröhlich
+ * @date 04/24/2009
+ */
+#ifndef SemSegCsurkaINCLUDE
+#define SemSegCsurkaINCLUDE
+
+#include "SemanticSegmentation.h"
+
+#include "vislearning/math/ftransform/PCA.h"
+
+#include "vislearning/features/localfeatures/GenericLocalFeatureSelection.h"
+#include "vislearning/features/localfeatures/LFonHSG.h"
+#include "vislearning/features/localfeatures/LFColorSande.h"
+#include "vislearning/features/localfeatures/LocalFeatureColorWeijer.h"
+#include "vislearning/features/localfeatures/LFReadCache.h"
+#include "vislearning/features/localfeatures/LFWriteCache.h"
+#include "vislearning/features/fpfeatures/VectorFeature.h"
+#include "vislearning/features/fpfeatures/SparseVectorFeature.h"
+
+#include "vislearning/cbaselib/CachedExample.h"
+#include "vislearning/baselib/Preprocess.h"
+#include "vislearning/baselib/Globals.h"
+
+#include "segmentation/RegionSegmentationMethod.h"
+#include "segmentation/RSMeanShift.h"
+#include "segmentation/RSGraphBased.h"
+#include "segmentation/RSCache.h"
+
+#include "SemSegTools.h"
+
+#include "vislearning/math/cluster/GMM.h"
+#include "vislearning/math/cluster/KMeansOnline.h"
+
+#include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "vislearning/classifier/fpclassifier/logisticregression/FPCSMLR.h"
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+#include "semseg/semseg/postsegmentation/RelativeLocationPrior.h"
+#include "semseg/semseg/postsegmentation/PPSuperregion.h"
+#include "semseg/semseg/postsegmentation/PPGraphCut.h"
+
+#include "vislearning/classifier/genericClassifierSelection.h"
+
+/** @brief pixelwise labeling systems */
+
+namespace OBJREC {
+
+class SemSegCsurka : public SemanticSegmentation
+{
+
+  protected:
+
+    //! for normalization
+    std::vector<double> vecmin, vecmax;
+
+    //! boolean whether to save the cache or not
+    bool save_cache;
+
+    //! boolean whether to read the cache or not, if read_cache is false, everything will be trained
+    bool read_cache;
+
+    //! The cached Data
+    std::string cache;
+
+    //! The PCA
+    PCA pca;
+
+    //! using normalization
+    bool norm;
+
+    //! feature Dimension after PCA
+    int dim;
+
+    //! Classifier
+    FeaturePoolClassifier *classifier;
+    VecClassifier *vclassifier;
+
+    //! Configuration File
+    const NICE::Config *conf;
+
+
+    //! name of all classes
+    ClassNames cn;
+
+    //! set of forbidden/background classes
+    std::set<int> forbidden_classes;
+
+    //! whether to use the colorfeats or not
+    bool usecolorfeats;
+
+    //! low level Segmentation method
+    RegionSegmentationMethod *seg;
+
+    //! weight for the gaussimage
+    double sigmaweight;
+
+    //! Gaussian Mixture
+    GMM *g;
+
+    //! KMeans
+    KMeansOnline *k;
+
+    //! use pca or not
+    bool usepca;
+
+    //! forced recalculation of the pca
+    bool calcpca;
+
+    //! use highlevel transformation with gmm or not
+    bool usegmm;
+
+    //! use highlevel transformation with kmeans or not
+    bool usekmeans;
+
+    int bestclasses;
+
+    //! how much clusters of the kmeans to use
+    int kmeansfeat;
+
+    //! use hard assignment or not
+    bool kmeanshard;
+
+    //! use fisher kernel for bag if visual words
+    bool usefisher;
+
+    //! forced recalculation of the gmm
+    bool dogmm;
+
+    //! number of gaussians
+    int gaussians;
+
+    //! whether to use the relative location features or not
+    bool userellocprior;
+
+    //! which classifier to use
+    std::string cname;
+
+    //! use regions segmentation or not
+    bool useregions;
+
+    //! how many features should be used for training the classifier (relative value between 0 and 1
+    double anteil;
+
+    //! save steps for faster computing postprocesses
+    bool savesteps;
+
+    //! the relative location features
+    RelativeLocationPrior *relloc;
+
+    //! Shape pp
+    PPSuperregion *srg;
+
+    //! Graph Cut pp
+    PPGraphCut *gcopt;
+
+    //! smooth high level features or not
+    bool smoothhl;
+
+    //! sigma for high level smoothing
+    double smoothfactor;
+
+    //! which OpponentSIFT implementation to use {NICE, VANDESANDE}
+    std::string opSiftImpl;
+
+    //! read features?
+    bool readfeat;
+
+    //! write features?
+    bool writefeat;
+
+    /**
+     * converts the low level features in high level features
+     * @param ex input and output features
+     * @param reduce reduce the dataset (1.0 means no reduction)
+     */
+    void convertLowToHigh ( Examples &ex, double reduce = 1.0 );
+
+    /**
+     * Starts the PCA
+     * @param ex input features
+     */
+    void initializePCA ( Examples &ex );
+
+    /**
+     * using PCA on al input features
+     * @param ex input features
+     */
+    void doPCA ( Examples &ex );
+
+    /**
+     * normalize the features between 0 and 1
+     * @param ex input features
+     */
+    void normalize ( Examples &ex );
+
+
+    /**
+     * smooth the high level features
+     * @param ex input features
+     */
+    void smoothHL ( Examples ex );
+
+  public:
+
+    /** constructor
+      *  @param conf needs a configfile
+      *  @param md and a MultiDataset (contains images and other things)
+      */
+    SemSegCsurka ( const NICE::Config *conf, const MultiDataset *md );
+
+    /** simple destructor */
+    virtual ~SemSegCsurka();
+
+    /** The trainingstep
+      *  @param md and a MultiDataset (contains images and other things)
+      */
+    void train ( const MultiDataset *md );
+
+    /** The trainingstep for the postprocess
+      *  @param md and a MultiDataset (contains images and other things)
+      */
+    void trainpostprocess ( const MultiDataset *md );
+
+    /** The main procedure. Input: Image, Output: Segmented Image with pixelwise labeles and the probabilities
+      * @param ce image data
+      * @param segresult result of the semantic segmentation with a label for each pixel
+      * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+      */
+    void semanticseg ( CachedExample *ce,
+                       NICE::Image & segresult,
+                       NICE::MultiChannelImageT<double> & probabilities );
+
+    /** this procedure is equal semanticseg, if there is no post process
+    * @param ce image data
+    * @param segresult result of the semantic segmentation with a label for each pixel
+    * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+    * @param Regionen the output regions
+    * @param mask the positions of the regions
+    */
+    void classifyregions ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities, Examples &Regionen, NICE::Matrix &mask );
+    void getFeats ( NICE::Image arg1, NICE::VVector arg2, NICE::VVector arg3 );
+};
+
+} //namespace
+
+#endif

+ 108 - 0
semseg/SemSegLocal.cpp

@@ -0,0 +1,108 @@
+/**
+* @file SemSegLocal.cpp
+* @brief semantic segmentation using image patches only
+* @author Erik Rodner
+* @date 05/08/2008
+
+*/
+#include <iostream>
+
+#include "SemSegLocal.h"
+#include "vislearning/cbaselib/CachedExample.h"
+#include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+#include "vislearning/features/fpfeatures/PixelPairFeature.h"
+
+#include "SemSegTools.h"
+
+using namespace OBJREC;
+
+using namespace std;
+using namespace NICE;
+
+
+SemSegLocal::SemSegLocal ( const Config *conf,
+                           const MultiDataset *md )
+    : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
+{
+  save_cache = conf->gB ( "FPCPixel", "save_cache", true );
+  read_cache = conf->gB ( "FPCPixel", "read_cache", false );
+  cache = conf->gS ( "FPCPixel", "cache", "fpc.data" );
+  fpc = new FPCRandomForests ( conf, "FPCPixel" );
+  fpc->setMaxClassNo ( classNames->getMaxClassno() );
+
+
+  if ( read_cache ) {
+    fprintf ( stderr, "LocSSimpleFP:: Reading classifier data from %s\n", cache.c_str() );
+    fpc->read ( cache );
+    fprintf ( stderr, "LocSSimpleFP:: successfully read\n" );
+  } else {
+    train ( conf, md );
+  }
+}
+
+void SemSegLocal::train ( const Config *conf, const MultiDataset *md )
+{
+  Examples examples;
+  vector<CachedExample *> imgexamples;
+
+  SemSegTools::collectTrainingExamples (
+    conf,
+    "FPCPixel", // config section for grid settings
+    * ( ( *md ) ["train"] ),
+    *classNames,
+    examples,
+    imgexamples );
+
+  assert ( examples.size() > 0 );
+
+  FeaturePool fp;
+  PixelPairFeature hf ( conf );
+  hf.explode ( fp );
+
+  fpc->train ( fp, examples );
+
+  // clean up memory !!
+  for ( vector<CachedExample *>::iterator i = imgexamples.begin();
+        i != imgexamples.end();
+        i++ )
+    delete ( *i );
+
+  if ( save_cache ) {
+    fpc->save ( cache );
+  }
+
+  fp.destroy();
+}
+
+
+SemSegLocal::~SemSegLocal()
+{
+  if ( fpc != NULL )
+    delete fpc;
+}
+
+
+void SemSegLocal::semanticseg ( CachedExample *ce,
+                                NICE::Image & segresult,
+                                NICE::MultiChannelImageT<double> & probabilities )
+{
+  // for speed optimization
+  FPCRandomForests *fpcrf = dynamic_cast<FPCRandomForests *> ( fpc );
+  int xsize, ysize;
+  ce->getImageSize ( xsize, ysize );
+  probabilities.reInit ( xsize, ysize, classNames->getMaxClassno() + 1 );
+  segresult.resize ( xsize, ysize );
+
+  Example pce ( ce, 0, 0 );
+  long int offset = 0;
+  for ( int y = 0 ; y < ysize ; y++ )
+    for ( int x = 0 ; x < xsize ; x++, offset++ )
+    {
+      pce.x = x ;
+      pce.y = y;
+      ClassificationResult r = fpcrf->classify ( pce );
+      segresult.setPixel ( x, y, r.classno );
+      for ( int i = 0 ; i < ( int ) probabilities.channels(); i++ )
+        probabilities[i](x,y) = r.scores[i];
+    }
+}

+ 48 - 0
semseg/SemSegLocal.h

@@ -0,0 +1,48 @@
+/**
+* @file SemSegLocal.h
+* @brief semantic segmentation using image patches only
+* @author Erik Rodner
+* @date 05/08/2008
+
+*/
+#ifndef SEMSEGLOCALINCLUDE
+#define SEMSEGLOCALINCLUDE
+
+#include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
+#include "SemanticSegmentation.h"
+
+
+namespace OBJREC
+{
+
+/** abstract interface for pixelwise localization systems */
+class SemSegLocal : public SemanticSegmentation
+{
+
+  protected:
+    bool save_cache;
+    bool read_cache;
+
+    std::string cache;
+    FeaturePoolClassifier *fpc;
+
+  public:
+
+    /** simple constructor */
+    SemSegLocal ( const NICE::Config *conf, const MultiDataset *md );
+
+    /** simple destructor */
+    virtual ~SemSegLocal();
+
+    void train ( const NICE::Config *conf, const MultiDataset *md );
+
+    void semanticseg ( CachedExample *ce,
+                       NICE::Image & segresult,
+                       NICE::MultiChannelImageT<double> & probabilities );
+
+};
+
+
+} // namespace
+
+#endif

+ 2237 - 0
semseg/SemSegNovelty.cpp

@@ -0,0 +1,2237 @@
+#include <sstream>
+#include <iostream>
+
+#include "core/image/FilterT.h"
+#include "core/basics/numerictools.h"
+#include "core/basics/StringTools.h"
+#include "core/basics/Timer.h"
+
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+#include "vislearning/baselib/ICETools.h"
+#include "vislearning/baselib/Globals.h"
+#include "vislearning/features/fpfeatures/SparseVectorFeature.h"
+
+#include "segmentation/GenericRegionSegmentationMethodSelection.h"
+
+#include "SemSegNovelty.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+SemSegNovelty::SemSegNovelty ( )
+    : SemanticSegmentation ( )
+{
+  this->forbidden_classesTrain.clear();
+  this->forbidden_classesActiveLearning.clear();
+  this->classesInUse.clear();
+  
+  this->globalMaxUncert = -numeric_limits<double>::max();  
+  
+  //we don't have queried any region so far
+  this->queriedRegions.clear(); 
+  
+  this->featExtract = new LocalFeatureColorWeijer ();
+  
+  // those two guys need to be NULL, since only one of them will be active later on
+  this->classifier = NULL;
+  this->vclassifier = NULL;
+  
+  // this one here as well
+  this->regionSeg = NULL;
+}
+
+SemSegNovelty::SemSegNovelty ( const Config * _conf,
+                               const MultiDataset *md )
+{
+  ///////////
+  // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
+  ///////////  
+  this->forbidden_classesTrain.clear();
+  this->forbidden_classesActiveLearning.clear();
+  this->classesInUse.clear();
+  
+  this->globalMaxUncert = -numeric_limits<double>::max();  
+  
+  //we don't have queried any region so far
+  this->queriedRegions.clear(); 
+  
+  this->featExtract = new LocalFeatureColorWeijer ();
+  
+  // those two guys need to be NULL, since only one of them will be active later on
+  this->classifier = NULL;
+  this->vclassifier = NULL;
+  
+  // this one here as well
+  this->regionSeg = NULL;  
+  
+  ///////////
+  // here comes the new code part different from the empty constructor
+  ///////////    
+  this->setClassNames ( & ( md->getClassNames ( "train" ) ) );
+  
+  this->initFromConfig( _conf ); 
+}
+
+SemSegNovelty::~SemSegNovelty()
+{
+  if(newTrainExamples.size() > 0)
+  {
+    // show most uncertain region
+    if (b_visualizeALimages)
+      showImage(maskedImg);
+    
+    //incorporate new information into the classifier
+    if (classifier != NULL)
+    {
+      //NOTE dangerous!
+      classifier->addMultipleExamples(newTrainExamples);
+    }
+    
+    //store the classifier, such that we can read it again in the next round (if we like that)
+    classifier->save ( cache + "/classifier.data" );
+  }
+  
+  // clean-up
+  
+  ///////////////////////////////
+  //     FEATURE EXTRACTION    //
+  ///////////////////////////////   
+  if ( featExtract != NULL )
+    delete featExtract;
+
+  ///////////////////////////////
+  //     CLASSIFICATION STUFF  //
+  ///////////////////////////////   
+  if ( classifier != NULL )
+    delete classifier;
+  if ( vclassifier != NULL )
+    delete vclassifier;
+  
+  ///////////////////////////////
+  //     SEGMENTATION STUFF    //
+  ///////////////////////////////   
+  if ( this->regionSeg != NULL )
+    delete this->regionSeg;
+}
+
+void SemSegNovelty::initFromConfig(const Config* conf, const string _confSection) 
+{
+  //first of all, call method of parent object
+  SemanticSegmentation::initFromConfig( conf );
+  
+  featExtract->initFromConfig ( conf );
+
+  //save and read segmentation results from files
+  this->reuseSegmentation = conf->gB ( "FPCPixel", "reuseSegmentation", true );
+  //save the classifier to a file
+  this->save_classifier = conf->gB ( "FPCPixel", "save_classifier", true ); 
+  //read the classifier from a file
+  this->read_classifier = conf->gB ( "FPCPixel", "read_classifier", false ); 
+
+  //write uncertainty results in the same folder as done for the segmentation results
+  resultdir = conf->gS("debug", "resultdir", "result");
+  cache = conf->gS ( "cache", "root", "" );
+  cache = conf->getAbsoluteFilenameRelativeToThisConfig(cache);
+
+  this->findMaximumUncert = conf->gB(_confSection, "findMaximumUncert", true);
+  this->whs = conf->gI ( _confSection, "window_size", 10 );
+  //distance to next descriptor during training
+  this->trainWSize = conf->gI ( _confSection, "train_window_size", 10 );
+  //distance to next descriptor during testing
+  this->testWSize = conf->gI (_confSection, "test_window_size", 10);
+  // select your segmentation method here
+  this->s_rsMethode = conf->gS ( _confSection, "segmentation", "none" );
+ 
+  if( this->s_rsMethode == "none" )
+  {
+    regionSeg = NULL;
+  }
+  else
+  {
+    RegionSegmentationMethod *tmpRegionSeg = GenericRegionSegmentationMethodSelection::selectRegionSegmentationMethod( conf, this->s_rsMethode );    
+    if ( reuseSegmentation )
+      regionSeg = new RSCache ( conf, tmpRegionSeg );
+    else
+      regionSeg = tmpRegionSeg;
+  }
+  
+  //define which measure for "novelty" we want to use
+  noveltyMethodString = conf->gS( _confSection,  "noveltyMethod", "gp-variance");
+  if (noveltyMethodString.compare("gp-variance") == 0)  // novel = large variance
+  {
+    this->noveltyMethod = GPVARIANCE;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  else if (noveltyMethodString.compare("gp-uncertainty") == 0) //novel = large uncertainty (mean / var)
+  {
+    this->noveltyMethod = GPUNCERTAINTY;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  } 
+  else if (noveltyMethodString.compare("gp-mean") == 0) //novel = small mean
+  {
+    this->noveltyMethod = GPMINMEAN;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-meanRatio") == 0)  //novel = small difference between mean of most plausible class and mean of snd
+                                                              //        most plausible class (not useful in binary settings)
+  {
+    this->noveltyMethod = GPMEANRATIO;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-weightAll") == 0) // novel = large weight in alpha vector after updating the model (can be predicted exactly)
+  {
+    this->noveltyMethod = GPWEIGHTALL;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  else if (noveltyMethodString.compare("gp-weightRatio") == 0) // novel = small difference between weights for alpha vectors 
+                                                               //     with assumptions of GT label to be the most 
+                                                               //     plausible against the second most plausible class   
+  {
+    this->noveltyMethod = GPWEIGHTRATIO;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("random") == 0) 
+  {
+     initRand(); 
+     this->noveltyMethod = RANDOM;
+  }
+  else
+  {
+    this->noveltyMethod = GPVARIANCE;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  
+  b_visualizeALimages = conf->gB(_confSection, "visualizeALimages", false);
+  
+  
+  classifierString = conf->gS ( _confSection, "classifier", "GPHIKClassifier" );  
+  classifier = NULL;
+  vclassifier = NULL;
+  if ( classifierString.compare("GPHIKClassifier") == 0)
+  { 
+    //just to make sure, that we do NOT perform an optimization after every iteration step
+    //this would just take a lot of time, which is not desired so far
+    //TODO edit this!
+    //this->conf->sB( "GPHIKClassifier", "performOptimizationAfterIncrement", false );    
+    classifier = new GPHIKClassifierNICE ( conf, "GPHIKClassifier" );
+  }
+  else
+    vclassifier = GenericClassifierSelection::selectVecClassifier ( conf, classifierString ); 
+  
+  
+  
+  
+  //check the same thing for the training classes - this is very specific to our setup 
+  std::string forbidden_classesTrain_s = conf->gS ( "analysis", "donttrainTrain", "" );
+  if ( forbidden_classesTrain_s == "" )
+  {
+    forbidden_classesTrain_s = conf->gS ( "analysis", "forbidden_classesTrain", "" );
+  }
+  this->classNames->getSelection ( forbidden_classesTrain_s, forbidden_classesTrain );  
+}
+
+
+
+void SemSegNovelty::visualizeRegion(const NICE::ColorImage &img, const NICE::Matrix &regions, int region, NICE::ColorImage &outimage)
+{
+  std::vector<uchar> color;
+  color.push_back(255);
+  color.push_back(0);
+  color.push_back(0);
+    
+  int width = img.width();
+  int height = img.height();
+  
+  outimage.resize(width,height);
+  
+  for(int y = 0; y < height; y++)
+  {
+    for(int x = 0; x < width; x++)
+    {
+      if(regions(x,y) == region)
+      {
+        for(int c = 0; c < 3; c++)
+        {
+          outimage(x,y,c) = color[c];
+        }
+      }
+      else
+      {
+        for(int c = 0; c < 3; c++)
+        {
+          outimage(x,y,c) = img(x,y,c);
+        }
+      }
+    }
+  }
+}
+
+void SemSegNovelty::train ( const MultiDataset *md )
+{
+  if ( this->read_classifier )
+  {
+    try
+    {
+      if ( this->classifier != NULL )
+      {
+        string classifierdst = "/classifier.data";        
+        fprintf ( stderr, "SemSegNovelty:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );        
+        classifier->read ( cache + classifierdst );
+      }
+      else
+      {
+        string classifierdst = "/veccl.data";        
+        fprintf ( stderr, "SemSegNovelty:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );          
+        vclassifier->read ( cache + classifierdst );      
+      }
+      
+
+      fprintf ( stderr, "SemSegNovelty:: successfully read\n" );
+    }
+    catch ( char *str )
+    {
+      cerr << "error reading data: " << str << endl;
+    }
+  }
+  else
+  {
+    const LabeledSet train = * ( *md ) ["train"];
+    const LabeledSet *trainp = &train;
+
+    ////////////////////////
+    // feature extraction //
+    ////////////////////////
+  
+    ProgressBar pb ( "Local Feature Extraction" );
+    pb.show();
+
+    int imgnb = 0;
+
+    Examples examples;
+    examples.filename = "training";
+
+    int featdim = -1;
+
+    classesInUse.clear();  
+    
+    LOOP_ALL_S ( *trainp )
+    {
+      //EACH_S(classno, currentFile);
+      EACH_INFO ( classno, info );
+
+      std::string currentFile = info.img();
+
+      CachedExample *ce = new CachedExample ( currentFile );
+
+      const LocalizationResult *locResult = info.localization();
+      if ( locResult->size() <= 0 )
+      {
+	fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+		  currentFile.c_str() );
+	continue;
+      }
+
+      int xsize, ysize;
+      ce->getImageSize ( xsize, ysize );
+
+      Image labels ( xsize, ysize );
+      labels.set ( 0 );
+      locResult->calcLabeledImage ( labels, ( *classNames ).getBackgroundClass() );
+
+      NICE::ColorImage img;
+      try {
+	img = ColorImage ( currentFile );
+      } catch ( Exception ) {
+	cerr << "SemSegNovelty: error opening image file <" << currentFile << ">" << endl;
+	continue;
+      }
+
+      Globals::setCurrentImgFN ( currentFile );
+
+      MultiChannelImageT<double> feats;
+
+      // extract features
+      featExtract->getFeats ( img, feats );
+      featdim = feats.channels();
+      feats.addChannel(featdim);
+
+      for (int c = 0; c < featdim; c++)
+      {
+	ImageT<double> tmp = feats[c];
+	ImageT<double> tmp2 = feats[c+featdim];
+
+	NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
+      }
+      featdim += featdim;
+
+      // compute integral images
+      for ( int c = 0; c < featdim; c++ )
+      {
+	feats.calcIntegral ( c );
+      }
+
+      for ( int y = 0; y < ysize; y += trainWSize)
+      {
+    for ( int x = 0; x < xsize; x += trainWSize )
+	{
+
+	  int classnoTmp = labels.getPixel ( x, y );
+	  
+	  if ( forbidden_classesTrain.find ( classnoTmp ) != forbidden_classesTrain.end() )
+	  {
+	    continue;
+	  }
+	  
+	  if (classesInUse.find(classnoTmp) == classesInUse.end())
+	  {
+	    classesInUse.insert(classnoTmp);
+	  }
+	  
+	  Example example;
+	  example.vec = NULL;
+	  example.svec = new SparseVector ( featdim );
+	  for ( int f = 0; f < featdim; f++ )
+	  {
+	    double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+	    if ( val > 1e-10 )
+	      ( *example.svec ) [f] = val;
+	  }
+
+	  example.svec->normalize();
+
+	  example.position = imgnb;
+	  examples.push_back ( pair<int, Example> ( classnoTmp, example ) );
+
+	}
+      }
+  
+      
+      
+
+      delete ce;
+      imgnb++;
+      pb.update ( trainp->count() );
+    }
+    
+      
+    numberOfClasses = classesInUse.size();
+    std::cerr << "numberOfClasses: " << numberOfClasses << std::endl;  
+    std::cerr << "classes in use: " << std::endl;
+    for (std::set<int>::const_iterator it = classesInUse.begin(); it != classesInUse.end(); it++)
+    {
+      std::cerr << *it << " ";
+    }    
+    std::cerr << std::endl;
+
+    pb.hide();
+
+
+    //////////////////////
+    // train classifier //
+    //////////////////////
+    FeaturePool fp;
+
+    Feature *f = new SparseVectorFeature ( featdim );
+
+    f->explode ( fp );
+    delete f;
+
+    if ( classifier != NULL )
+    {
+      std::cerr << "train FP-classifier with " << examples.size() << " examples" << std::endl;
+      classifier->train ( fp, examples );
+      std::cerr << "training finished" << std::endl;
+    }
+    else
+    {
+      LabeledSetVector lvec;
+      convertExamplesToLSet ( examples, lvec );
+      vclassifier->teach ( lvec );
+  //     if ( usegmm )
+  //       convertLSetToSparseExamples ( examples, lvec );
+  //     else
+      std::cerr << "classifierString: " << classifierString << std::endl;
+      if (this->classifierString.compare("nn") == 0)
+      {
+	convertLSetToExamples ( examples, lvec, true /* only remove pointers to the data in the LSet-struct*/);
+      }
+      else
+      {
+	convertLSetToExamples ( examples, lvec, false /* remove all training examples of the LSet-struct */);
+      }
+      vclassifier->finishTeaching();
+    }  
+
+    fp.destroy();
+
+    if ( save_classifier )
+    {
+      if ( classifier != NULL )
+	classifier->save ( cache + "/classifier.data" );
+      else
+	vclassifier->save ( cache + "/veccl.data" );    
+    }
+
+    ////////////
+    //clean up//
+    ////////////
+    for ( int i = 0; i < ( int ) examples.size(); i++ )
+    {
+      examples[i].second.clean();
+    }
+    examples.clear();
+
+    cerr << "SemSeg training finished" << endl;
+  }
+}
+
+
/**
 * @brief Semantic segmentation with active-learning support.
 *
 * Extracts features for the current image, classifies every test window,
 * computes a per-pixel novelty score with the configured novelty method and
 * (if a region segmentation is available) aggregates scores per region to
 * determine the most "novel" region, whose examples may be stored for
 * later querying.
 *
 * @param ce            cached example granting access to the image size
 * @param segresult     on entry: GT labels of the image (see note below);
 *                      on exit: predicted class label per pixel
 * @param probabilities on exit: per-class scores for every pixel
 */
void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
{  
  Timer timer;
  timer.start();
  
  //segResult contains the GT labels when this method is called
  // we simply store them in labels, to have an easy access to the GT information lateron
  NICE::Image labels = segresult;
  //just to be sure that we do not have a GT-biased result :)
  segresult.set(0);

  int featdim = -1;

  std::string currentFile = Globals::getCurrentImgFN();


  int xsize, ysize;
  ce->getImageSize ( xsize, ysize );

  // one score channel per known class; initialized to -inf so untouched
  // entries are never mistaken for valid scores
  probabilities.reInit( xsize, ysize, this->classNames->getMaxClassno() + 1);
  probabilities.setAll ( -std::numeric_limits<double>::max() );
   
  NICE::ColorImage img;
  try {
    img = ColorImage ( currentFile );
  } catch ( Exception ) {
    cerr << "SemSegNovelty: error opening image file <" << currentFile << ">" << endl;
    return;
  }

 // MultiChannelImageT<double> m_CurrentImageFeatures;

  // extract features
  m_CurrentImageFeatures.freeData();
  featExtract->getFeats ( img, m_CurrentImageFeatures );
  featdim = m_CurrentImageFeatures.channels();
  m_CurrentImageFeatures.addChannel(featdim);

  // append one gradient-strength channel for every raw feature channel
  for (int c = 0; c < featdim; c++)
  {
    ImageT<double> tmp = m_CurrentImageFeatures[c];
    ImageT<double> tmp2 = m_CurrentImageFeatures[c+featdim];

    NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
  }
  featdim += featdim;

  // compute integral images
  for ( int c = 0; c < featdim; c++ )
  {
    m_CurrentImageFeatures.calcIntegral ( c );
  }
  
  timer.stop();
  std::cout << "AL time for preparation: " << timer.getLastAbsolute() << std::endl;
    
  timer.start();
  //classification results currently only needed to be computed separately if we use the vclassifier, i.e., the nearest neighbor used 
  // for the "novel feature learning" approach
  //in all other settings, such as active sem seg in general, we do this within the novelty-computation-methods
  if ( classifier == NULL )
  {
    this->computeClassificationResults( m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim);
  }
//   timer.stop();
//   
//   std::cerr << "classification results computed" << std::endl;
  
  FloatImage noveltyImage ( xsize, ysize );
  noveltyImage.set ( 0.0 );  
  
  // dispatch to the configured novelty estimation strategy; each method also
  // fills segresult and probabilities as a side effect
  switch (noveltyMethod)
  {
    case GPVARIANCE:
    {
         this->computeNoveltyByVariance( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;
    }
    case GPUNCERTAINTY:
    {
         this->computeNoveltyByGPUncertainty( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;         
    }
    case GPMINMEAN:
    {
         std::cerr << "compute novelty using the minimum mean" << std::endl;
         this->computeNoveltyByGPMean( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;         
    }
    case GPMEANRATIO:
    {
         this->computeNoveltyByGPMeanRatio( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;         
    }
    case GPWEIGHTALL:
    {
         this->computeNoveltyByGPWeightAll( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;         
    }
    case GPWEIGHTRATIO:
    {
         this->computeNoveltyByGPWeightRatio( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;         
    }    
    case RANDOM:
    {
         this->computeNoveltyByRandom( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize,  featdim );
         break;               
    }
    default:
    {
         //do nothing, keep the image constant to 0.0
         break;
    }
         
  }
  
  timer.stop();
  std::cout << "AL time for novelty score computation: " << timer.getLastAbsolute() << std::endl;
  
  if (b_visualizeALimages)
  {
      ColorImage imgrgbTmp (xsize, ysize);
      ICETools::convertToRGB ( noveltyImage, imgrgbTmp );
      showImage(imgrgbTmp, "Novelty Image without Region Segmentation");  
  }
  
    
  timer.start();
  
  // determine regions (superpixel segmentation)
  if(regionSeg != NULL)
  {
    NICE::Matrix mask;
    int amountRegions = regionSeg->segRegions ( img, mask );
    
    //compute probs per region
    std::vector<std::vector<double> > regionProb(amountRegions, std::vector<double>(probabilities.channels(), -std::numeric_limits<double>::max() ));
    std::vector<double> regionNoveltyMeasure (amountRegions, 0.0);

    std::vector<int> regionCounter(amountRegions, 0);
    std::vector<int> regionCounterNovelty(amountRegions, 0);
    // accumulate per-pixel scores into their regions (sampled on the test grid)
    for ( int y = 0; y < ysize; y += testWSize) //y++)
    {
      for (int x = 0; x < xsize; x += testWSize) //x++)
      {
        int r = mask(x,y);
        regionCounter[r]++;

        for(int j = 0; j < probabilities.channels(); j++)
        {
            if( regionProb[r][j] == -std::numeric_limits<double>::max() )
                regionProb[r][j] = 0.0f;
            regionProb[r][j] += probabilities ( x, y, j );
        }
        
        if ( forbidden_classesActiveLearning.find( labels(x,y) ) == forbidden_classesActiveLearning.end() )
        {
          //count the amount of "novelty" for the corresponding region
          regionNoveltyMeasure[r] += noveltyImage(x,y);
          regionCounterNovelty[r]++;
        }
      }
    }
       
    //find best class per region
    std::vector<int> bestClassPerRegion(amountRegions,0);
    
    // initialize the running optimum depending on whether "novel" means a
    // large score (e.g., variance) or a small score (e.g., min mean)
    double maxNoveltyScore = -numeric_limits<double>::max();
    if (!mostNoveltyWithMaxScores)
    {
      maxNoveltyScore = numeric_limits<double>::max();
    }   
    
    int maxUncertRegion = -1;
    
    //loop over all regions and compute averaged novelty scores
    for(int r = 0; r < amountRegions; r++)
    {
      
      //check for the most plausible class per region
      double maxval = -numeric_limits<double>::max();
      
      //loop over all classes
      for(int c = 0; c < probabilities.channels(); c++)
      {
        regionProb[r][c] /= regionCounter[r];
        
        if(  (maxval < regionProb[r][c]) ) //&& (regionProb[r][c] != 0.0) ) 
        {        
              maxval = regionProb[r][c];
              bestClassPerRegion[r] = c;
        }
      }
       
      //if the region only contains unvalid information (e.g., background) skip it
      if (regionCounterNovelty[r] == 0)
      {
        continue;
      }
      
      //normalize summed novelty scores to region size
      regionNoveltyMeasure[r] /= regionCounterNovelty[r];
    
      //did we find a region that has a higher score as the most novel region known so far within this image?
      if(   (  mostNoveltyWithMaxScores && (maxNoveltyScore < regionNoveltyMeasure[r]) )    // if we look for large novelty scores, e.g., variance
        || ( !mostNoveltyWithMaxScores && (maxNoveltyScore > regionNoveltyMeasure[r]) ) )  // if we look for small novelty scores, e.g., min mean
      {
                   //did we already query a region of this image?                --   and it was this specific region
        if ( (queriedRegions.find( currentFile ) != queriedRegions.end() ) && ( queriedRegions[currentFile].find(r) != queriedRegions[currentFile].end() ) )
        {
          continue;
        }
        else //only accept the region as novel if we never queried it before
        {
          maxNoveltyScore = regionNoveltyMeasure[r];
          maxUncertRegion = r;        
        }

      }

    }
    
    // after finding the most novel region for the current image, check whether this region is also the most novel with respect
    // to all previously seen test images
    // if so, store the corresponding features, since we want to "actively" query them to incorporate useful information
    if(findMaximumUncert)
    {
      if(    (   mostNoveltyWithMaxScores && (maxNoveltyScore > globalMaxUncert) )
          || (  !mostNoveltyWithMaxScores && (maxNoveltyScore < globalMaxUncert) ) )
      {
        //current most novel region of the image has "higher" novelty score then previous most novel region of all test images worked on so far
        // -> save new important features of this region
        Examples examples;

        for ( int y = 0; y < ysize; y += testWSize )
        {
          for ( int x = 0; x < xsize; x += testWSize)
          {
              if(mask(x,y) == maxUncertRegion)
              {
                  // skip pixels whose GT label must not be queried
                  int classnoTmp = labels(x,y);
                  if ( forbidden_classesActiveLearning.find(classnoTmp) != forbidden_classesActiveLearning.end() )
                      continue;
                  Example t_Example(NULL, x, y);
                  t_Example.vec = NULL;
                  t_Example.svec = new SparseVector ( featdim );
                  for ( int f = 0; f < featdim; f++ )
                  {
//                      double val =  ( *example.svec ) [f];
                       double val = m_CurrentImageFeatures.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
                    if ( val > 1e-10 )
                      ( *t_Example.svec ) [f] = val;
                  }
                  examples.push_back ( pair<int, Example> ( classnoTmp, t_Example ) );
              }
          }
        }
        
        if(examples.size() > 0)
        {
          std::cerr << "found " << examples.size() << " new examples in the queried region" << std::endl << std::endl;
          // clean up the previously stored examples properly
          for( int i=0; i< newTrainExamples.size(); i++)
          {
              delete newTrainExamples.at(i).second.svec;
              newTrainExamples.at(i).second.svec = NULL;
          }
          newTrainExamples.clear();
          newTrainExamples = examples;
          globalMaxUncert = maxNoveltyScore;
          //prepare for later visualization
//           if (b_visualizeALimages)
            visualizeRegion(img,mask,maxUncertRegion,maskedImg);
        }
        else
        {
          std::cerr << "the queried region has no valid information" << std::endl << std::endl;
        }
        
        //save filename and region index
        currentRegionToQuery.first = currentFile;
        currentRegionToQuery.second = maxUncertRegion;
      }
    }

    //write back best results per region
    //i.e., write normalized novelty scores for every region into the novelty image
    for ( int y = 0; y < ysize; y++)
    {
      for (int x = 0; x < xsize; x++)
      {
        int r = mask(x,y);
        for(int j = 0; j < probabilities.channels(); j++)
        {
          probabilities ( x, y, j ) = regionProb[r][j];
        }
        segresult(x,y) = bestClassPerRegion[r];
        // write novelty scores for every segment into the "final" image
        noveltyImage(x,y) = regionNoveltyMeasure[r];
      }
    }
  } // if regionSeg != null
  
  timer.stop();
  std::cout << "AL time for determination of novel regions: " << timer.getLastAbsolute() << std::endl;

  if (b_visualizeALimages)
  {
    timer.start();

    std::stringstream out;
    std::vector< std::string > list2;
    StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
    out << resultdir << "/" << list2.back();

    noveltyImage.writeRaw(out.str() + "_run_" +  NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+".rawfloat");

    ColorImage imgrgb ( xsize, ysize );
    ICETools::convertToRGB ( noveltyImage, imgrgb );
    showImage(imgrgb, "Novelty Image");

    timer.stop();
    cout << "AL time for writing the raw novelty image: " << timer.getLastAbsolute() << endl;
  }

}
+
/**
 * @brief Classifies the image on a regular grid of windows and writes the
 *        per-class scores and the winning class label for every pixel of
 *        each window.
 *
 * Uses the feature-pool classifier (sparse-vector interface) if available,
 * otherwise falls back to the vector classifier (e.g., nearest neighbor).
 *
 * @param feats         integral feature images of the current image
 * @param segresult     output: predicted class label per pixel
 * @param probabilities output: per-class score per pixel
 * @param xsize         image width
 * @param ysize         image height
 * @param featdim       number of feature channels
 */
inline void SemSegNovelty::computeClassificationResults( const NICE::MultiChannelImageT<double> & feats, 
                                                   NICE::Image & segresult,
                                                   NICE::MultiChannelImageT<double> & probabilities,
                                                   const int & xsize,
                                                   const int & ysize,
                                                   const int & featdim
                                                       )
{
  std::cerr << "featdim: " << featdim << std::endl;
  
  if ( classifier != NULL )
  {  
  
    // rows are independent, so parallelize over y; each thread owns one
    // Example / sparse vector that is re-used for every window in the row
    #pragma omp parallel for
    for ( int y = 0; y < ysize; y += testWSize )
    {
      Example example;
      example.vec = NULL;
      example.svec = new SparseVector ( featdim );
      for ( int x = 0; x < xsize; x += testWSize)
      {
        // mean feature response of the window via the integral images
        for ( int f = 0; f < featdim; f++ )
        {
          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
          if ( val > 1e-10 )
            ( *example.svec ) [f] = val;
        }
        example.svec->normalize();

        ClassificationResult cr = classifier->classify ( example );

        // distribute the window's result to all pixels it covers
        // (clamped to the image borders)
        int xs = std::max(0, x - testWSize/2);
        int xe = std::min(xsize - 1, x + testWSize/2);
        int ys = std::max(0, y - testWSize/2);
        int ye = std::min(ysize - 1, y + testWSize/2);
        for (int yl = ys; yl <= ye; yl++)
        {
          for (int xl = xs; xl <= xe; xl++)
          {
            for ( int j = 0 ; j < cr.scores.size(); j++ )
            {
              probabilities ( xl, yl, j ) = cr.scores[j];
            }
            segresult ( xl, yl ) = cr.classno;
          }
        }
        
        // reset the sparse vector for the next window
        example.svec->clear();
      }
      delete example.svec;
      example.svec = NULL;
    }
  }
  else //vclassifier
  {
    std::cerr << "compute classification results with vclassifier" << std::endl;
    #pragma omp parallel for
    for ( int y = 0; y < ysize; y += testWSize )
    {
      for ( int x = 0; x < xsize; x += testWSize)
      {
        // dense feature vector for the vector-based classifier
        NICE::Vector v(featdim);
        for ( int f = 0; f < featdim; f++ )
        {
          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
          v[f] = val;
        }
        v.normalizeL1();

        ClassificationResult cr = vclassifier->classify ( v );

        int xs = std::max(0, x - testWSize/2);
        int xe = std::min(xsize - 1, x + testWSize/2);
        int ys = std::max(0, y - testWSize/2);
        int ye = std::min(ysize - 1, y + testWSize/2);
        for (int yl = ys; yl <= ye; yl++)
        {
          for (int xl = xs; xl <= xe; xl++)
          {
            for ( int j = 0 ; j < cr.scores.size(); j++ )
            {
              probabilities ( xl, yl, j ) = cr.scores[j];
            }
            segresult ( xl, yl ) = cr.classno;
          }
        }
      }
    }    

  }
}
+
+// compute novelty images depending on the strategy chosen
+
+void SemSegNovelty::computeNoveltyByRandom(         NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim  )
+{
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+  
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      
+      double randVal = randDouble();
+
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;          
+          noveltyImage ( xl, yl ) = randVal; 
+        }
+      }     
+    }
+  }  
+}
+
+
+void SemSegNovelty::computeNoveltyByVariance(       NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+      
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;          
+          noveltyImage ( xl, yl ) = cr.uncertainty; 
+        }
+      }          
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
/**
 * @brief Novelty estimation with the GP-UNCERTAINTY heuristic proposed by
 *        Kapoor et al. (IJCV 2010): the largest absolute mean over all
 *        allowed classes divided by the square root of (variance + noise).
 *
 * @param noveltyImage  output: GP-uncertainty novelty score per pixel
 * @param feats         integral feature images of the current image
 * @param segresult     output: predicted class label per pixel
 * @param probabilities output: per-class score per pixel
 * @param xsize         image width
 * @param ysize         image height
 * @param featdim       number of feature channels
 */
void SemSegNovelty::computeNoveltyByGPUncertainty(  NICE::FloatImage & noveltyImage, 
                                              const NICE::MultiChannelImageT<double> & feats,  
                                                    NICE::Image & segresult,
                                                    NICE::MultiChannelImageT<double> & probabilities,
                                             const int & xsize, const int & ysize, const int & featdim )
{
  
  // GP noise term, currently hard-coded
  double gpNoise =  0.01;
  //TODO getMethod for GPHIK
  //conf->gD("GPHIK", "noise", 0.01);
  
  
#pragma omp parallel for
  for ( int y = 0; y < ysize; y += testWSize )
  {
    Example example;
    example.vec = NULL;
    example.svec = new SparseVector ( featdim );
    for ( int x = 0; x < xsize; x += testWSize)
    {
      // build and normalize the sparse feature vector of the current window
      for ( int f = 0; f < featdim; f++ )
      {
        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
        if ( val > 1e-10 )
          ( *example.svec ) [f] = val;
      }
      example.svec->normalize();

      ClassificationResult cr = classifier->classify ( example );
      
      double maxMeanAbs ( 0.0 ); 
      
      // largest absolute mean over all classes allowed during training
      for ( int j = 0 ; j < cr.scores.size(); j++ )
      {   
        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
        {
          continue;
        }
        //check for larger abs mean
        if (abs(cr.scores[j]) > maxMeanAbs)
        {
          maxMeanAbs = abs(cr.scores[j]);
        }
        
      }

      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
      
      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
      // as implemented here: GP-UNCERTAINTY = |mean| / sqrt(var + gpnoise)
      double gpUncertaintyVal = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))

      // distribute the result to all pixels of the window (border-clamped)
      int xs = std::max(0, x - testWSize/2);
      int xe = std::min(xsize - 1, x + testWSize/2);
      int ys = std::max(0, y - testWSize/2);
      int ye = std::min(ysize - 1, y + testWSize/2);
      for (int yl = ys; yl <= ye; yl++)
      {
        for (int xl = xs; xl <= xe; xl++)
        {         
          for ( int j = 0 ; j < cr.scores.size(); j++ )
          {
            probabilities ( xl, yl, j ) = cr.scores[j];
          }
          segresult ( xl, yl ) = cr.classno;          
          noveltyImage ( xl, yl ) = gpUncertaintyVal;  
        }
      }   
      
      example.svec->clear();
    }
    delete example.svec;
    example.svec = NULL;
  }  
}
+
+void SemSegNovelty::computeNoveltyByGPMean(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  0.01;
+  //TODO getMethod for GPHIK
+  //conf->gD("GPHIK", "noise", 0.01);
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double minMeanAbs ( numeric_limits<double>::max() );
+      
+      for ( int j = 0 ; j < probabilities.channels(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+
+        //check whether we found a class with higher smaller abs mean than the current minimum
+        if (abs( cr.scores[j] ) < minMeanAbs)
+        {
+          minMeanAbs = abs(cr.scores[j]); 
+        }
+      }
+
+      // compute results when we take the lowest mean value of all classes
+      double gpMeanVal = minMeanAbs;
+  
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;          
+          noveltyImage ( xl, yl ) = gpMeanVal; 
+        }
+      }     
+    }
+  }  
+}
+
+void SemSegNovelty::computeNoveltyByGPMeanRatio(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  0.01;
+  //TODO getMethod for GPHIK
+  //conf->gD("GPHIK", "noise", 0.01);
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double maxMean ( -numeric_limits<double>::max() );
+      double sndMaxMean ( -numeric_limits<double>::max() );     
+      
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        
+        //check for larger mean without abs as well
+        if (cr.scores[j] > maxMean)
+        {
+          sndMaxMean = maxMean;
+          maxMean = cr.scores[j];
+        }
+        // and also for the second highest mean of all classes
+        else if (cr.scores[j] > sndMaxMean)
+        {
+          sndMaxMean = cr.scores[j];
+        }          
+      }
+      
+      //look at the difference in the absolut mean values for the most plausible class
+      // and the second most plausible class
+      double gpMeanRatioVal= maxMean - sndMaxMean;
+
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;          
+          noveltyImage ( xl, yl ) = gpMeanRatioVal;
+        }
+      }    
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+void SemSegNovelty::computeNoveltyByGPWeightAll(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  0.01;
+  //TODO getMethod for GPHIK
+  //conf->gD("GPHIK", "noise", 0.01);
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+      
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+      
+      double gpWeightAllVal ( 0.0 );
+
+      if ( numberOfClasses > 2)
+      {
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }          
+          
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(cr.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(cr.scores[j] - 1);
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAllVal = thirdTerm*firstTerm;        
+      }
+      else //binary scenario
+      {
+        gpWeightAllVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+        gpWeightAllVal *= firstTerm;
+      }
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;          
+          noveltyImage ( xl, yl ) = gpWeightAllVal;
+        }
+      }
+   
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+/**
+ * @brief Computes a per-pixel novelty score from the ratio of hypothetical
+ *        GP alpha-weights: the gap between the weight the most plausible class
+ *        would receive and the weight of the second most plausible class,
+ *        scaled by the predictive-variance term 1/sqrt(var + noise).
+ *        Small gaps indicate ambiguous, potentially novel, regions.
+ * @param noveltyImage  output: one novelty score per pixel
+ * @param feats         integral feature image; features are read window-wise
+ * @param segresult     output: predicted class label per pixel
+ * @param probabilities output: per-class scores per pixel
+ * @param xsize         image width
+ * @param ysize         image height
+ * @param featdim       feature dimensionality
+ */
+void SemSegNovelty::computeNoveltyByGPWeightRatio(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  // GP observation-noise term added to the predictive variance below
+  double gpNoise =  0.01;
+  //TODO getMethod for GPHIK
+  //conf->gD("GPHIK", "noise", 0.01);
+  
+// NOTE(review): result windows below extend testWSize/2 beyond the y-stride, so
+// adjacent parallel strips can write overlapping boundary rows -- confirm this
+// race is benign (all threads write cr-derived values for the same pixels)
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // build a sparse feature vector from the (2*whs+1)^2 window around (x,y)
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+ 
+
+       // variance-based scaling: large predictive variance -> small firstTerm
+       double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+       double gpWeightRatioVal ( 0.0 );
+
+       if ( numberOfClasses > 2)
+       {
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        // only classes that are allowed for training contribute
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }          
+          
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already   
+        
+        //now look on the ratio of the resulting weights for the most plausible
+        // against the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        // single pass keeping track of the largest and second-largest value
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatioVal = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, look for this feature how it would affect to whole model (summarized by weight-vector alpha), if we would 
+        //use it as an additional training example
+        //TODO this would be REALLY computational demanding. Do we really want to do this?
+  //         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+  //         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
+       }
+       else //binary scenario
+       {
+         // distance of the single score to the closer of the two targets {-1, +1}
+         gpWeightRatioVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+         gpWeightRatioVal *= firstTerm;
+       }
+
+      // broadcast the result to the whole testWSize window around (x,y)
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;          
+          noveltyImage ( xl, yl ) = gpWeightRatioVal;  
+        }
+      }
+       
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+
+/**
+ * @brief Incorporates a single labeled example into the current model
+ *        (incremental / active learning step).
+ * @param v_newExample feature vector of the new example
+ * @param newClassNo   class label of the new example
+ *
+ * NOTE(review): numberOfClasses is only incremented when the class was
+ * previously in forbidden_classesTrain -- an entirely unseen class that was
+ * never forbidden leaves the counter unchanged; confirm this is intended.
+ */
+void SemSegNovelty::addNewExample(const NICE::Vector& v_newExample, const int & newClassNo)
+{
+  //accept the new class as valid information
+  if ( forbidden_classesTrain.find ( newClassNo ) != forbidden_classesTrain.end() )
+  {
+    forbidden_classesTrain.erase(newClassNo);
+    numberOfClasses++;
+  }
+  if ( classesInUse.find ( newClassNo ) == classesInUse.end() )
+  {
+    classesInUse.insert( newClassNo );
+  }    
+  
+  
+  //then add it to the classifier used
+  if ( classifier != NULL )
+  { 
+    if (this->classifierString.compare("GPHIKClassifier") == 0)    
+    {
+      // NOTE(review): newExample.svec points to the stack-local svec below --
+      // assumes addExample copies the data before this scope ends; confirm
+      Example newExample;
+      SparseVector svec ( v_newExample );
+      newExample.svec = &svec;
+      static_cast<GPHIKClassifierNICE*>(classifier)->addExample ( newExample, newClassNo );
+    } 
+  }
+  else //vclassifier
+  {
+    if (this->classifierString.compare("nn") == 0)    
+    {
+      vclassifier->teach ( newClassNo, v_newExample );
+    }
+  }
+}
+
+/**
+ * @brief Active-learning update step: writes out the most novel region found
+ *        so far, accepts any new classes appearing in the queried examples,
+ *        adds the queried examples to the classifier, records the queried
+ *        region, and resets the global novelty maximum for the next iteration.
+ *
+ * Uses the member state filled during classification: maskedImg,
+ * newTrainExamples, currentRegionToQuery, queriedRegions.
+ */
+void SemSegNovelty::addNovelExamples()
+{
+
+  Timer timer;
+  
+  //show the image that contains the most novel region
+  if (b_visualizeALimages)
+    showImage(maskedImg, "Most novel region");  
+  
+  timer.start();
+  
+
+    // persist the queried image, suffixed with iteration count and novelty method
+    std::stringstream out;
+    std::vector< std::string > list2;
+    StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
+    out << resultdir << "/" << list2.back();     
+    
+    maskedImg.writePPM ( out.str() + "_run_" +  NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+ "_query.ppm" );
+
+  
+  timer.stop();
+  std::cerr << "AL time for writing queried image: " << timer.getLast() << std::endl;
+
+  timer.start();
+  
+  //check which classes will be added using the features from the novel region
+  std::set<int> newClassNumbers;
+  newClassNumbers.clear(); //just to be sure  
+  for ( uint i = 0 ; i < newTrainExamples.size() ; i++ )
+  {
+    if (newClassNumbers.find(newTrainExamples[i].first /* classNumber*/) == newClassNumbers.end() )
+    {
+      newClassNumbers.insert(newTrainExamples[i].first );
+    }
+  }
+
+  //accept the new classes as valid information
+  for (std::set<int>::const_iterator clNoIt = newClassNumbers.begin(); clNoIt != newClassNumbers.end(); clNoIt++)
+  {
+    if ( forbidden_classesTrain.find ( *clNoIt ) != forbidden_classesTrain.end() )
+    {
+      forbidden_classesTrain.erase(*clNoIt);
+      numberOfClasses++;
+    }
+    if ( classesInUse.find ( *clNoIt ) == classesInUse.end() )
+    {
+      classesInUse.insert( *clNoIt );
+    }
+  }
+  
+  timer.stop();
+  std::cerr << "AL time for accepting possible new classes: " << timer.getLast() << std::endl;
+  
+  timer.start();
+  //then add the new features to the classifier used
+  if ( classifier != NULL )
+  { 
+    if (this->classifierString.compare("GPHIKClassifier") == 0)    
+    {
+      classifier->addMultipleExamples ( this->newTrainExamples );
+    }    
+  }
+  else //vclassifier
+  {
+    //TODO
+  }
+  
+  timer.stop();
+  std::cerr << "AL time for actually updating the classifier: " << timer.getLast() << std::endl;
+  
+  std::cerr << "the current region to query is: " << currentRegionToQuery.first << " -- " << currentRegionToQuery.second << std::endl;
+  
+  //did we already query a region of this image?
+  if ( queriedRegions.find( currentRegionToQuery.first ) != queriedRegions.end() )
+  {
+    queriedRegions[ currentRegionToQuery.first ].insert(currentRegionToQuery.second);
+  }
+  else
+  {
+    std::set<int> tmpSet; tmpSet.insert(currentRegionToQuery.second);
+    queriedRegions.insert(std::pair<std::string,std::set<int> > (currentRegionToQuery.first, tmpSet ) );
+  }  
+  
+  std::cerr << "Write already queried regions: " << std::endl;
+  for (std::map<std::string,std::set<int> >::const_iterator it = queriedRegions.begin(); it != queriedRegions.end(); it++)
+  {
+    std::cerr << "image: " << it->first << " --   ";
+    for (std::set<int>::const_iterator itReg = it->second.begin(); itReg != it->second.end(); itReg++)
+    {
+      std::cerr << *itReg << " ";
+    } 
+    std::cerr << std::endl;
+  }
+  
+  //clear the latest results, since one iteration is over
+  // (maximizing methods restart from -max, minimizing methods from +max)
+  globalMaxUncert = -numeric_limits<double>::max();
+  if (!mostNoveltyWithMaxScores)
+    globalMaxUncert = numeric_limits<double>::max();
+}
+
+/**
+ * @brief Read access to the examples gathered from the most novel region.
+ * @return pointer to the internal newTrainExamples container (no copy;
+ *         remains owned by this object)
+ */
+const Examples * SemSegNovelty::getNovelExamples() const
+{
+  return &(this->newTrainExamples);
+}
+
+///////////////////// INTERFACE PERSISTENT /////////////////////
+// interface specific methods for store and restore
+///////////////////// INTERFACE PERSISTENT ///////////////////// 
+
+/**
+ * @brief Restores the object state from a stream (Persistent interface).
+ *        Reads tag-framed sections in arbitrary order until the closing
+ *        "SemSegNovelty" end tag; unknown sections abort via throw.
+ * @param is     input stream produced by store()
+ * @param format storage format identifier (forwarded to sub-objects)
+ */
+void SemSegNovelty::restore ( std::istream & is, int format )
+{
+  //delete everything we knew so far...
+  this->clear();
+  
+  bool b_restoreVerbose ( false );
+#ifdef B_RESTOREVERBOSE
+  b_restoreVerbose = true;
+#endif  
+  
+  if ( is.good() )
+  {
+    if ( b_restoreVerbose ) 
+      std::cerr << " restore SemSegNovelty" << std::endl;
+    
+    std::string tmp;
+    is >> tmp; //class name 
+    
+    if ( ! this->isStartTag( tmp, "SemSegNovelty" ) )
+    {
+      std::cerr << " WARNING - attempt to restore SemSegNovelty, but start flag " << tmp << " does not match! Aborting... " << std::endl;
+      throw;
+    }   
+    
+    if (classifier != NULL)
+    {
+      delete classifier;
+      classifier = NULL;
+    }    
+    
+    is.precision (numeric_limits<double>::digits10 + 1);
+    
+    bool b_endOfBlock ( false ) ;
+    
+    while ( !b_endOfBlock )
+    {
+      is >> tmp; // start of block 
+      
+      if ( this->isEndTag( tmp, "SemSegNovelty" ) )
+      {
+        b_endOfBlock = true;
+        continue;
+      }      
+      
+      tmp = this->removeStartTag ( tmp );
+      
+      if ( b_restoreVerbose )
+        std::cerr << " currently restore section " << tmp << " in SemSegNovelty" << std::endl;
+      
+      
+      ///////////////////////////////
+      //     FEATURE EXTRACTION    //
+      ///////////////////////////////
+      if ( tmp.compare("featExtract") == 0 )
+      { 
+        featExtract->restore(is, format);
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }      
+      else if ( tmp.compare("trainWSize") == 0 )
+      { 
+        is >> trainWSize;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("whs") == 0 )
+      { 
+        is >> whs;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("testWSize") == 0 )
+      { 
+        is >> testWSize;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }  
+      ///////////////////////////////
+      //     NOVELTY COMPUTATION   //
+      ///////////////////////////////
+      else if ( tmp.compare("noveltyMethod") == 0 )
+      {
+        unsigned int ui_noveltyMethod;
+        is >> ui_noveltyMethod;        
+        this->noveltyMethod = static_cast<NoveltyMethod> ( ui_noveltyMethod );
+	
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("noveltyMethodString") == 0 )
+      {
+        is >> noveltyMethodString;        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("globalMaxUncert") == 0 )
+      {
+        is >> globalMaxUncert;        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("mostNoveltyWithMaxScores") == 0 )
+      { 
+        is >> mostNoveltyWithMaxScores;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("findMaximumUncert") == 0 )
+      { 
+        is >> findMaximumUncert;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      //TODO maskedImg
+      else if ( tmp.compare("b_visualizeALimages") == 0 )
+      { 
+        is >> b_visualizeALimages;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      ///////////////////////////////
+      //     CLASSIFICATION STUFF  //
+      /////////////////////////////// 
+      else if ( tmp.compare("classifier") == 0 )
+      {
+        std::string isNull;
+        is >> isNull;
+        
+        // check whether we originally used a classifier
+        if ( isNull.compare( "NULL" ) == 0 )
+        {
+          if ( classifier != NULL )
+            delete classifier;
+          classifier = NULL;
+        }
+        else
+        {
+          if ( classifier == NULL )
+            classifier = new OBJREC::GPHIKClassifierNICE();
+
+          classifier->restore(is, format);              
+        }
+          
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("vclassifier") == 0 )
+      {
+        std::string isNull;
+        is >> isNull;
+        
+        // check whether we originally used a vclassifier
+        if ( isNull.compare( "NULL" ) == 0 )
+        {
+          if ( vclassifier != NULL )
+            delete vclassifier;
+          vclassifier = NULL;
+        }
+        else
+        {
+          fthrow ( NICE::Exception, "Restoring of VecClassifiers is not implemented yet!" );
+/*          if ( vclassifier == NULL )
+            vclassifier = new OBJREC::VecClassifier();
+
+          vclassifier->restore(is, format);  */          
+        }
+          
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("forbidden_classesTrain") == 0 )
+      {
+        is >> tmp; // size
+        int forbClTrainSize ( 0 );
+        is >> forbClTrainSize;
+
+        forbidden_classesTrain.clear();
+        
+        if ( b_restoreVerbose ) 
+          std::cerr << "restore forbidden_classesTrain with size: " << forbClTrainSize << std::endl;
+
+        if ( forbClTrainSize > 0 )
+        {
+          if ( b_restoreVerbose ) 
+            std::cerr << " restore forbidden_classesTrain" << std::endl;
+          
+          for (int i = 0; i < forbClTrainSize; i++)
+          {
+            int classNo;
+            is >> classNo;        
+            forbidden_classesTrain.insert ( classNo );
+          }
+        } 
+        else
+        {
+          if ( b_restoreVerbose ) 
+            std::cerr << " skip restoring forbidden_classesTrain" << std::endl;
+        }
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("forbidden_classesActiveLearning") == 0 )
+      {
+        is >> tmp; // size
+        int forbClALSize ( 0 );
+        is >> forbClALSize;
+
+        forbidden_classesActiveLearning.clear();
+        
+        if ( b_restoreVerbose ) 
+          std::cerr << "restore forbidden_classesActiveLearning with size: " << forbClALSize << std::endl;
+
+        if ( forbClALSize > 0 )
+        {
+          if ( b_restoreVerbose ) 
+            std::cerr << " restore forbidden_classesActiveLearning" << std::endl;
+          
+          for (int i = 0; i < forbClALSize; i++)
+          {
+            int classNo;
+            is >> classNo;        
+            forbidden_classesActiveLearning.insert ( classNo );
+          }
+        } 
+        else
+        {
+          if ( b_restoreVerbose ) 
+            std::cerr << " skip restoring forbidden_classesActiveLearning" << std::endl;
+        }
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("classesInUse") == 0 )
+      {
+        is >> tmp; // size
+        int clInUseSize ( 0 );
+        is >> clInUseSize;
+
+        classesInUse.clear();
+        
+        if ( b_restoreVerbose ) 
+          std::cerr << "restore classesInUse with size: " << clInUseSize << std::endl;
+
+        if ( clInUseSize > 0 )
+        {
+          if ( b_restoreVerbose ) 
+            std::cerr << " restore classesInUse" << std::endl;
+          
+          for (int i = 0; i < clInUseSize; i++)
+          {
+            int classNo;
+            is >> classNo;        
+            classesInUse.insert ( classNo );
+          }
+        } 
+        else
+        {
+          if ( b_restoreVerbose ) 
+            std::cerr << " skip restoring classesInUse" << std::endl;
+        }
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("numberOfClasses") == 0 )
+      { 
+        is >> numberOfClasses;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("read_classifier") == 0 )
+      { 
+        is >> read_classifier;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("save_classifier") == 0 )
+      { 
+        is >> save_classifier;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      } 
+      else if ( tmp.compare("cache") == 0 )
+      { 
+        is >> cache;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      else if ( tmp.compare("resultdir") == 0 )
+      { 
+        is >> resultdir;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      //TODO newTrainExamples
+      ///////////////////////////////
+      //     SEGMENTATION STUFF    //
+      ///////////////////////////////
+      //TODO regionSeg
+      else if ( tmp.compare("s_rsMethode") == 0 )
+      { 
+        is >> this->s_rsMethode;
+	// theoretically, we should properly store and restore the regionSeg object. However,  its parent class does not provide
+	// a Persistent interface yet. Hence, we perform this tiny workaround which works, since regionSeg is not changed over time...
+	// only be aware of parameters originally set via config...
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }
+      //NOTE regionSeg seems really important to keep track off
+      else if ( tmp.compare("reuseSegmentation") == 0 )
+      { 
+        is >> reuseSegmentation;
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }       
+      else if ( tmp.compare("queriedRegions") == 0 )
+      {
+        is >> tmp; // size
+        int queriedRegionsSize ( 0 );
+        is >> queriedRegionsSize;
+        queriedRegions.clear();
+
+        if ( b_restoreVerbose ) 
+          std::cerr << "restore queriedRegions with size: " << queriedRegionsSize << std::endl;
+        for ( int i = 0; i < queriedRegionsSize; i++ )
+        {
+	  // restore key
+          std::string key;
+          is >> key;
+	  
+	  // restore values -- inner loop over sets
+	  is >> tmp; // size
+	  int regionsOfImgSize ( 0 );
+	  is >> regionsOfImgSize;
+
+	  std::set< int > regionsOfImg;
+	  regionsOfImg.clear();
+	    
+	    for (int i = 0; i < regionsOfImgSize; i++)
+	    {
+	      int idxRegion;
+	      is >> idxRegion;        
+	      regionsOfImg.insert ( idxRegion );
+	    }
+          queriedRegions.insert ( std::pair<std::string, std::set< int > > ( key, regionsOfImg ) );
+        }
+        
+        is >> tmp; // end of block 
+        tmp = this->removeEndTag ( tmp );
+      }            
+      //
+      //TODO currentRegionToQuery
+      //
+      ///////////////////////////////
+      //       PARENT OBJECT       //
+      ///////////////////////////////
+      else if ( tmp.compare("SemSegNovelty--Parent") == 0 )
+      {
+        // restore parent object
+        SemanticSegmentation::restore(is);
+      }      
+      else
+      {
+      std::cerr << "WARNING -- unexpected SemSegNovelty object -- " << tmp << " -- for restoration... aborting" << std::endl;
+      throw;
+      }
+      
+      // INSTANTIATE (YET) NON-RESTORABLE OBJECTS
+      // NOTE(review): this code sits INSIDE the while-loop, so regionSeg is torn
+      // down and re-created once per restored section rather than once after the
+      // loop -- presumably unintended; confirm before relying on this behavior
+      //TODO destructor of regionSeg is non-virtual so far - change this accordingly!
+      if ( this->regionSeg != NULL ) 
+	delete this->regionSeg;
+      
+      if( this->s_rsMethode == "none" )
+      {
+	this->regionSeg = NULL;
+      }
+      else
+      {
+	//NOTE using an empty config file might not be save...
+	NICE::Config tmpConfEmpty;
+	RegionSegmentationMethod *tmpRegionSeg = GenericRegionSegmentationMethodSelection::selectRegionSegmentationMethod( &tmpConfEmpty, this->s_rsMethode );    
+	if ( reuseSegmentation )
+	  this->regionSeg = new RSCache ( &tmpConfEmpty, tmpRegionSeg );
+	else
+	  this->regionSeg = tmpRegionSeg;
+      }      
+      
+      // done restoration
+    }
+  }
+  else
+  {
+    std::cerr << "SemSegNovelty::restore -- InStream not initialized - restoring not possible!" << std::endl;
+    throw;
+  }
+  
+  
+ 
+}
+
+/**
+ * @brief Stores the object state to a stream (Persistent interface),
+ *        section by section, framed by start/end tags.
+ * @param os     output stream
+ * @param format storage format identifier (forwarded to sub-objects)
+ *
+ * NOTE(review): several defects visible here, to be confirmed against restore():
+ *  - most sections are closed with createStartTag instead of createEndTag,
+ *    which restore()'s removeEndTag will not accept;
+ *  - tag names diverge from restore()'s expectations (e.g. "trainWsize" here
+ *    vs. "trainWSize" in restore; "classifierString" has no restore handler);
+ *  - the vclassifier section tests 'classifier != NULL' instead of
+ *    'vclassifier != NULL' before dereferencing vclassifier.
+ */
+void SemSegNovelty::store ( std::ostream & os, int format ) const
+{ 
+  if (os.good())
+  {
+    // show starting point
+    os << this->createStartTag( "SemSegNovelty" ) << std::endl;    
+    
+    ///////////////////////////////
+    //     FEATURE EXTRACTION    //
+    ///////////////////////////////
+    os << this->createStartTag( "featExtract" ) << std::endl;
+    featExtract->store ( os );
+    os << this->createStartTag( "featExtract" ) << std::endl; 
+    
+    os << this->createStartTag( "trainWsize" ) << std::endl;
+    os << this->trainWSize << std::endl;
+    os << this->createStartTag( "trainWsize" ) << std::endl;  
+    
+    os << this->createStartTag( "whs" ) << std::endl;
+    os << this->whs << std::endl;
+    os << this->createStartTag( "whs" ) << std::endl;
+    
+    os << this->createStartTag( "testWSize" ) << std::endl;
+    os << this->testWSize << std::endl;
+    os << this->createStartTag( "testWSize" ) << std::endl;      
+    
+    ///////////////////////////////
+    //     NOVELTY COMPUTATION   //
+    ///////////////////////////////
+    
+    os << this->createStartTag( "noveltyMethod" ) << std::endl;
+    os << this->noveltyMethod << std::endl;
+    os << this->createStartTag( "noveltyMethod" ) << std::endl;
+
+    os << this->createStartTag( "noveltyMethodString" ) << std::endl;
+    os << this->noveltyMethodString << std::endl;
+    os << this->createStartTag( "noveltyMethodString" ) << std::endl;
+    
+    os << this->createStartTag( "globalMaxUncert" ) << std::endl;
+    os << this->globalMaxUncert << std::endl;
+    os << this->createStartTag( "globalMaxUncert" ) << std::endl;
+
+    os << this->createStartTag( "mostNoveltyWithMaxScores" ) << std::endl;
+    os << this->mostNoveltyWithMaxScores << std::endl;
+    os << this->createStartTag( "mostNoveltyWithMaxScores" ) << std::endl;
+
+    os << this->createStartTag( "findMaximumUncert" ) << std::endl;
+    os << this->findMaximumUncert << std::endl;
+    os << this->createStartTag( "findMaximumUncert" ) << std::endl;    
+
+    //TODO maskedImg
+    
+    os << this->createStartTag( "b_visualizeALimages" ) << std::endl;
+    os << this->b_visualizeALimages << std::endl;
+    os << this->createStartTag( "b_visualizeALimages" ) << std::endl;  
+
+    
+    ///////////////////////////////
+    //     CLASSIFICATION STUFF  //
+    ///////////////////////////////
+    
+    os << this->createStartTag( "classifierString" ) << std::endl;
+    os << this->classifierString << std::endl;
+    os << this->createStartTag( "classifierString" ) << std::endl; 
+
+    os << this->createStartTag( "classifier" ) << std::endl;
+    if ( this->classifier != NULL )
+    {
+      os << "NOTNULL" << std::endl;
+      classifier->store ( os, format );
+    }
+    else
+    {
+     os << "NULL" << std::endl;
+    }
+    os << this->createEndTag( "classifier" ) << std::endl;
+    
+    //
+    
+    os << this->createStartTag( "vclassifier" ) << std::endl;
+    // NOTE(review): guard checks 'classifier' but stores 'vclassifier' -- confirm
+    if ( this->classifier != NULL )
+    {
+      os << "NOTNULL" << std::endl;
+      vclassifier->store ( os, format );
+    }
+    else
+    {
+     os << "NULL" << std::endl;
+    }
+    os << this->createEndTag( "vclassifier" ) << std::endl;
+    
+    
+    os << this->createStartTag( "forbidden_classesTrain" ) << std::endl;
+    os << "size: " << forbidden_classesTrain.size() << std::endl;
+
+    for ( std::set< int >::const_iterator itForbClassTrain = forbidden_classesTrain.begin();
+          itForbClassTrain != forbidden_classesTrain.end();
+          itForbClassTrain++
+        )
+    {
+      os << *itForbClassTrain << " " << std::endl;
+    }   
+    os << this->createEndTag( "forbidden_classesTrain" ) << std::endl;
+    
+    //
+    
+    os << this->createStartTag( "forbidden_classesActiveLearning" ) << std::endl;
+    os << "size: " << forbidden_classesActiveLearning.size() << std::endl;
+
+    for ( std::set< int >::const_iterator itForbClassAL = forbidden_classesActiveLearning.begin();
+          itForbClassAL != forbidden_classesActiveLearning.end();
+          itForbClassAL++
+        )
+    {
+      os << *itForbClassAL << " " << std::endl;
+    }   
+    os << this->createEndTag( "forbidden_classesActiveLearning" ) << std::endl;     
+    
+    //
+    
+    os << this->createStartTag( "classesInUse" ) << std::endl;
+    os << "size: " << classesInUse.size() << std::endl;
+
+    for ( std::set< int >::const_iterator itClassesInUse = classesInUse.begin();
+          itClassesInUse != classesInUse.end();
+          itClassesInUse++
+        )
+    {
+      os << *itClassesInUse << " " << std::endl;
+    }   
+    os << this->createEndTag( "classesInUse" ) << std::endl;
+    
+    
+    os << this->createStartTag( "numberOfClasses" ) << std::endl;
+    os << this->numberOfClasses << std::endl;
+    os << this->createStartTag( "numberOfClasses" ) << std::endl;     
+    
+    
+    os << this->createStartTag( "read_classifier" ) << std::endl;
+    os << this->read_classifier << std::endl;
+    os << this->createStartTag( "read_classifier" ) << std::endl;     
+    
+    
+    os << this->createStartTag( "save_classifier" ) << std::endl;
+    os << this->save_classifier << std::endl;
+    os << this->createStartTag( "save_classifier" ) << std::endl;     
+    
+    
+    os << this->createStartTag( "cache" ) << std::endl;
+    os << this->cache << std::endl;
+    os << this->createStartTag( "cache" ) << std::endl;     
+    
+    
+    os << this->createStartTag( "resultdir" ) << std::endl;
+    os << this->resultdir << std::endl;
+    os << this->createStartTag( "resultdir" ) << std::endl;     
+
+    //TODO newTrainExamples    
+    
+    ///////////////////////////////
+    //     SEGMENTATION STUFF    //
+    ///////////////////////////////
+    
+    // theoretically, we should properly store and restore the regionSeg object. However,  its parent class does not provide
+    // a Persistent interface yet. Hence, we perform this tiny workaround which works, since regionSeg is not changed over time...
+    // only be aware of parameters originally set via config...    
+    os << this->createStartTag( "s_rsMethode" ) << std::endl;
+    os << this->s_rsMethode << std::endl;
+    os << this->createStartTag( "s_rsMethode" ) << std::endl;    
+    
+    os << this->createStartTag( "reuseSegmentation" ) << std::endl;
+    os << this->reuseSegmentation << std::endl;
+    os << this->createStartTag( "reuseSegmentation" ) << std::endl;    
+    
+    os << this->createStartTag( "queriedRegions" ) << std::endl;
+    os << "size: " << queriedRegions.size() << std::endl;
+    std::map< std::string, std::set< int > >::const_iterator itQueriedRegions = queriedRegions.begin();
+    for ( uint i = 0; i < queriedRegions.size(); i++ )
+    {
+      // store key
+      os << itQueriedRegions->first << std::endl;
+      
+      // store values -- inner loop over sets
+      os << "size: " << ( itQueriedRegions->second ).size() << std::endl;
+
+      for ( std::set< int >::const_iterator itRegionsOfImg = ( itQueriedRegions->second ).begin();
+	    itRegionsOfImg != ( itQueriedRegions->second ).end();
+	    itRegionsOfImg++
+	  )
+      {
+	os << *itRegionsOfImg << " " << std::endl;
+      }       
+      
+      itQueriedRegions++;
+    } 
+    os << this->createStartTag( "queriedRegions" ) << std::endl;
+    //
+    //TODO currentRegionToQuery
+
+    
+    
+    ///////////////////////////////
+    //       PARENT OBJECT       //
+    ///////////////////////////////
+    os << this->createStartTag( "SemSegNovelty--Parent" ) << std::endl;
+    SemanticSegmentation::store(os);  
+    os << this->createStartTag( "SemSegNovelty--Parent" ) << std::endl;
+    
+    
+    // done
+    os << this->createEndTag( "SemSegNovelty" ) << std::endl;    
+  }
+  else
+  {
+    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
+  }
+}
+
+/**
+ * @brief Resets the object state; called at the beginning of restore().
+ *        Currently a no-op (not implemented yet, see TODO).
+ */
+void SemSegNovelty::clear ()
+{
+ //TODO
+}

+ 308 - 0
semseg/SemSegNovelty.h

@@ -0,0 +1,308 @@
+/**
+ * @file SemSegNovelty.h
+ * @brief semantic segmentation which additionally computes per-region novelty scores
+ *        for active learning; the classification pipeline is based on the method from Csurka08
+ * @author Björn Fröhlich, Alexander Freytag
+ * @date 04/24/2009
+ */
+#ifndef _NICE_SEMSEGNOVELTYINCLUDE
+#define _NICE_SEMSEGNOVELTYINCLUDE
+
+
+// nice-core includes
+#include <core/basics/Persistent.h>
+
+// nice-vislearning includes
+#include <vislearning/classifier/classifierbase/FeaturePoolClassifier.h>
+#include <vislearning/classifier/genericClassifierSelection.h>
+#include <vislearning/features/localfeatures/LocalFeatureColorWeijer.h>
+
+// nice-segmentation includes
+#include <segmentation/RegionSegmentationMethod.h>
+
+// nice-semseg includes
+#include "SemanticSegmentation.h"
+#include "SemSegTools.h"
+
+
+
+/** @brief pixelwise labeling systems */
+
+namespace OBJREC {
+
+class SemSegNovelty : public SemanticSegmentation
+{
+
+  protected:
+    
+    /////////////////////////
+    /////////////////////////
+    // PROTECTED VARIABLES //
+    /////////////////////////
+    /////////////////////////    
+    
+    ////////////////////////////////////////
+    // variables only setable via configfile
+    ////////////////////////////////////////
+  
+    
+    ///////////////////////////////
+    //     FEATURE EXTRACTION    //
+    ///////////////////////////////  
+
+    //! feature extraction
+    LocalFeatureColorWeijer *featExtract;     
+    
+    //! distance between features for training
+    int trainWSize;
+    
+    //! half of the window size for local features
+    int whs;
+    
+    //! rectangle size for classification, 1 means pixelwise
+    int testWSize;
+    
+    
+    ///////////////////////////////
+    //     NOVELTY COMPUTATION   //
+    ///////////////////////////////
+    
+    enum NoveltyMethod{
+      GPVARIANCE, // novel = large variance
+      GPUNCERTAINTY, //novel = small uncertainty (mean / var)
+      GPMINMEAN,  //novel = small mean
+      GPMEANRATIO,  //novel = small difference between mean of most plausible class and mean of snd
+                   //        most plausible class (not useful in binary settings)
+      GPWEIGHTALL, // novel = large weight in alpha vector after updating the model (can be predicted exactly)
+      GPWEIGHTRATIO, // novel = small difference between weights for alpha vectors with assumptions of GT label to be the most 
+                    //         plausible against the second most plausible class
+      RANDOM        // query regions randomly
+    }; 
+    
+    //! specify how "novelty" shall be computed, e.g., using GP-variance, GP-uncertainty, or predicted weight entries
+    NoveltyMethod noveltyMethod;
+    //! textual representation of noveltyMethod as given in the config (kept for store/restore and result file names)
+    std::string noveltyMethodString;
+    
+    //! maximum uncertainty over all images, i.e., the novelty score of the most "novel" region of all test images
+    double globalMaxUncert;
+    
+    //! determine whether a "novelty" method computes large scores for novel objects (e.g., variance), or small scores (e.g., min abs mean)
+    bool mostNoveltyWithMaxScores;
+    
+    //! find the maximum uncertainty or not within the whole test set
+    bool findMaximumUncert;
+    
+    //! image with most uncertain region
+    NICE::ColorImage maskedImg;
+    
+    //! for debugging and visualization: show novelty images with and without region segmentation and the most novel region
+    bool b_visualizeALimages;      
+    
+    
+    ///////////////////////////////
+    //     CLASSIFICATION STUFF  //
+    /////////////////////////////// 
+    
+    //! just store the name of our classifier
+    // Theoretically redundant, but currently makes things easier for store and restore...
+    std::string classifierString;
+    
+    //! Classifier
+    FeaturePoolClassifier *classifier;
+    //! alternative vector-based classifier; only one of classifier / vclassifier is non-NULL
+    VecClassifier *vclassifier; 
+    
+    //! set of forbidden/background classes for the initial training
+    std::set<int> forbidden_classesTrain;
+    //! set of forbidden/background classes for the whole process of learning over time
+    std::set<int> forbidden_classesActiveLearning;
+    //! store the class numbers currently used
+    std::set<int> classesInUse;
+    
+   //! obviously, the number of classes used for training (i.e., classesInUse.size() )
+    int numberOfClasses;
+    
+    //! boolean whether to read the initial classifier from a file. If not, training will be performed
+    bool read_classifier;
+    
+    //! boolean whether to save the final classifier or not
+    bool save_classifier;
+
+    //! The cached Data
+    std::string cache; 
+    
+   //! where to save the resulting images (uncertainty and classification results)
+    std::string resultdir;    
+        
+    
+    //! current examples for most uncertain region
+    Examples newTrainExamples;    
+
+    //! features of the image currently being processed
+    // NOTE(review): presumably cached here for re-use by the novelty-computation methods -- confirm
+    NICE::MultiChannelImageT<double> m_CurrentImageFeatures;
+    ///////////////////////////////
+    //     SEGMENTATION STUFF    //
+    /////////////////////////////// 
+    
+    //! just store the name of our segmentation method. 
+    // Theoretically redundant, but currently makes things easier for store and restore...
+    std::string s_rsMethode;
+    
+    //! low level Segmentation method
+    RegionSegmentationMethod *regionSeg;
+
+    //! boolean whether to reuse segmentation results for single images in different runs
+    bool reuseSegmentation;    
+
+    
+    //! contains filenames of images and indices of contained regions, that where already queried, to prevent them from being queried again
+    std::map<std::string,std::set<int> > queriedRegions;
+    
+    //! image filename and region index of the most novel region found so far (the next query candidate)
+    std::pair<std::string, int> currentRegionToQuery;
+    
+
+    ///////////////////////////////
+    //     protected methods
+    ///////////////////////////////
+    
+  
+    /**
+     * @brief compute per-pixel classification results (segresult and class probabilities) from the given integral features
+     */
+    inline void computeClassificationResults( const NICE::MultiChannelImageT<double> & feats, 
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                                    const int & xsize,
+                                                    const int & ysize,
+                                                    const int & featdim );
+
+   //! each of the following methods fills noveltyImage with a per-pixel novelty score
+   //! according to one criterion of NoveltyMethod (and also computes classification results on the fly)
+   void computeNoveltyByRandom(         NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );    
+    
+   void computeNoveltyByVariance(       NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );
+   
+   void computeNoveltyByGPUncertainty ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );
+   
+   void computeNoveltyByGPMean        ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPMeanRatio   ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPWeightAll   ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPWeightRatio ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );     
+   
+  public:
+
+    /** 
+     * @brief default constructor
+     * @author Alexander Freytag
+     * @date 06-02-2014 ( dd-mm-yyyy )
+     */
+    SemSegNovelty ( );    
+    
+    /** 
+     * @brief recommended constructor
+     * @author Alexander Freytag
+     *  @param conf needs a configfile
+     *  @param md and a MultiDataset (contains images and other things)
+     */
+    SemSegNovelty ( const NICE::Config *conf, const MultiDataset *md );
+
+    /** simple destructor */
+    virtual ~SemSegNovelty();
+    
+    /** @brief initialize all member variables from the given config section */
+    void initFromConfig ( const NICE::Config * conf, const std::string _confSection = "SemSegNovelty" );
+
+    /** The trainingstep
+      *  @param md and a MultiDataset (contains images and other things)
+      */
+    void train ( const MultiDataset *md );
+
+    /** The main procedure. Input: Image, Output: Segmented Image with pixelwise labeles and the probabilities
+      * @param ce image data
+      * @param segresult result of the semantic segmentation with a label for each pixel
+      * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+      */
+    void semanticseg ( CachedExample *ce,
+                       NICE::Image & segresult,
+                       NICE::MultiChannelImageT<double> & probabilities );
+    
+    /**
+     * @brief visualize a specific region in the original image
+     *
+     * @param img input image
+     * @param regions map of the regions
+     * @param region visualize this region
+     * @param outimage result
+     * @return void
+     **/
+    void visualizeRegion(const NICE::ColorImage &img, const NICE::Matrix &regions, int region, NICE::ColorImage &outimage);
+
+    /**
+     * @brief Add a new example to the known training data
+     *
+     * @param newExample (NICE::Vector) the feature vector of the new examples
+     * @param newClassNo (int) the corresponding GT class number
+     * @return void
+     **/    
+    void addNewExample(const NICE::Vector & newExample, const int & newClassNo);
+    
+    /**
+     * @brief Add those examples, which belong to the most novel region seen so far
+     *
+     * @return void
+     **/    
+    virtual void addNovelExamples();    
+
+    /**
+     * @brief Get a pointer to the examples extracted from the most novel region seen so far
+     *
+     * @return Examples *
+     **/        
+    virtual const Examples * getNovelExamples() const; 
+    
+    ///////////////////// INTERFACE PERSISTENT /////////////////////
+    // interface specific methods for store and restore
+    ///////////////////// INTERFACE PERSISTENT /////////////////////   
+    
+    /** 
+     * @brief Load active-segmentation-object from external file (stream)
+     * @author Alexander Freytag
+     */     
+    virtual void restore ( std::istream & is, int format = 0 );
+    
+    /** 
+     * @brief Save active-segmentation-object to external file (stream)
+     * @author Alexander Freytag
+     */       
+    virtual void store( std::ostream & os, int format = 0 ) const;
+    
+    /** 
+     * @brief Clear active-segmentation-object object
+     * @author Alexander Freytag
+     */    
+    virtual void clear ();
+    
+};
+
+} //namespace
+
+#endif //_NICE_SEMSEGNOVELTYINCLUDE

+ 1527 - 0
semseg/SemSegNoveltyBinary.cpp

@@ -0,0 +1,1527 @@
+#include <sstream>
+#include <iostream>
+
+#include "SemSegNoveltyBinary.h"
+
+#include <core/image/FilterT.h>
+#include <core/basics/numerictools.h>
+#include <core/basics/StringTools.h>
+#include <core/basics/Timer.h>
+
+#include <gp-hik-exp/GPHIKClassifierNICE.h>
+#include <vislearning/baselib/ICETools.h>
+#include <vislearning/baselib/Globals.h>
+#include <vislearning/features/fpfeatures/SparseVectorFeature.h>
+
+#include "segmentation/GenericRegionSegmentationMethodSelection.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+/**
+ * @brief Constructor: reads all parameters from the config, sets up feature
+ *        extraction, region segmentation and the classifier (either loaded
+ *        from file or trained via train()), and selects the novelty method.
+ * @param conf configuration object (section "SemSegNoveltyBinary" and others)
+ * @param md   MultiDataset providing the "train" image set and class names
+ */
+SemSegNoveltyBinary::SemSegNoveltyBinary ( const Config *conf,
+                               const MultiDataset *md )
+    : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
+{
+  this->conf = conf;
+
+  // initialized such that any real score is larger; methods where small scores
+  // mean "novel" overwrite this with +max further below
+  globalMaxUncert = -numeric_limits<double>::max();
+  
+  string section = "SemSegNoveltyBinary";
+
+  featExtract = new LocalFeatureColorWeijer ( conf );
+
+  this->reuseSegmentation = conf->gB ( "FPCPixel", "reuseSegmentation", true ); //save and read segmentation results from files
+  this->save_classifier = conf->gB ( "FPCPixel", "save_classifier", true ); //save the classifier to a file
+  this->read_classifier = conf->gB ( "FPCPixel", "read_classifier", false ); //read the classifier from a file
+
+  //write uncertainty results in the same folder as done for the segmentation results
+  resultdir = conf->gS("debug", "resultdir", "result");
+  cache = conf->gS ( "cache", "root", "" );
+  
+  
+  //stupid work around of the const attribute
+  Config confCopy = *conf;
+  
+  //just to make sure, that we do NOT perform an optimization after every iteration step
+  //this would just take a lot of time, which is not desired so far
+  confCopy.sB("ClassifierGPHIK","performOptimizationAfterIncrement",false);
+  
+  // exactly one of classifier (feature-pool based) / vclassifier (vector based)
+  // is instantiated; the other one stays NULL
+  classifierString = conf->gS ( section, "classifier", "ClassifierGPHIK" );  
+  classifier = NULL;
+  vclassifier = NULL;
+  if ( classifierString.compare("ClassifierGPHIK") == 0)
+    classifier = new GPHIKClassifierNICE ( &confCopy, "ClassifierGPHIK" );
+  else
+    vclassifier = GenericClassifierSelection::selectVecClassifier ( conf, classifierString );
+  
+
+
+  findMaximumUncert = conf->gB(section, "findMaximumUncert", true);
+  whs = conf->gI ( section, "window_size", 10 );
+  //distance to next descriptor during training
+  trainWsize = conf->gI ( section, "train_window_size", 10 );
+  //distance to next descriptor during testing
+  testWSize = conf->gI (section, "test_window_size", 10);
+  // select your segmentation method here
+  string rsMethode = conf->gS ( section, "segmentation", "none" );
+ 
+  if(rsMethode == "none")
+  {
+    regionSeg = NULL;
+  }
+  else
+  {
+    RegionSegmentationMethod *tmpRegionSeg = GenericRegionSegmentationMethodSelection::selectRegionSegmentationMethod(conf, rsMethode);    
+    if ( reuseSegmentation )
+      regionSeg = new RSCache ( conf, tmpRegionSeg );
+    else
+      regionSeg = tmpRegionSeg;
+  }
+  
+  cn = md->getClassNames ( "train" );
+
+  // either restore a previously stored classifier or train a fresh one
+  if ( read_classifier )
+  {
+    try
+    {
+      if ( classifier != NULL )
+      {
+        string classifierdst = "/classifier.data";        
+        fprintf ( stderr, "SemSegNoveltyBinary:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );        
+        classifier->read ( cache + classifierdst );
+      }
+      else
+      {
+        string classifierdst = "/veccl.data";        
+        fprintf ( stderr, "SemSegNoveltyBinary:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );          
+        vclassifier->read ( cache + classifierdst );      
+      }
+      
+
+      fprintf ( stderr, "SemSegNoveltyBinary:: successfully read\n" );
+    }
+    catch ( char *str )
+    {
+      cerr << "error reading data: " << str << endl;
+    }
+  }
+  else
+  {
+    train ( md );
+  }
+  
+  //define which measure for "novelty" we want to use
+  noveltyMethodString = conf->gS( section,  "noveltyMethod", "gp-variance");
+  if (noveltyMethodString.compare("gp-variance") == 0)  // novel = large variance
+  {
+    this->noveltyMethod = GPVARIANCE;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  else if (noveltyMethodString.compare("gp-uncertainty") == 0) //novel = large uncertainty (mean / var)
+  {
+    this->noveltyMethod = GPUNCERTAINTY;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  } 
+  else if (noveltyMethodString.compare("gp-mean") == 0) //novel = small mean
+  {
+    this->noveltyMethod = GPMINMEAN;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-meanRatio") == 0)  //novel = small difference between mean of most plausible class and mean of snd
+                                                              //        most plausible class (not useful in binary settings)
+  {
+    this->noveltyMethod = GPMEANRATIO;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-weightAll") == 0) // novel = large weight in alpha vector after updating the model (can be predicted exactly)
+  {
+    this->noveltyMethod = GPWEIGHTALL;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  else if (noveltyMethodString.compare("gp-weightRatio") == 0) // novel = small difference between weights for alpha vectors 
+                                                               //     with assumptions of GT label to be the most 
+                                                               //     plausible against the second most plausible class   
+  {
+    this->noveltyMethod = GPWEIGHTRATIO;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("random") == 0) 
+  {
+     initRand(); 
+     this->noveltyMethod = RANDOM;
+  }
+  else
+  {
+    // unknown method string given in config -> fall back to gp-variance
+    this->noveltyMethod = GPVARIANCE;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  
+  //we don't have queried any region so far
+  queriedRegions.clear();
+  visualizeALimages = conf->gB(section, "visualizeALimages", false);
+  
+  resultsOfSingleRun.clear();
+  
+  write_results = conf->gB( "debug", "write_results", false );
+}
+
+/**
+ * @brief Destructor: incorporates the queried training examples (if any) into
+ *        the classifier, persists it, and releases all owned resources.
+ * @note  Fix: classifier->save() was previously called outside the NULL check;
+ *        when the vclassifier path is in use, classifier is NULL and the old
+ *        code dereferenced a NULL pointer. The save now only happens for a
+ *        valid feature-pool classifier.
+ */
+SemSegNoveltyBinary::~SemSegNoveltyBinary()
+{
+  if(newTrainExamples.size() > 0)
+  {
+    // show most uncertain region
+    if (visualizeALimages)
+      showImage(maskedImg);
+    
+    //incorporate new information into the classifier
+    if (classifier != NULL)
+    {
+      classifier->addMultipleExamples(newTrainExamples);
+    
+      //store the classifier, such that we can read it again in the next round (if we like that)
+      classifier->save ( cache + "/classifier.data" );
+    }
+  }
+  
+  // clean-up
+  if ( classifier != NULL )
+    delete classifier;
+  if ( vclassifier != NULL )
+    delete vclassifier;
+  if ( featExtract != NULL )
+    delete featExtract;
+}
+
+/**
+ * @brief visualize a specific region in the original image
+ *
+ * All pixels belonging to the requested region are painted in the highlight
+ * color (red); every other pixel is copied unchanged from the input image.
+ *
+ * @param img input image
+ * @param regions map of the regions (per-pixel region index)
+ * @param region index of the region to highlight
+ * @param outimage result image
+ **/
+void SemSegNoveltyBinary::visualizeRegion(const NICE::ColorImage &img, const NICE::Matrix &regions, int region, NICE::ColorImage &outimage)
+{
+  // highlight color for the selected region: pure red
+  const uchar highlight[3] = { 255, 0, 0 };
+
+  const int imgWidth  = img.width();
+  const int imgHeight = img.height();
+
+  outimage.resize(imgWidth, imgHeight);
+
+  for (int row = 0; row < imgHeight; row++)
+  {
+    for (int col = 0; col < imgWidth; col++)
+    {
+      const bool insideRegion = ( regions(col, row) == region );
+      for (int c = 0; c < 3; c++)
+      {
+        // either the highlight color or the original pixel value
+        outimage(col, row, c) = insideRegion ? highlight[c] : img(col, row, c);
+      }
+    }
+  }
+}
+
+/**
+ * @brief Initial training: extract local color + gradient features on a regular
+ *        grid from all training images and train the binary classifier
+ *        (configured positive class vs. everything else).
+ * @param md MultiDataset providing the "train" image set and ground-truth localization
+ */
+void SemSegNoveltyBinary::train ( const MultiDataset *md )
+{
+  const LabeledSet train = * ( *md ) ["train"];
+  const LabeledSet *trainp = &train;
+
+  ////////////////////////
+  // feature extraction //
+  ////////////////////////
+ 
+  //check the same thing for the training classes - this is very specific to our setup 
+  std::string forbidden_classesTrain_s = conf->gS ( "analysis", "donttrainTrain", "" );
+  if ( forbidden_classesTrain_s == "" )
+  {
+    forbidden_classesTrain_s = conf->gS ( "analysis", "forbidden_classesTrain", "" );
+  }
+  cn.getSelection ( forbidden_classesTrain_s, forbidden_classesTrain );
+  
+  //check whether we have a single positive class
+  std::string positiveClass_s = conf->gS ( "SemSegNoveltyBinary", "positiveClass", "" );
+  std::set<int> positiveClassNumberTmp;
+  cn.getSelection ( positiveClass_s, positiveClassNumberTmp );  
+
+  std::cerr << "BINARY SETTING ENABLED! " << std::endl;
+  // resolve the configured positive class; anything other than exactly one
+  // given class falls back to class number 0
+  switch ( positiveClassNumberTmp.size() )
+  {
+    case 0:
+    {
+      positiveClass = 0;
+      std::cerr << "no positive class given, assume 0 as positive class" << std::endl;
+      break;
+    }
+    case 1:
+    {
+      positiveClass = *(positiveClassNumberTmp.begin());
+      std::cerr << "positive class will be number" << positiveClass << " with the name: " << positiveClass_s << std::endl;
+      break;
+    }
+    default:
+    {
+      //we specified more than a single positive class. right now, this is not what we are interested in, but 
+      //in theory we could also accept this and convert positiveClass into a set of ints of possible positive classes
+      positiveClass = 0;
+      std::cerr << "no positive class given, assume 0 as positive class" << std::endl;
+      break;
+    }
+  }  
+  std::cerr << "============================" << std::endl << std::endl;  
+
+
+  ProgressBar pb ( "Local Feature Extraction" );
+  pb.show();
+
+  int imgnb = 0;
+
+  Examples examples;
+  examples.filename = "training";
+
+  int featdim = -1;
+
+  classesInUse.clear();  
+  
+  LOOP_ALL_S ( *trainp )
+  {
+    //EACH_S(classno, currentFile);
+    EACH_INFO ( classno, info );
+
+    std::string currentFile = info.img();
+
+    CachedExample *ce = new CachedExample ( currentFile );
+    
+    const LocalizationResult *locResult = info.localization();
+    if ( locResult->size() <= 0 )
+    {
+      fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
+                currentFile.c_str() );
+      continue;
+    }
+
+    int xsize, ysize;
+    ce->getImageSize ( xsize, ysize );
+
+    // rasterize the ground-truth polygons into a per-pixel label image
+    Image labels ( xsize, ysize );
+    labels.set ( 0 );
+    locResult->calcLabeledImage ( labels, ( *classNames ).getBackgroundClass() );
+
+    NICE::ColorImage img;
+    try {
+      img = ColorImage ( currentFile );
+    } catch ( Exception ) {
+      cerr << "SemSegNoveltyBinary: error opening image file <" << currentFile << ">" << endl;
+      continue;
+    }
+
+    Globals::setCurrentImgFN ( currentFile );
+
+    MultiChannelImageT<double> feats;
+
+    // extract features
+    featExtract->getFeats ( img, feats );
+    featdim = feats.channels();
+    // append one gradient-strength channel per color channel
+    feats.addChannel(featdim);
+
+    for (int c = 0; c < featdim; c++)
+    {
+      ImageT<double> tmp = feats[c];
+      ImageT<double> tmp2 = feats[c+featdim];
+
+      NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
+    }
+    // gradient channels double the feature dimension
+    featdim += featdim;
+
+    // compute integral images
+    for ( int c = 0; c < featdim; c++ )
+    {
+      feats.calcIntegral ( c );
+    }
+
+    // sample training examples on a regular grid with stride trainWsize
+    for ( int y = 0; y < ysize; y += trainWsize)
+    {
+      for ( int x = 0; x < xsize; x += trainWsize )
+      {
+
+        int classnoTmp = labels.getPixel ( x, y );
+        
+        if ( forbidden_classesTrain.find ( classnoTmp ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        
+        if (classesInUse.find(classnoTmp) == classesInUse.end())
+        {
+          classesInUse.insert(classnoTmp);
+        }
+        
+        // aggregate integral-image features over a (2*whs+1)-sized window
+        Example example;
+        example.vec = NULL;
+        example.svec = new SparseVector ( featdim );
+        for ( int f = 0; f < featdim; f++ )
+        {
+          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+          if ( val > 1e-10 )
+            ( *example.svec ) [f] = val;
+        }
+
+        example.svec->normalize();
+
+        example.position = imgnb;
+        // binary label mapping: positive class -> 1, everything else -> 0
+        if ( classnoTmp == positiveClass )
+          examples.push_back ( pair<int, Example> ( 1, example ) );
+        else
+          examples.push_back ( pair<int, Example> ( 0, example ) );
+      }
+    }
+ 
+    
+    
+
+    delete ce;
+    imgnb++;
+    pb.update ( trainp->count() );
+  }
+  
+    
+  numberOfClasses = classesInUse.size();
+  std::cerr << "numberOfClasses: " << numberOfClasses << std::endl;  
+  std::cerr << "classes in use: " << std::endl;
+  for (std::set<int>::const_iterator it = classesInUse.begin(); it != classesInUse.end(); it++)
+  {
+    std::cerr << *it << " : " <<  cn.text(*it) <<  " ";
+  }    
+  std::cerr << std::endl;
+
+  pb.hide();
+
+
+  //////////////////////
+  // train classifier //
+  //////////////////////
+  FeaturePool fp;
+
+  Feature *f = new SparseVectorFeature ( featdim );
+
+  f->explode ( fp );
+  delete f;
+
+  // two alternative training paths: feature-pool classifier vs. vector classifier
+  if ( classifier != NULL )
+  {
+    std::cerr << "train FP-classifier with " << examples.size() << " examples" << std::endl;
+    classifier->train ( fp, examples );
+    std::cerr << "training finished" << std::endl;
+  }
+  else
+  {
+    LabeledSetVector lvec;
+    convertExamplesToLSet ( examples, lvec );
+    vclassifier->teach ( lvec );
+//     if ( usegmm )
+//       convertLSetToSparseExamples ( examples, lvec );
+//     else
+    std::cerr << "classifierString: " << classifierString << std::endl;
+    // the nn classifier keeps pointers into the LSet data, so only the
+    // pointers may be removed; other classifiers copy, so the data can go
+    if (this->classifierString.compare("nn") == 0)
+    {
+      convertLSetToExamples ( examples, lvec, true /* only remove pointers to the data in the LSet-struct*/);
+    }
+    else
+    {
+      convertLSetToExamples ( examples, lvec, false /* remove all training examples of the LSet-struct */);
+    }
+    vclassifier->finishTeaching();
+  }  
+
+  fp.destroy();
+
+  if ( save_classifier )
+  {
+    if ( classifier != NULL )
+      classifier->save ( cache + "/classifier.data" );
+    else
+      vclassifier->save ( cache + "/veccl.data" );    
+  }
+
+  ////////////
+  //clean up//
+  ////////////
+  for ( int i = 0; i < ( int ) examples.size(); i++ )
+  {
+    examples[i].second.clean();
+  }
+  examples.clear();
+
+  cerr << "SemSeg training finished" << endl;
+}
+
+
+void SemSegNoveltyBinary::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
+{  
+  Timer timer;
+  timer.start();
+  
+  //segResult contains the GT labels when this method is called
+  // we simply store them in labels, to have an easy access to the GT information lateron
+  Image labels = segresult;
+  //just to be sure that we do not have a GT-biased result :)
+  segresult.set(0);
+
+  int featdim = -1;
+
+  std::string currentFile = Globals::getCurrentImgFN();
+
+
+  int xsize, ysize;
+  ce->getImageSize ( xsize, ysize );
+
+  probabilities.reInit( xsize, ysize, 2);
+  probabilities.setAll ( 0.0 );
+   
+  NICE::ColorImage img;
+  try {
+    img = ColorImage ( currentFile );
+  } catch ( Exception ) {
+    cerr << "SemSegNoveltyBinary: error opening image file <" << currentFile << ">" << endl;
+    return;
+  }
+
+  MultiChannelImageT<double> feats;
+
+  // extract features
+  featExtract->getFeats ( img, feats );
+  featdim = feats.channels();
+  feats.addChannel(featdim);
+
+  for (int c = 0; c < featdim; c++)
+  {
+    ImageT<double> tmp = feats[c];
+    ImageT<double> tmp2 = feats[c+featdim];
+
+    NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
+  }
+  featdim += featdim;
+
+  // compute integral images
+  for ( int c = 0; c < featdim; c++ )
+  {
+    feats.calcIntegral ( c );
+  }
+  
+  timer.stop();
+  std::cout << "AL time for preparation: " << timer.getLastAbsolute() << std::endl;
+    
+  timer.start();
+  //classification results currently only needed to be computed separately if we use the vclassifier, i.e., the nearest neighbor used 
+  // for the "novel feature learning" approach
+  //in all other settings, such as active sem seg in general, we do this within the novelty-computation-methods
+  if ( classifier == NULL )
+  {
+    this->computeClassificationResults( feats, segresult, probabilities, xsize, ysize, featdim);
+  }
+//   timer.stop();
+//   
+//   std::cerr << "classification results computed" << std::endl;
+  
+  FloatImage noveltyImage ( xsize, ysize );
+  noveltyImage.set ( 0.0 );  
+  
+  switch (noveltyMethod)
+  {
+    case GPVARIANCE:
+    {
+         this->computeNoveltyByVariance( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;
+    }
+    case GPUNCERTAINTY:
+    {
+         this->computeNoveltyByGPUncertainty( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPMINMEAN:
+    {
+         std::cerr << "compute novelty using the minimum mean" << std::endl;
+         this->computeNoveltyByGPMean( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPMEANRATIO:
+    {
+         this->computeNoveltyByGPMeanRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPWEIGHTALL:
+    {
+         this->computeNoveltyByGPWeightAll( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPWEIGHTRATIO:
+    {
+         this->computeNoveltyByGPWeightRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }    
+    case RANDOM:
+    {
+         this->computeNoveltyByRandom( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;               
+    }
+    default:
+    {
+         //do nothing, keep the image constant to 0.0
+         break;
+    }
+         
+  }
+  
+  timer.stop();
+  std::cout << "AL time for novelty score computation: " << timer.getLastAbsolute() << std::endl;
+
+  if ( write_results || visualizeALimages )
+  {
+    ColorImage imgrgbTmp (xsize, ysize);
+    ICETools::convertToRGB ( noveltyImage, imgrgbTmp );  
+  
+    this->cn.labelToRGB( segresult, imgrgbTmp );  
+    
+    if ( write_results )
+    {
+      std::stringstream out;
+      std::vector< std::string > list2;
+      StringTools::split ( currentFile, '/', list2 );
+      out << resultdir << "/" << list2.back();
+//       std::cerr << "writing to " << out.str() + "_run_" +  NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+"_unsmoothed.rawfloat" << std::endl;
+    
+      noveltyImage.writeRaw("run_" +  NICE::intToString(this->iterationCountSuffix) + "_" + out.str() + "_" + noveltyMethodString+"_unsmoothed.rawfloat");
+      
+    }
+    
+    if (visualizeALimages)
+    {
+        showImage(imgrgbTmp, "Novelty Image without Region Segmentation");       
+        showImage(imgrgbTmp, "Classification Result without Region Segmentation");        
+    }    
+  }
+
+  
+    
+  timer.start();
+  
+  //Regionen ermitteln
+  if(regionSeg != NULL)
+  {
+    NICE::Matrix mask;
+    int amountRegions = regionSeg->segRegions ( img, mask );
+    
+    //compute probs per region
+    std::vector<std::vector<double> > regionProb(amountRegions, std::vector<double>(probabilities.channels(),0.0));
+    std::vector<double> regionNoveltyMeasure (amountRegions, 0.0);
+
+    std::vector<int> regionCounter(amountRegions, 0);
+    std::vector<int> regionCounterNovelty(amountRegions, 0);
+    for ( int y = 0; y < ysize; y += trainWsize) //y++)
+    {
+      for (int x = 0; x < xsize; x += trainWsize) //x++)
+      {
+        int r = mask(x,y);
+        regionCounter[r]++;
+        for(int j = 0; j < probabilities.channels(); j++)
+        {
+          regionProb[r][j] += probabilities ( x, y, j );
+        }
+        
+        if ( forbidden_classesActiveLearning.find( labels(x,y) ) == forbidden_classesActiveLearning.end() )
+        {
+          //count the amount of "novelty" for the corresponding region
+          regionNoveltyMeasure[r] += noveltyImage(x,y);
+          regionCounterNovelty[r]++;
+        }
+      }
+    }
+       
+    //find best class per region
+    std::vector<int> bestClassPerRegion(amountRegions,0);
+    
+    double maxNoveltyScore = -numeric_limits<double>::max();
+    if (!mostNoveltyWithMaxScores)
+    {
+      maxNoveltyScore = numeric_limits<double>::max();
+    }   
+    
+    int maxUncertRegion = -1;
+    
+    //loop over all regions and compute averaged novelty scores
+    for(int r = 0; r < amountRegions; r++)
+    {
+      
+      //check for the most plausible class per region
+      double maxval = -numeric_limits<double>::max();
+      
+      //loop over all classes
+      for(int c = 0; c < probabilities.channels(); c++)
+      {
+        regionProb[r][c] /= regionCounter[r];
+        
+        if(  (maxval < regionProb[r][c]) ) //&& (regionProb[r][c] != 0.0) ) 
+        {        
+              maxval = regionProb[r][c];
+              bestClassPerRegion[r] = c;
+        }
+      }
+       
+      //if the region only contains unvalid information (e.g., background) skip it
+      if (regionCounterNovelty[r] == 0)
+      {
+        continue;
+      }
+      
+      //normalize summed novelty scores to region size
+      regionNoveltyMeasure[r] /= regionCounterNovelty[r];
+    
+      //did we find a region that has a higher score as the most novel region known so far within this image?
+      if(   (  mostNoveltyWithMaxScores && (maxNoveltyScore < regionNoveltyMeasure[r]) )    // if we look for large novelty scores, e.g., variance
+        || ( !mostNoveltyWithMaxScores && (maxNoveltyScore > regionNoveltyMeasure[r]) ) )  // if we look for small novelty scores, e.g., min mean
+      {
+                   //did we already query a region of this image?                --   and it was this specific region
+        if ( (queriedRegions.find( currentFile ) != queriedRegions.end() ) && ( queriedRegions[currentFile].find(r) != queriedRegions[currentFile].end() ) )
+        {
+          continue;
+        }
+        else //only accept the region as novel if we never queried it before
+        {
+          maxNoveltyScore = regionNoveltyMeasure[r];
+          maxUncertRegion = r;        
+        }
+
+      }
+
+    }
+    
+    // after finding the most novel region for the current image, check whether this region is also the most novel with respect
+    // to all previously seen test images
+    // if so, store the corresponding features, since we want to "actively" query them to incorporate useful information
+    if(findMaximumUncert)
+    {
+      if(    (   mostNoveltyWithMaxScores && (maxNoveltyScore > globalMaxUncert) )
+          || (  !mostNoveltyWithMaxScores && (maxNoveltyScore < globalMaxUncert) ) )
+      {
+        //current most novel region of the image has "higher" novelty score then previous most novel region of all test images worked on so far
+        // -> save new important features of this region
+        Examples examples;
+        for ( int y = 0; y < ysize; y += trainWsize )
+        {
+          for ( int x = 0; x < xsize; x += trainWsize)
+          {
+            if(mask(x,y) == maxUncertRegion)
+            {
+              int classnoTmp = labels(x,y);
+              if ( forbidden_classesActiveLearning.find(classnoTmp) != forbidden_classesActiveLearning.end() )
+                continue;
+              
+              Example example;
+              example.vec = NULL;
+              example.svec = new SparseVector ( featdim );
+              
+              for ( int f = 0; f < featdim; f++ )
+              {
+                double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+                if ( val > 1e-10 )
+                  ( *example.svec ) [f] = val;
+              }
+              example.svec->normalize();
+              if ( classnoTmp == positiveClass )
+                examples.push_back ( pair<int, Example> ( 1, example ) );
+              else
+                examples.push_back ( pair<int, Example> ( 0, example ) );
+            }
+          }
+        }
+        
+        if(examples.size() > 0)
+        {
+          std::cerr << "found " << examples.size() << " new examples in the queried region" << std::endl << std::endl;
+          newTrainExamples.clear();
+          newTrainExamples = examples;
+          globalMaxUncert = maxNoveltyScore;
+          //prepare for later visualization
+          visualizeRegion(img,mask,maxUncertRegion,maskedImg);
+        }
+        else
+        {
+          std::cerr << "the queried region has no valid information" << std::endl << std::endl;
+        }
+        
+        //save filename and region index
+        currentRegionToQuery.first = currentFile;
+        currentRegionToQuery.second = maxUncertRegion;
+      }
+    }
+
+    //write back best results per region
+    //i.e., write normalized novelty scores for every region into the novelty image
+    for ( int y = 0; y < ysize; y++)
+    {
+      for (int x = 0; x < xsize; x++)
+      {
+        int r = mask(x,y);
+        for(int j = 0; j < probabilities.channels(); j++)
+        {
+          probabilities ( x, y, j ) = regionProb[r][j];
+        }
+        if ( bestClassPerRegion[r] == 0 )
+          segresult(x,y) = positiveClass;
+        else //take the various class as negative
+          segresult(x,y) = 22; //bestClassPerRegion[r];
+        
+        // write novelty scores for every segment into the "final" image
+        noveltyImage(x,y) = regionNoveltyMeasure[r];
+      }
+    }
+    
+    //compute these nice Classification results
+    for ( int y = 0; y < ysize; y++)
+    {
+      for (int x = 0; x < xsize; x++)
+      {
+        OBJREC::FullVector scoresTmp (2);
+        scoresTmp[1] = probabilities ( x, y, 0 ); //probabilities[0] == negative class == scores[1]
+        scoresTmp[0] = probabilities ( x, y, 1 ); //probabilities[1] == positive class == scores[0]
+        
+        int cno = scoresTmp[1] > 0 ? 1 : 0;
+
+        ClassificationResult cr ( cno/*doesn't matter*/, scoresTmp );
+        
+        if ( labels(x,y) == positiveClass )
+          cr.classno_groundtruth = 1;
+        else
+          cr.classno_groundtruth = 0;
+        
+        resultsOfSingleRun.push_back(cr);        
+      }      
+    }
+  } // if regionSeg != null
+  
+  timer.stop();
+  std::cout << "AL time for determination of novel regions: " << timer.getLastAbsolute() << std::endl;
+
+  timer.start();
+
+  ColorImage imgrgb ( xsize, ysize );
+
+  if ( write_results )
+  {
+    std::stringstream out;
+    std::vector< std::string > list2;
+    StringTools::split ( currentFile, '/', list2 );
+    out << resultdir << "/" << list2.back();
+    
+    noveltyImage.writeRaw(out.str() + "_run_" +  NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+".rawfloat");
+  }
+  
+  if (visualizeALimages)
+  {
+    ICETools::convertToRGB ( noveltyImage, imgrgb );
+    showImage(imgrgb, "Novelty Image");
+    
+    ColorImage tmp (xsize, ysize);
+    cn.labelToRGB(segresult,tmp);
+    showImage(tmp, "Cl result after region seg");    
+  }
+
+  timer.stop();
+  cout << "AL time for writing the raw novelty image: " << timer.getLastAbsolute() << endl;
+}
+
+// Classify the image on a regular grid (spacing testWSize) and fill
+// 'probabilities' (per-class scores) as well as 'segresult' (winning label).
+// Two code paths exist: 'classifier' works on sparse integral-image features
+// (Example/SparseVector), 'vclassifier' on dense NICE::Vector features.
+// Each grid result is replicated over its whole testWSize x testWSize cell.
+inline void SemSegNoveltyBinary::computeClassificationResults( const NICE::MultiChannelImageT<double> & feats, 
+                                                   NICE::Image & segresult,
+                                                   NICE::MultiChannelImageT<double> & probabilities,
+                                                   const int & xsize,
+                                                   const int & ysize,
+                                                   const int & featdim
+                                                       )
+{
+  // NOTE(review): leftover debug output -- consider removing for production runs
+  std::cerr << "featdim: " << featdim << std::endl;
+  
+  if ( classifier != NULL )
+  {  
+
+            
+    #pragma omp parallel for
+    for ( int y = 0; y < ysize; y += testWSize )
+    {
+      // one sparse feature vector per row, reused (cleared) for every window
+      Example example;
+      example.vec = NULL;
+      example.svec = new SparseVector ( featdim );
+      for ( int x = 0; x < xsize; x += testWSize)
+      {
+        // accumulate the integral-image response of the (2*whs+1)^2 window
+        for ( int f = 0; f < featdim; f++ )
+        {
+          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+          if ( val > 1e-10 )
+            ( *example.svec ) [f] = val;
+        }
+        example.svec->normalize();
+
+        ClassificationResult cr = classifier->classify ( example );
+
+        // clamp the result cell to the image borders
+        int xs = std::max(0, x - testWSize/2);
+        int xe = std::min(xsize - 1, x + testWSize/2);
+        int ys = std::max(0, y - testWSize/2);
+        int ye = std::min(ysize - 1, y + testWSize/2);
+        for (int yl = ys; yl <= ye; yl++)
+        {
+          for (int xl = xs; xl <= xe; xl++)
+          {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              probabilities ( xl, yl, j ) = cr.scores[j];
+            }
+            
+            // classno 1 is the positive class; everything else is mapped to
+            // label 22 ("various", the catch-all negative class)
+            if ( cr.classno == 1 )
+              segresult ( xl, yl ) = positiveClass;
+            else
+              segresult ( xl, yl ) = 22; //various  
+          }
+        }
+        
+        example.svec->clear();
+      }
+      delete example.svec;
+      example.svec = NULL;
+    }
+  }
+  else //vclassifier
+  {
+    std::cerr << "compute classification results with vclassifier" << std::endl;
+    #pragma omp parallel for
+    for ( int y = 0; y < ysize; y += testWSize )
+    {
+      for ( int x = 0; x < xsize; x += testWSize)
+      {
+        // dense feature variant: same window response, L1-normalized
+        NICE::Vector v(featdim);
+        for ( int f = 0; f < featdim; f++ )
+        {
+          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+          v[f] = val;
+        }
+        v.normalizeL1();
+
+        ClassificationResult cr = vclassifier->classify ( v );
+
+        int xs = std::max(0, x - testWSize/2);
+        int xe = std::min(xsize - 1, x + testWSize/2);
+        int ys = std::max(0, y - testWSize/2);
+        int ye = std::min(ysize - 1, y + testWSize/2);
+        for (int yl = ys; yl <= ye; yl++)
+        {
+          for (int xl = xs; xl <= xe; xl++)
+          {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              probabilities ( xl, yl, j ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == 1 )
+              segresult ( xl, yl ) = positiveClass;
+            else
+              segresult ( xl, yl ) = 22; //various  
+          }
+        }
+      }
+    }    
+
+  }
+}
+
+// compute novelty images depending on the strategy chosen
+
+// Baseline strategy: assign one uniformly drawn random value per grid cell as
+// the "novelty" score; classification output is still written so that
+// segresult/probabilities stay comparable to the other strategies.
+void SemSegNoveltyBinary::computeNoveltyByRandom(         NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim  )
+{
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // sparse BoV feature of the local window, normalized
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+  
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      
+      // one random score per grid cell (not per pixel)
+      double randVal = randDouble();
+
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              // NOTE(review): comparing a real-valued score against 1 looks
+              // suspicious (possibly 'j == 1' was intended) -- kept as in the
+              // sibling methods, verify against the original implementation
+              if ( cr.scores[j] == 1)
+                probabilities ( xl, yl, j ) = cr.scores[j];
+              else
+                probabilities ( xl, yl, 0 ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == 1 )
+              segresult ( xl, yl ) = positiveClass;
+            else
+              segresult ( xl, yl ) = 22; //various  
+              
+          noveltyImage ( xl, yl ) = randVal; 
+        }
+      }     
+      
+      // bugfix: reset the sparse vector so stale entries of the previous
+      // window do not survive into the next one (sibling methods do the same)
+      example.svec->clear();
+    }
+    // bugfix: the per-row example was leaked before -- release it as the
+    // sibling novelty methods do
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+
+// Novelty strategy "variance": the predictive variance of the classifier
+// (cr.uncertainty) serves directly as the per-pixel novelty score.
+void SemSegNoveltyBinary::computeNoveltyByVariance(       NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+#pragma omp parallel for
+  for ( int row = 0; row < ysize; row += testWSize )
+  {
+    // one sparse feature vector per row, reused (cleared) for every window
+    Example ex;
+    ex.vec = NULL;
+    ex.svec = new SparseVector ( featdim );
+    for ( int col = 0; col < xsize; col += testWSize )
+    {
+      // build the normalized sparse feature of the local window
+      for ( int d = 0; d < featdim; d++ )
+      {
+        const double response = feats.getIntegralValue ( col - whs, row - whs, col + whs, row + whs, d );
+        if ( response > 1e-10 )
+          ( *ex.svec ) [d] = response;
+      }
+      ex.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( ex );
+
+      // replicate the result over the whole testWSize x testWSize cell,
+      // clamped to the image borders
+      const int left   = std::max ( 0, col - testWSize/2 );
+      const int right  = std::min ( xsize - 1, col + testWSize/2 );
+      const int top    = std::max ( 0, row - testWSize/2 );
+      const int bottom = std::min ( ysize - 1, row + testWSize/2 );
+
+      for ( int yy = top; yy <= bottom; yy++ )
+      {
+        for ( int xx = left; xx <= right; xx++ )
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            if ( cr.scores[j] == 1)
+              probabilities ( xx, yy, j ) = cr.scores[j];
+            else
+              probabilities ( xx, yy, 0 ) = cr.scores[j];
+          }
+
+          if ( cr.classno == 1 )
+            segresult ( xx, yy ) = positiveClass;
+          else
+            segresult ( xx, yy ) = 22; //various
+
+          noveltyImage ( xx, yy ) = cr.uncertainty;
+        }
+      }
+
+      ex.svec->clear();
+    }
+    delete ex.svec;
+    ex.svec = NULL;
+  }  
+}
+
+// Novelty strategy "GP uncertainty" (Kapoor et al., IJCV 2010):
+// score = |GP mean| / sqrt(GP variance + GP noise). Small values indicate
+// samples close to the decision boundary, i.e., good active-learning queries.
+void SemSegNoveltyBinary::computeNoveltyByGPUncertainty(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  
+  // GP observation noise, taken from the GPHIK section of the config
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // normalized sparse feature of the local window
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+           
+      // NOTE(review): 'abs' on a double relies on the std:: overload being in
+      // scope; with only <cstdlib> this truncates to int -- verify includes
+      double gpMeanVal = abs(cr.scores[0]);    //very specific to the binary setting  
+
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+      
+      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
+      double gpUncertaintyVal = gpMeanVal*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))
+
+      // replicate result over the testWSize cell, clamped to image borders
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {         
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              if ( cr.scores[j] == 1)
+                probabilities ( xl, yl, j ) = cr.scores[j];
+              else
+                probabilities ( xl, yl, 0 ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == positiveClass )
+              segresult ( xl, yl ) = cr.classno;
+            else
+              segresult ( xl, yl ) = 22; //various        
+          noveltyImage ( xl, yl ) = gpUncertaintyVal;  
+        }
+      }   
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+// Novelty strategy "GP mean": score = |GP predictive mean| of the binary
+// classifier. With the min-mean query strategy, small |mean| (close to the
+// decision boundary) marks the most novel regions.
+void SemSegNoveltyBinary::computeNoveltyByGPMean(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  // read for consistency with the other GP strategies (not used by this score)
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+    
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // normalized sparse feature of the local window
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double gpMeanVal = abs(cr.scores[0]);  //very specific to the binary setting  
+  
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              // NOTE(review): unlike the sibling methods all scores are written
+              // into channel 0 here -- verify whether channel j was intended
+              probabilities ( xl, yl, 0 ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == 1 )
+              segresult ( xl, yl ) = positiveClass;
+            else
+              segresult ( xl, yl ) = 22; //various  
+              
+          noveltyImage ( xl, yl ) = gpMeanVal; 
+        }
+      }     
+      
+      // bugfix: reset the sparse vector for the next window (siblings do the same)
+      example.svec->clear();
+    }
+    // bugfix: the per-row example was leaked before -- free it like the
+    // sibling novelty methods do
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+// Novelty strategy "GP mean ratio": difference of the absolute GP means of the
+// two most plausible classes. In the binary setting this collapses to
+// 2*|mean|, see the NOTE below.
+void SemSegNoveltyBinary::computeNoveltyByGPMeanRatio(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  // read for consistency with the other GP strategies (not used by this score)
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+  //NOTE in a binary setting this score equals 2*|mean|, cf. computeNoveltyByGPMean
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // normalized sparse feature of the local window
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+     
+      //look at the difference in the absolut mean values for the most plausible class
+      // and the second most plausible class
+      double gpMeanRatioVal= 2*abs(cr.scores[0]);  //very specific to the binary setting  
+
+
+      // replicate result over the testWSize cell, clamped to image borders
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              if ( cr.scores[j] == 1)
+                probabilities ( xl, yl, j ) = cr.scores[j];
+              else
+                probabilities ( xl, yl, 0 ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == positiveClass )
+              segresult ( xl, yl ) = cr.classno;
+            else
+              segresult ( xl, yl ) = 22; //various      
+          noveltyImage ( xl, yl ) = gpMeanRatioVal;
+        }
+      }    
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+// Novelty strategy "GP weight all": estimated change of all model weights when
+// adding the sample: min(|mean+1|, |mean-1|) / sqrt(var + noise) in the
+// binary setting.
+void SemSegNoveltyBinary::computeNoveltyByGPWeightAll(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  // GP observation noise, taken from the GPHIK section of the config
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // normalized sparse feature of the local window
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+      
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+      
+      double gpWeightAllVal ( 0.0 );
+
+      //binary scenario
+      gpWeightAllVal = std::min( abs(cr.scores[0]+1), abs(cr.scores[0]-1) );
+      gpWeightAllVal *= firstTerm;
+
+      // replicate result over the testWSize cell, clamped to image borders
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              if ( cr.scores[j] == 1)
+                probabilities ( xl, yl, j ) = cr.scores[j];
+              else
+                probabilities ( xl, yl, 0 ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == positiveClass )
+              segresult ( xl, yl ) = cr.classno;
+            else
+              segresult ( xl, yl ) = 22; //various         
+          noveltyImage ( xl, yl ) = gpWeightAllVal;
+        }
+      }
+   
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+// Novelty strategy "GP weight ratio": in the binary setting this is twice the
+// "GP weight all" score, see the NOTE below.
+void SemSegNoveltyBinary::computeNoveltyByGPWeightRatio(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  // GP observation noise, taken from the GPHIK section of the config
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+  //NOTE in a binary setting this score equals 2*weightAll, cf. computeNoveltyByGPWeightAll
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      // normalized sparse feature of the local window
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+ 
+
+       double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+       double gpWeightRatioVal ( 0.0 );
+
+      //binary scenario
+      gpWeightRatioVal = std::min( abs(cr.scores[0]+1), abs(cr.scores[0]-1) );
+      gpWeightRatioVal *= 2*firstTerm;
+
+      // replicate result over the testWSize cell, clamped to image borders
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            { 
+              if ( cr.scores[j] == 1)
+                probabilities ( xl, yl, j ) = cr.scores[j];
+              else
+                probabilities ( xl, yl, 0 ) = cr.scores[j];
+            }
+            
+            if ( cr.classno == positiveClass )
+              segresult ( xl, yl ) = cr.classno;
+            else
+              segresult ( xl, yl ) = 22; //various         
+          noveltyImage ( xl, yl ) = gpWeightRatioVal;  
+        }
+      }
+       
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+
+// Incorporate a single labeled feature vector into the running system:
+// the class is unlocked for training if necessary and -- for the nearest
+// neighbour vclassifier -- the example is taught directly.
+void SemSegNoveltyBinary::addNewExample(const NICE::Vector& newExample, const int & newClassNo)
+{
+  // unlock the class if it was forbidden for training so far
+  if ( forbidden_classesTrain.find ( newClassNo ) != forbidden_classesTrain.end() )
+  {
+    forbidden_classesTrain.erase ( newClassNo );
+    numberOfClasses++;
+  }
+
+  // remember that this class is now in use
+  if ( classesInUse.find ( newClassNo ) == classesInUse.end() )
+    classesInUse.insert ( newClassNo );
+
+  // hand the example over to the classifier actually in use
+  if ( classifier != NULL )
+  {
+    //TODO
+  }
+  else //vclassifier
+  {
+    // single-example updates are only supported for the "nn" classifier
+    if ( this->classifierString.compare ( "nn" ) == 0 )
+      vclassifier->teach ( newClassNo, newExample );
+  }
+}
+
+// Incorporate the features gathered from the most novel region
+// (newTrainExamples, filled during the last novelty computation) into the
+// classifier, write the queried region to disk, update the bookkeeping of
+// already queried regions, and reset the global novelty maximum for the
+// next active-learning iteration.
+void SemSegNoveltyBinary::addNovelExamples()
+{
+
+  Timer timer;
+  
+  //show the image that contains the most novel region
+  if (visualizeALimages)
+    showImage(maskedImg, "Most novel region");  
+  
+  timer.start();
+    
+  // store the queried (masked) image for later inspection
+  std::stringstream out;
+  std::vector< std::string > list;
+  StringTools::split ( currentRegionToQuery.first, '/', list );  
+  out << resultdir << "/" << list.back();     
+    
+  maskedImg.writePPM ( out.str() + "_run_" +  NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+ "_query.ppm" );
+
+  
+  timer.stop();
+  std::cerr << "AL time for writing queried image: " << timer.getLast() << std::endl;
+
+  timer.start();
+  
+  //check which classes will be added using the features from the novel region
+  std::set<int> newClassNumbers;
+  newClassNumbers.clear(); //just to be sure  
+  for ( uint i = 0 ; i < newTrainExamples.size() ; i++ )
+  {
+    if (newClassNumbers.find(newTrainExamples[i].first /* classNumber*/) == newClassNumbers.end() )
+    {
+      newClassNumbers.insert(newTrainExamples[i].first );
+    }
+  }
+
+  //accept the new classes as valid information
+  for (std::set<int>::const_iterator clNoIt = newClassNumbers.begin(); clNoIt != newClassNumbers.end(); clNoIt++)
+  {
+    if ( forbidden_classesTrain.find ( *clNoIt ) != forbidden_classesTrain.end() )
+    {
+      forbidden_classesTrain.erase(*clNoIt);
+      numberOfClasses++;
+    }
+    if ( classesInUse.find ( *clNoIt ) == classesInUse.end() )
+    {
+      classesInUse.insert( *clNoIt );
+    }
+  }
+  
+  timer.stop();
+  std::cerr << "AL time for accepting possible new classes: " << timer.getLast() << std::endl;
+  
+  timer.start();
+  //then add the new features to the classifier used
+  // (incremental updates are currently only supported for GPHIK)
+  if ( classifier != NULL )
+  { 
+    if (this->classifierString.compare("ClassifierGPHIK") == 0)    
+    {
+      classifier->addMultipleExamples ( this->newTrainExamples );
+    }    
+  }
+  else //vclassifier
+  {
+    //TODO
+  }
+  
+  timer.stop();
+  std::cerr << "AL time for actually updating the classifier: " << timer.getLast() << std::endl;
+  
+  std::cerr << "the current region to query is: " << currentRegionToQuery.first << " -- " << currentRegionToQuery.second << std::endl;
+  
+  //did we already query a region of this image?
+  // remember the region index so it is never queried twice
+  if ( queriedRegions.find( currentRegionToQuery.first ) != queriedRegions.end() )
+  {
+    queriedRegions[ currentRegionToQuery.first ].insert(currentRegionToQuery.second);
+  }
+  else
+  {
+    std::set<int> tmpSet; tmpSet.insert(currentRegionToQuery.second);
+    queriedRegions.insert(std::pair<std::string,std::set<int> > (currentRegionToQuery.first, tmpSet ) );
+  }  
+  
+  std::cerr << "Write already queried regions: " << std::endl;
+  for (std::map<std::string,std::set<int> >::const_iterator it = queriedRegions.begin(); it != queriedRegions.end(); it++)
+  {
+    std::cerr << "image: " << it->first << " --   ";
+    for (std::set<int>::const_iterator itReg = it->second.begin(); itReg != it->second.end(); itReg++)
+    {
+      std::cerr << *itReg << " ";
+    } 
+    std::cerr << std::endl;
+  }
+  
+  //clear the latest results, since one iteration is over
+  // the sign of the "best" score depends on the query strategy (max vs. min)
+  globalMaxUncert = -numeric_limits<double>::max();
+  if (!mostNoveltyWithMaxScores)
+    globalMaxUncert = numeric_limits<double>::max();
+}
+
+// Read-only access to the examples gathered from the most novel region.
+const Examples * SemSegNoveltyBinary::getNovelExamples() const
+{
+  return &newTrainExamples;
+}
+
+
+// Evaluate the binary classification quality of the last run: prints the
+// ground-truth class balance and the average recognition rate, and returns
+// the area under the ROC curve (AUC).
+double SemSegNoveltyBinary::getAUCPerformance() const
+{
+  std::cerr << "evaluate AUC performance" << std::endl;
+
+  // count ground-truth positives and negatives of the collected results
+  int noGTPositives ( 0 );
+  int noGTNegatives ( 0 );
+
+  std::vector<OBJREC::ClassificationResult>::const_iterator it;
+  for ( it = resultsOfSingleRun.begin(); it != resultsOfSingleRun.end(); ++it )
+  {
+    if ( it->classno_groundtruth == 1 )
+      noGTPositives++;
+    else
+      noGTNegatives++;
+  }
+
+  std::cerr << "GT positives: " << noGTPositives << " -- GT negatives: " << noGTNegatives << std::endl;
+  std::cerr << "ARR: " << resultsOfSingleRun.getAverageRecognitionRate() << std::endl;
+
+  return resultsOfSingleRun.getBinaryClassPerformance ( ClassificationResults::PERF_AUC );
+}

+ 245 - 0
semseg/SemSegNoveltyBinary.h

@@ -0,0 +1,245 @@
+/**
+ * @file SemSegNoveltyBinary.h
+ * @brief semantic segmentation with novelty detection and active learning for binary classification settings
+ * @author Björn Fröhlich, Alexander Freytag
+ * @date 04/24/2009
+ */
+#ifndef SemSegNoveltyBinaryINCLUDE
+#define SemSegNoveltyBinaryINCLUDE
+
+#include "SemanticSegmentation.h"
+
+#include "SemSegTools.h"
+#include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
+#include "vislearning/classifier/genericClassifierSelection.h"
+#include "vislearning/features/localfeatures/LocalFeatureColorWeijer.h"
+#include "vislearning/cbaselib/ClassificationResults.h"
+
+#include "segmentation/RegionSegmentationMethod.h"
+
+
+/** @brief pixelwise labeling systems */
+
+namespace OBJREC {
+
+class SemSegNoveltyBinary : public SemanticSegmentation
+{
+
+  protected:
+    //! boolean whether to reuse segmentation results for single images in different runs
+    bool reuseSegmentation;
+
+    //! boolean whether to read the initial classifier from a file. If not, training will be performed
+    bool read_classifier;
+    
+    //! boolean whether to save the final classifier or not
+    bool save_classifier;
+
+    //! The cached Data
+    std::string cache;
+    
+    //! Classifier
+    FeaturePoolClassifier *classifier;
+    VecClassifier *vclassifier;
+    
+    //! feature extraction
+    LocalFeatureColorWeijer *featExtract;
+    
+    //! Configuration File
+    const NICE::Config *conf;
+    
+    //! distance between features for training
+    int trainWsize;
+    
+    //! half of the window size for local features
+    int whs;
+    
+    //! rectangle size for classification, 1 means pixelwise
+    int testWSize;
+    
+    //! name of all classes
+    ClassNames cn;
+    
+    //! low level Segmentation method
+    RegionSegmentationMethod *regionSeg;
+    
+    //! set of forbidden/background classes for the initial training
+    std::set<int> forbidden_classesTrain;
+    //! set of forbidden/background classes for the whole process of learning over time
+    std::set<int> forbidden_classesActiveLearning;
+    //! store the class numbers currently used
+    std::set<int> classesInUse;
+    
+    //! only needed for binary scenarios, index of the positive class
+    int positiveClass;    
+    
+    //! obviously, the number of classes used for training (i.e., classesInUse.size() )
+    int numberOfClasses; 
+    
+    //! where to save the resulting images (uncertainty and classification results)
+    std::string resultdir;
+    
+    //! find the maximum uncertainty or not within the whole test set
+    bool findMaximumUncert;
+    
+    //! image with most uncertain region
+    NICE::ColorImage maskedImg;
+    
+    //! for debugging and visualization: show novelty images with and without region segmentation and the most novel region
+    bool visualizeALimages;
+    
+    //! maximum uncertainty over all images, i.e., the novelty score of the most "novel" region of all test images
+    double globalMaxUncert;
+    
+    //! determine whether a "novelty" method computes large scores for novel objects (e.g., variance), or small scores (e.g., min abs mean)
+    bool mostNoveltyWithMaxScores;
+    
+    //! current examples for most uncertain region
+    Examples newTrainExamples;
+    
+    //! contains filenames of images and indices of contained regions, that where already queried, to prevent them from being queried again
+    std::map<std::string,std::set<int> > queriedRegions;
+        
+    std::pair<std::string, int> currentRegionToQuery;
+    
+    //! store the binary classification results from a single run to evaluate them with AUC later on
+    ClassificationResults resultsOfSingleRun;
+    
+    //! whether to write results (NOTE(review): exact usage not visible in this header — confirm in the implementation)
+    bool write_results;
+    
+    enum NoveltyMethod{
+      GPVARIANCE, // novel = large variance
+      GPUNCERTAINTY, //novel = small uncertainty (mean / var)
+      GPMINMEAN,  //novel = small mean
+      GPMEANRATIO,  //novel = small difference between mean of most plausible class and mean of snd
+                   //        most plausible class (not useful in binary settings)
+      GPWEIGHTALL, // novel = large weight in alpha vector after updating the model (can be predicted exactly)
+      GPWEIGHTRATIO, // novel = small difference between weights for alpha vectors with assumptions of GT label to be the most 
+                    //         plausible against the second most plausible class
+      RANDOM        // query regions randomly
+    }; 
+    
+    //! specify how "novelty" shall be computed, e.g., using GP-variance, GP-uncertainty, or predicted weight entries
+    NoveltyMethod noveltyMethod;
+    std::string noveltyMethodString;
+    
+    //! just store the name of our classifier
+    std::string classifierString;
+    
+    //! run the classifier on every grid point and fill segresult/probabilities
+    inline void computeClassificationResults( const NICE::MultiChannelImageT<double> & feats, 
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                                    const int & xsize,
+                                                    const int & ysize,
+                                                    const int & featdim );
+
+   // The following helpers each fill noveltyImage according to one
+   // NoveltyMethod (see enum above for the semantics of each score).
+   void computeNoveltyByRandom(         NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );    
+    
+   void computeNoveltyByVariance(       NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );
+   
+   void computeNoveltyByGPUncertainty ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );
+   
+   void computeNoveltyByGPMean        ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPMeanRatio   ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPWeightAll   ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPWeightRatio ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );     
+   
+  public:
+
+    /** constructor
+      *  @param conf needs a configfile
+      *  @param md and a MultiDataset (contains images and other things)
+      */
+    SemSegNoveltyBinary ( const NICE::Config *conf, const MultiDataset *md );
+
+    /** simple destructor */
+    virtual ~SemSegNoveltyBinary();
+
+    /** The trainingstep
+      *  @param md and a MultiDataset (contains images and other things)
+      */
+    void train ( const MultiDataset *md );
+
+    /** The main procedure. Input: Image, Output: Segmented Image with pixelwise labeles and the probabilities
+      * @param ce image data
+      * @param segresult result of the semantic segmentation with a label for each pixel
+      * @param probabilities multi-channel image with one channel for each class and corresponding probabilities for each pixel
+      */
+    void semanticseg ( CachedExample *ce,
+                       NICE::Image & segresult,
+                       NICE::MultiChannelImageT<double> & probabilities );
+    
+    
+    /**
+     * @brief visualize a specific region in the original image
+     *
+     * @param img input image
+     * @param regions map of the regions
+     * @param region visualize this region
+     * @param outimage result
+     * @return void
+     **/
+    void visualizeRegion(const NICE::ColorImage &img, const NICE::Matrix &regions, int region, NICE::ColorImage &outimage);
+
+    /**
+     * @brief Add a new example to the known training data
+     *
+     * @param newExample (NICE::Vector) the feature vector of the new examples
+     * @param newClassNo (int) the corresponding GT class number
+     * @return void
+     **/    
+    void addNewExample(const NICE::Vector & newExample, const int & newClassNo);
+    
+    /**
+     * @brief Add those examples, which belong to the most novel region seen so far
+     *
+     * @return void
+     **/    
+    virtual void addNovelExamples();    
+
+    /**
+     * @brief Get a pointer to the examples extracted from the most novel region seen so far
+     *
+     * @return Examples *
+     **/        
+    virtual const Examples * getNovelExamples() const; 
+    
+    /**
+     * @brief Compute AUC scores from the results of the images computed so far
+     *
+     * @return double
+     **/       
+    double getAUCPerformance() const;
+};
+
+} //namespace
+
+#endif

+ 263 - 0
semseg/operations/Operations.cpp

@@ -0,0 +1,263 @@
+#include "Operations.h"
+
+using namespace OBJREC;
+using namespace std;
+using namespace NICE;
+
+// Default constructor: no value-access strategy attached yet; maxtypes is
+// an upper bound used when enumerating feature types.
+Operation::Operation()
+{
+  values = NULL;
+  maxtypes = 1000;
+}
+
+// Store the two reference points, the two channels and the value-access
+// strategy that the getVal() implementations will use.
+void Operation::set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+{
+  x1 = _x1;
+  y1 = _y1;
+  x2 = _x2;
+  y2 = _y2;
+  channel1 = _channel1;
+  channel2 = _channel2;
+  values = _values;
+}
+
+// Mark this operation as a context feature (or not).
+void Operation::setContext ( bool _context )
+{
+  context = _context;
+}
+
+// Whether this operation was marked as a context feature.
+bool Operation::getContext()
+{
+  return context;
+}
+
+// Set the feature-type id of this operation.
+void Operation::setFeatType ( int _featType )
+{
+  featType = _featType;
+}
+
+// Feature-type id of this operation.
+int Operation::getFeatType()
+{
+  return featType;
+}
+
+// Query width/height (in pixels) of the raw feature image.
+void Operation::getXY ( const Features &feats, int &xsize, int &ysize )
+{
+  xsize = feats.feats->width();
+  ysize = feats.feats->height();
+}
+
+
+// Serialize the operation parameters; the second line encodes the type of
+// the attached ValueAccess object (-1 if none is set).
+void Operation::store ( std::ostream & os )
+{
+  os << x1 << " " << x2 << " " << y1 << " " << y2 << " " << channel1 << " " << channel2 << " " << featType << std::endl;
+  if ( values == NULL )
+    os << -1 << std::endl;
+  else
+    os << values->getType() << std::endl;
+}
+
+// Counterpart of store(): read the parameters back and re-create the
+// ValueAccess object from its type id; throws for unknown (>=0) type ids.
+void Operation::restore ( std::istream &is )
+{
+  is >> x1;
+  is >> x2;
+  is >> y1;
+  is >> y2;
+  is >> channel1;
+  is >> channel2;
+  is >> featType;
+
+  int tmp;
+  is >> tmp;
+
+  if ( tmp >= 0 )
+  {
+    if ( tmp == RAWFEAT )
+    {
+      values = new MCImageAccess();
+    }
+    else if ( tmp == CONTEXT )
+    {
+      values = new ClassificationResultAccess();
+    }
+    else
+    {
+      throw ( "no valid ValueAccess" );
+    }
+  }
+  else
+  {
+    values = NULL;
+  }
+}
+
+// Human-readable dump of the operation parameters (for debugging output).
+std::string Operation::writeInfos()
+{
+  std::stringstream ss;
+  ss << " x1: " << x1 << " y1: " << y1 << " x2: " << x2 << " y2: " << y2 << " c1: " << channel1 << " c2: " << channel2;
+  return ss.str();
+}
+
+// Probability of class channel2 for the region that pixel (x,y) belongs to;
+// channel1 is the channel of the feature image that stores the region index.
+double RegionFeat::getVal ( const Features &feats, const int &x, const int &y )
+{
+  return (*feats.rProbs)[(*feats.feats)(x,y,channel1)][channel2];
+}
+
+// Difference of two sample values at offsets (x1,y1) and (x2,y2);
+// coordinates are clamped to the image borders via BOUND.
+double Minus::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
+  double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel2 );
+  return v1 -v2;
+}
+
+// Absolute difference of the two clamped sample values.
+double MinusAbs::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
+  double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel2 );
+  return abs ( v1 -v2 );
+}
+
+// Sum of the two clamped sample values.
+double Addition::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
+  double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize -
+                               1 ), channel2 );
+  return v1 + v2;
+}
+
+// Single clamped sample value at offset (x1,y1); the second point is unused.
+double Only1::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
+  return v1;
+}
+
+// x coordinate normalized by the image width (range [0,1)).
+double RelativeXPosition::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  return ( double ) x / ( double ) xsize;
+}
+
+// y coordinate normalized by the image height (range [0,1)).
+double RelativeYPosition::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  return ( double ) y / ( double ) ysize;
+}
+
+// Sum over the rectangle (x+x1,y+y1)..(x+x2,y+y2) in channel1, evaluated
+// via the integral image of the feature channel.
+double IntegralOps::getVal ( const Features &feats, const int &x, const int &y )
+{
+  return feats.feats->getIntegralValue(x + x1, y + y1, x + x2, y + y2, channel1);
+}
+
+// Sum over the whole image in channel1 (position-independent global feature).
+double GlobalFeats::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  return feats.feats->getIntegralValue( 0, 0, xsize - 1, ysize - 1, channel1 );
+}
+
+// Sum over a box centred at (x,y) with half-extents (x1,y1).
+double IntegralCenteredOps::getVal ( const Features &feats, const int &x, const int &y )
+{
+  return feats.feats->getIntegralValue(x - x1, y - y1, x + x1, y + y1, channel1);
+}
+
+// Difference of two boxes centred at (x,y): half-extents (x1,y1) minus (x2,y2).
+double BiIntegralCenteredOps::getVal ( const Features &feats, const int &x, const int &y )
+{
+  return feats.feats->getIntegralValue(x - x1, y - y1, x + x1, y + y1, channel1 ) - feats.feats->getIntegralValue(x - x2, y - y2, x + x2, y + y2, channel1);
+}
+
+// Haar-like feature: upper half-box minus lower half-box (split at row y).
+double HaarHorizontal::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int tlx = x - x1;
+  int tly = y - y1;
+  int lrx = x + x1;
+  int lry = y + y1;
+
+  return feats.feats->getIntegralValue(tlx, tly, lrx, y, channel1 ) - feats.feats->getIntegralValue(tlx, y, lrx, lry, channel1);
+}
+
+// Haar-like feature: left half-box minus right half-box (split at column x).
+double HaarVertical::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int tlx = x - x1;
+  int tly = y - y1;
+  int lrx = x + x1;
+  int lry = y + y1;
+
+  return feats.feats->getIntegralValue(tlx, tly, x, lry, channel1) - feats.feats->getIntegralValue(x, tly, lrx, lry, channel1);
+}
+
+// Haar-like feature: diagonal quadrants (TL+BR) minus anti-diagonal (BL+TR).
+double HaarDiag::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int tlx = x - x1;
+  int tly = y - y1;
+  int lrx = x + x1;
+  int lry = y + y1;
+
+  return feats.feats->getIntegralValue(tlx, tly, x, y, channel1) + feats.feats->getIntegralValue(x, y, lrx, lry, channel1) - feats.feats->getIntegralValue(tlx, y, x, lry, channel1) - feats.feats->getIntegralValue(x, tly, lrx, y, channel1);
+}
+
+// Haar-like three-band feature, horizontal stripes: top + bottom minus middle.
+double Haar3Horiz::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int tlx = x - x2;
+  int tly = y - y2;
+  int mtly = y - y1;
+  int mlry = y + y1;
+  int lrx = x + x2;
+  int lry = y + y2;
+
+  return feats.feats->getIntegralValue(tlx, tly, lrx, mtly, channel1) - feats.feats->getIntegralValue(tlx, mtly, lrx, mlry, channel1) + feats.feats->getIntegralValue(tlx, mlry, lrx, lry, channel1);
+}
+
+// Haar-like three-band feature, vertical stripes: left + right minus middle.
+double Haar3Vert::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int tlx = x - x2;
+  int tly = y - y2;
+  int mtlx = x - x1;
+  int mlrx = x + x1;
+  int lrx = x + x2;
+  int lry = y + y2;
+
+  return feats.feats->getIntegralValue(tlx, tly, mtlx, lry, channel1) - feats.feats->getIntegralValue(mtlx, tly, mlrx, lry, channel1) + feats.feats->getIntegralValue(mlrx, tly, lrx, lry, channel1);
+}
+
+// Normalize the rectangle so that (x1,y1) is the top-left and (x2,y2) the
+// bottom-right corner, as required by getIntegralValue().
+void IntegralOps::set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+{
+  x1 = std::min ( _x1, _x2 );
+  y1 = std::min ( _y1, _y2 );
+  x2 = std::max ( _x1, _x2 );
+  y2 = std::max ( _y1, _y2 );
+  channel1 = _channel1;
+  channel2 = _channel2;
+  values = _values;
+}
+
+// Half-extents of a centred box must be non-negative.
+void IntegralCenteredOps::set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+{
+  x1 = abs ( _x1 );
+  y1 = abs ( _y1 );
+  x2 = abs ( _x2 );
+  y2 = abs ( _y2 );
+  channel1 = _channel1;
+  channel2 = _channel2;
+  values = _values;
+}
+
+// Sort the absolute half-extents so that (x1,y1) is the smaller box and
+// (x2,y2) the larger one (see BiIntegralCenteredOps::getVal).
+void BiIntegralCenteredOps::set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
+{
+  x1 = std::min ( abs ( _x1 ), abs ( _x2 ) );
+  y1 = std::min ( abs ( _y1 ), abs ( _y2 ) );
+  x2 = std::max ( abs ( _x1 ), abs ( _x2 ) );
+  y2 = std::max ( abs ( _y1 ), abs ( _y2 ) );
+  channel1 = _channel1;
+  channel2 = _channel2;
+  values = _values;
+}

+ 1173 - 0
semseg/operations/Operations.h

@@ -0,0 +1,1173 @@
+/**
+* @file Operation.h
+* @brief abstract class for any kind of feature extraction from images
+* @author Björn Fröhlich
+* @date 24.04.2012
+
+*/
+
+#include "core/image/MultiChannelImageT.h"
+
+#define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
+
+namespace OBJREC {
+
+class Operation;
+
+/**
+ * @brief methods for value access
+ **/
+enum ValueTypes
+{
+  RAWFEAT,      // raw feature-image values (MCImageAccess)
+  CONTEXT,      // class distributions from the tree (ClassificationResultAccess)
+  SPARSE,       // sparse texton access (currently disabled, see #if 0 below)
+  NBVALUETYPES  // number of value types
+};
+
+/**
+ * @brief feature extraction methods
+ **/
+enum OperationTypes {
+  MINUS,
+  MINUSABS,
+  ADDITION,
+  ONLY1,
+  INTEGRAL,
+  INTEGRALCENT,
+  BIINTEGRALCENT,
+  HAARHORIZ,
+  HAARVERT,
+  HAARDIAG,
+  HAAR3HORIZ,
+  HAAR3VERT,
+  RELATIVEXPOSITION,
+  RELATIVEYPOSITION,
+  GLOBALFEATS,
+  EQUALITY,
+  NBOPERATIONS
+};
+
+/**
+ * @brief node class for context tree
+ **/
+class TreeNode
+{
+
+  public:
+    /** left child node */
+    int left;
+
+    /** right child node */
+    int right;
+
+    /** position of feat for decision */
+    Operation *feat;
+
+    /** decision stamp */
+    double decision;
+
+    /** is the node a leaf or not */
+    bool isleaf;
+
+    /** distribution in current node */
+    std::vector<double> dist;
+
+    /** depth of the node in the tree */
+    int depth;
+
+    /** how many pixels are in this node */
+    int featcounter;
+
+    /** unique number */
+    int nodeNumber;
+
+    /** simple constructor */
+    TreeNode() : left ( -1 ), right ( -1 ), feat ( NULL ), decision ( -1.0 ), isleaf ( false ) {}
+
+    /** standard constructor */
+    TreeNode ( int _left, int _right, Operation *_feat, double _decision ) : left ( _left ), right ( _right ), feat ( _feat ), decision ( _decision ), isleaf ( false ) {}
+};
+
+/**
+ * @brief holds all necessary information for feature extraction
+ **/
+struct Features {
+  /** simple features like RGB values */
+  NICE::MultiChannelImageT<double> *feats;
+
+  /** current leaf position for each pixel and each tree */
+  NICE::MultiChannelImageT<unsigned short int> *cfeats;
+
+  /** amount of trees
+   *  NOTE(review): ClassificationResultAccess::getVal uses cTree as the
+   *  channel index into cfeats, i.e. as the index of the *current* tree —
+   *  the name/comment may be misleading; confirm against callers. */
+  int cTree;
+
+  /** tree nodes */
+  std::vector<TreeNode> *tree;
+  
+  /** probabilities for each region */
+  std::vector<std::vector<double> > *rProbs;
+};
+
+/**
+ * @brief abstract values access class
+ **/
+class ValueAccess
+{
+  public:
+    /**
+     * @brief extract value on specific position x,y and channel;
+     *
+     * @param feats see struct Features
+     * @param x position of feature
+     * @param y position of feature
+     * @param channel position of feature
+     * @return double value
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel ) = 0;
+
+    /**
+     * @brief print some infos about feature type
+     *
+     * @return string feature type
+     **/
+    virtual std::string writeInfos() = 0;
+
+    /**
+     * @brief get feature type
+     *
+     * @return feature type
+     **/
+    virtual ValueTypes getType() = 0;
+};
+
+/**
+ * @brief simple MultiChannelImageT access
+ **/
+class MCImageAccess: public ValueAccess
+{
+
+  public:
+    /**
+     * @brief extract value on specific position x,y and channel;
+     *
+     * @param feats see struct Features
+     * @param x position of feature
+     * @param y position of feature
+     * @param channel position of feature
+     * @return double value
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel )
+    {
+      return feats.feats->get ( x, y, channel );
+    }
+
+    /**
+     * @brief print some infos about feature type
+     *
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "raw";
+    }
+
+    /**
+     * @brief get feature type
+     *
+     * @return feature type
+     **/
+    virtual ValueTypes getType()
+    {
+      return RAWFEAT;
+    }
+};
+
+/**
+ * @brief context access: reads the class distribution stored at the pixel's
+ * current tree node (channel selects the class)
+ **/
+class ClassificationResultAccess: public ValueAccess
+{
+  public:
+    /**
+     * @brief extract value on specific position x,y and channel;
+     *
+     * @param feats see struct Features
+     * @param x position of feature
+     * @param y position of feature
+     * @param channel position of feature
+     * @return double value
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel )
+    {
+      return ( *feats.tree ) [feats.cfeats->get ( x,y,feats.cTree ) ].dist[channel];
+    }
+
+    /**
+     * @brief print some infos about feature type
+     *
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "context";
+    }
+
+    /**
+     * @brief get feature type
+     *
+     * @return feature type
+     **/
+    virtual ValueTypes getType()
+    {
+      return CONTEXT;
+    }
+};
+
+#if 0
+/**
+ * @brief not finished yet, do we really need sparse feature representation or ClassificationResultAccess sufficient
+ **/
+class SparseImageAccess: public ValueAccess
+{
+  private:
+    double scale;
+
+  public:
+    /**
+     * @brief extract value on specific position x,y and channel;
+     *
+     * @param feats see struct Features
+     * @param x position of feature
+     * @param y position of feature
+     * @param channel position of feature
+     * @return double value
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel )
+    {
+      //MultiChannelImageT<SparseVectorInt> textonMap;
+      //TODO: implement access
+      return -1.0;
+    }
+
+    /**
+     * @brief print some infos about feature type
+     *
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "context";
+    }
+
+    /**
+     * @brief get feature type
+     *
+     * @return feature type
+     **/
+    virtual ValueTypes getType()
+    {
+      return CONTEXT;
+    }
+};
+#endif
+
+/**
+ * @brief abstract operation class
+ **/
+class Operation
+{
+  protected:
+    /** two different points (e.g. for an rectangle or two positions), channels and size  */
+    int x1, y1, x2, y2, channel1, channel2, maxtypes;
+    
+    /** type of feature */
+    int featType;
+    
+    /** value-access strategy used by getVal() (may be NULL) */
+    ValueAccess *values;
+
+    /** whether this operation is a context feature */
+    bool context;
+
+  public:
+
+    /** simple constructor */
+    Operation();
+
+    /**
+     * @brief set all parameters
+     * @param _x1 position 1
+     * @param _y1 position 1
+     * @param _x2 position 2
+     * @param _y2 position 2
+     * @param _channel1 channel 1
+     * @param _channel2 channel 2
+     * @param _values value extraction method
+     * @return void nothing
+     **/
+    virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values );
+
+    /**
+     * @brief set whether it is a context feature or not
+     * @param _context context boolean
+     * @return void nothing
+     **/
+    void setContext ( bool _context );
+
+    /**
+     * @brief return context information (set by setContext(bool)
+     *
+     * @return bool whether context is used or not
+     **/
+    bool getContext();
+    
+    /**
+     * @brief set type of feature
+     * @param _featType type of feature
+     * @return void nothing
+     **/
+    void setFeatType ( int _featType );
+
+    /**
+     * @brief return the feature type (set by setFeatType(int))
+     *
+     * @return int get feature type
+     **/
+    int getFeatType();   
+
+    /**
+     * @brief abstract interface for feature computation
+     * @param feats features
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y ) = 0;
+
+    /**
+     * @brief virtual clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone() = 0;
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos();
+
+    /**
+     * @brief extract current image borders
+     * @param feats image information
+     * @param xsize width
+     * @param ysize height
+     * @return void
+     **/
+    inline void getXY ( const Features &feats, int &xsize, int &ysize );
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps() = 0;
+
+    /**
+     * @brief store all information for current operation in stream
+     * @param os out stream
+     * @return void
+     **/
+    virtual void store ( std::ostream & os );
+
+    /**
+     * @brief restore all information for current operation from stream
+     * @param is in stream
+     * @return void
+     **/
+    virtual void restore ( std::istream & is );
+};
+
+/**
+ * @brief simple equality check ?(A==B)
+ **/
+class RegionFeat: public Operation
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new RegionFeat();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      std::string out = "RegionFeat";
+
+      if ( values != NULL )
+        out += values->writeInfos();
+
+      return out + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restor)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return EQUALITY;
+    }
+};
+
+/**
+ * @brief simple difference operation A-B
+ **/
+class Minus: public Operation
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new Minus();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      std::string out = "Minus";
+
+      if ( values != NULL )
+        out += values->writeInfos();
+
+      return out + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restor)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return MINUS;
+    }
+};
+
+/**
+ * @brief simple absolute difference operation |A-B|
+ **/
+class MinusAbs: public Operation
+{
+
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new MinusAbs();
+    };
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      std::string out = "MinusAbs";
+
+      if ( values != NULL )
+        out += values->writeInfos();
+
+      return out;
+    }
+
+    /**
+     * @brief return operation type (for store and restor)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return MINUSABS;
+    }
+};
+
+/**
+ * @brief simple addition operation A+B
+ **/
+class Addition: public Operation
+{
+
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new Addition();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      std::string out = "Addition";
+
+      if ( values != NULL )
+        out += values->writeInfos();
+
+      return out + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restor)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return ADDITION;
+    }
+};
+
+/**
+ * @brief simple single element access operation
+ **/
+class Only1: public Operation
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new Only1();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      std::string out = "Only1";
+
+      if ( values != NULL )
+        out += values->writeInfos();
+
+      return out + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return ONLY1;
+    }
+};
+
+/**
+ * @brief get current relative x position
+ **/
+class RelativeXPosition: public Operation
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new RelativeXPosition();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "RelativeXPosition" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return RELATIVEXPOSITION;
+    }
+};
+
+/**
+ * @brief get current relative y position
+ **/
+class RelativeYPosition: public Operation
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new RelativeYPosition();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "RelativeYPosition" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return RELATIVEYPOSITION;
+    }
+};
+
+
+/**
+ * @brief uses mean in a window given by (x1,y1) (x2,y2)
+ **/
+class IntegralOps: public Operation
+{
+  public:
+    /**
+     * @brief set all parameters
+     * @param _x1 position 1
+     * @param _y1 position 1
+     * @param _x2 position 2
+     * @param _y2 position 2
+     * @param _channel1 channel 1
+     * @param _channel2 channel 2
+     * @param _values value extraction method
+     * @return void nothing
+     **/
+    virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values );
+
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new IntegralOps();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "IntegralOps" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return INTEGRAL;
+    }
+};
+
+
+/**
+ * @brief like a global bag of words to model the current appearance of classes in an image without local context
+ **/
+class GlobalFeats: public IntegralOps
+{
+
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new GlobalFeats();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "GlobalFeats" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return GLOBALFEATS;
+    }
+};
+
+/**
+ * @brief uses mean of Integral image given by x1, y1 with current pixel as center
+ **/
+class IntegralCenteredOps: public IntegralOps
+{
+  public:
+    /**
+     * @brief set all parameters
+     * @param _x1 position 1
+     * @param _y1 position 1
+     * @param _x2 position 2
+     * @param _y2 position 2
+     * @param _channel1 channel 1
+     * @param _channel2 channel 2
+     * @param _values value extraction method
+     * @return void nothing
+     **/
+    virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values );
+
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new IntegralCenteredOps();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "IntegralCenteredOps" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return INTEGRALCENT;
+    }
+};
+
+/**
+ * @brief uses the difference of the integral-image means of two windows, where (x1,y1) is the width and height of window 1 and (x2,y2) of window 2
+ **/
+class BiIntegralCenteredOps: public IntegralCenteredOps
+{
+  public:
+    /**
+     * @brief set all parameters
+     * @param _x1 position 1
+     * @param _y1 position 1
+     * @param _x2 position 2
+     * @param _y2 position 2
+     * @param _channel1 channel 1
+     * @param _channel2 channel 2
+     * @param _values value extraction method
+     * @return void nothing
+     **/
+    virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values );
+
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new BiIntegralCenteredOps();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "BiIntegralCenteredOps" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return BIINTEGRALCENT;
+    }
+};
+
+/**
+ * @brief horizontal Haar features
+ * ++
+ * --
+ **/
+class HaarHorizontal: public IntegralCenteredOps
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new HaarHorizontal();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "HaarHorizontal" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return HAARHORIZ;
+    }
+};
+
+/**
+ * @brief vertical Haar features
+ * +-
+ * +-
+ **/
+class HaarVertical: public IntegralCenteredOps
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new HaarVertical();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "HaarVertical" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return HAARVERT;
+    }
+};
+
+/**
+ * @brief diagonal Haar features
+ * +-
+ * -+
+ **/
+class HaarDiag: public IntegralCenteredOps
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new HaarDiag();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "HaarDiag" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return HAARDIAG;
+    }
+};
+
+/**
+ * @brief horizontal Haar features
+ * +++
+ * ---
+ * +++
+ */
+
+class Haar3Horiz: public BiIntegralCenteredOps
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new Haar3Horiz();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "Haar3Horiz" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return HAAR3HORIZ;
+    }
+};
+
+/**
+ * @brief vertical Haar features
+ * +-+
+ * +-+
+ * +-+
+ */
+class Haar3Vert: public BiIntegralCenteredOps
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new Haar3Vert();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      return "Haar3Vert" + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restore)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return HAAR3VERT;
+    }
+};
+
+} //end namespace
+

+ 8 - 0
semseg/postsegmentation/Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursivly going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
semseg/postsegmentation/Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The many 
+# SUBDIR_before, _add, and everything are only required so that we can recover
+# the previous content of SUBDIR before exitting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusivelly use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 199 - 0
semseg/postsegmentation/PPGraphCut.cpp

@@ -0,0 +1,199 @@
+#include "PPGraphCut.h"
+
+#include "segmentation/RegionGraph.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+void PPGraphCut::setClassNo(int _classno)
+{
+  classno = _classno;
+
+  coocurence = new double[classno*classno];
+
+  for (int i = 0; i < classno*classno; i++)
+  {
+    coocurence[i] = 0.0;
+  }
+
+}
+
+PPGraphCut::PPGraphCut()
+{
+  conf = new Config();
+  Init();
+}
+
+PPGraphCut::PPGraphCut(const Config *_conf): conf(_conf)
+{
+  Init();
+}
+
+void PPGraphCut::Init()
+{
+  std::string section = "PostProcess";
+}
+
+PPGraphCut::~PPGraphCut()
+{
+
+}
+
+void PPGraphCut::optimizeImage(RegionGraph &regions, vector<vector<double> > & probabilities)
+{
+  vector<Node*> nodes;
+  regions.get(nodes);
+
+  GCoptimizationGeneralGraph graphcut(nodes.size(), classno);
+
+  graphcut.setSmoothCost(coocurence);
+
+  map<pair<int, int>, int> pairs;
+
+  for (int i = 0; i < (int) nodes.size(); i++)
+  {
+    vector<Node*> nbs;
+    nodes[i]->getNeighbors(nbs);
+    int pos1 = nodes[i]->getNumber();
+    for (int j = 0; j < (int)nbs.size(); j++)
+    {
+      int pos2 = nbs[j]->getNumber();
+      pair<int, int> p(std::min(pos1, pos2), std::max(pos1, pos2));
+      map<pair<int, int>, int>::iterator iter = pairs.find(p);
+      if (iter == pairs.end())
+      {
+        pairs.insert(make_pair(p, 1));
+        graphcut.setNeighbors(pos1, pos2, 1.0);
+      }
+    }
+    for (int l = 0; l < classno; l++)
+    {
+      double val = probabilities[i][l];
+      if (val <= 0.0)
+        val = 1e-10;
+      val = -log(val);
+      graphcut.setDataCost(pos1, l, val);
+    }
+    graphcut.setLabel(pos1, nodes[i]->getLabel());
+  }
+
+  graphcut.swap(20);
+
+  //double E_smooth = graphcut->smoothnessEnergy();
+
+  //double E_data   = graphcut->dataEnergy();
+
+  for (int i = 0; i < (int)nodes.size(); i++ )
+  {
+    regions[i]->setLabel(graphcut.whatLabel(i));
+  }
+}
+
+void PPGraphCut::optimizeImage(Examples &regions, NICE::Matrix &mask, NICE::MultiChannelImageT<double> & probabilities)
+{
+  RegionGraph g;
+  g.computeGraph(regions, mask);
+
+  vector<vector<double> > probs;
+
+  for (int p = 0; p < (int)regions.size(); p++)
+  {
+    vector<double> pr;
+    for (int l = 0; l < classno; l++)
+    {
+      pr.push_back(probabilities.get(regions[p].second.x, regions[p].second.y, l));
+    }
+    probs.push_back(pr);
+  }
+
+  optimizeImage(g, probs);
+}
+
+void PPGraphCut::trainImage(RegionGraph &g)
+{
+  vector<Node*> nodes;
+  g.get(nodes);
+
+  for (int i = 0; i < (int) nodes.size(); i++)
+  {
+    vector<Node*> nbs;
+    nodes[i]->getNeighbors(nbs);
+    for (int j = 0; j < (int)nbs.size(); j++)
+    {
+      //if(nodes[i]->getLabel() != nbs[j]->getLabel())
+      coocurence[nodes[i]->getLabel()*classno + nbs[j]->getLabel()] += 1.0;
+    }
+  }
+}
+
+void PPGraphCut::trainImage(Examples &regions, NICE::Matrix &mask)
+{
+  // determine the co-occurrence matrix
+  RegionGraph g;
+  g.computeGraph(regions, mask);
+  trainImage(g);
+}
+
+void PPGraphCut::finishPP(ClassNames &cn)
+{
+  for (int i = 0; i < classno; i++)
+  {
+    for (int j = 0; j < classno; j++)
+    {
+      cout << coocurence[classno*i+j] << " ";
+    }
+    cout << endl;
+  }
+  cout << endl;
+
+  double weight = conf->gD( "PPGC", "weight", 0.01 );
+  double maxv =  -numeric_limits<double>::max();
+  for (int i = 0; i < classno; i++)
+  {
+    for (int j = 0; j < classno; j++)
+    {
+      if (j == i)
+        coocurence[classno*i+j] = 0.0;
+      else
+        maxv = std::max(maxv, coocurence[classno*i+j]);
+    }
+  }
+
+  maxv += 1 + 1e-10;
+
+  for (int i = 0; i < classno; i++)
+  {
+    for (int j = 0; j < classno; j++)
+    {
+      if (j == i)
+        coocurence[classno*i+j] = 0.0;
+      else
+        coocurence[classno*i+j] = -weight * (log(( coocurence[classno*i+j] + 1.0) / maxv));
+    }
+  }
+  for (int i = 0; i < classno; i++)
+  {
+    for (int j = 0; j < classno; j++)
+    {
+      cout << coocurence[classno*i+j] << " ";
+    }
+    cout << endl;
+  }
+  //GetChar();
+}
+
+void PPGraphCut::restore (istream & is, int format)
+{
+
+}
+
+void PPGraphCut::store (ostream & os, int format) const
+{
+
+}
+
+void PPGraphCut::clear()
+{
+
+}

+ 126 - 0
semseg/postsegmentation/PPGraphCut.h

@@ -0,0 +1,126 @@
+/**
+ * @file PPGraphCut.h
+ * @brief a post-processing step after semantic segmentation which uses a variant of GraphCut
+ * @author Björn Fröhlich
+ * @date 09/08/2009
+
+ */
+#ifndef PPGRAPHCUTINCLUDE
+#define PPGRAPHCUTINCLUDE
+
+#include "core/image/MultiChannelImageT.h"
+
+#include "vislearning/cbaselib/CachedExample.h"
+#include "vislearning/baselib/Preprocess.h"
+#include "vislearning/baselib/Globals.h"
+
+#include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+#include <vislearning/features/fpfeatures/VectorFeature.h>
+
+#include "vislearning/cbaselib/ClassNames.h"
+
+#include "segmentation/RSMeanShift.h"
+
+#include "vislearning/mrf/mrfmin/GCoptimization.h"
+
+
+namespace OBJREC
+{
+
+class PPGraphCut : public NICE::Persistent
+{
+
+  protected:
+    //! the configfile
+    const NICE::Config *conf;
+
+    //! count of classes
+    int classno;
+
+    //! Shape features
+    Examples shapefeats;
+
+    //! classifier for shape features
+    FPCRandomForests *rf;
+
+    double *coocurence;
+
+  public:
+
+    /** simple constructor */
+    PPGraphCut();
+
+    /** simple constructor */
+    PPGraphCut ( const NICE::Config *_conf );
+
+    /** simple destructor */
+    ~PPGraphCut();
+
+    /**
+     * set the count of classes
+     * @param _classno count of classes
+     */
+    void setClassNo ( int _classno );
+
+    /** initialize the RelativeLocationPrior Variables*/
+    void Init();
+
+    /**
+     * train region
+     * @param regions input regions with size and position
+     * @param mask
+     */
+    void trainImage ( Examples &regions, NICE::Matrix &mask );
+
+    /**
+     * train region
+     * @param regions input regions with size and position
+     */
+    void trainImage ( RegionGraph &regions );
+
+
+    /**
+     * finish the priors maps
+     */
+    void finishPP ( ClassNames &cn );
+
+    /**
+     * apply the graph-cut optimization to the pixel-wise probability maps
+     * @param regions
+     * @param mask
+     * @param probabilities probability maps for each pixel
+     */
+    void optimizeImage ( Examples &regions, NICE::Matrix &mask, NICE::MultiChannelImageT<double> & probabilities );
+
+    /**
+     * apply the graph-cut optimization to the region-wise probabilities
+     * @param regions
+     * @param mask
+     * @param probabilities for each region
+     */
+    void optimizeImage ( OBJREC::RegionGraph &regions, std::vector<std::vector<double> > & probabilities );
+
+    /**
+     * load data from an input stream
+     * @param is input stream
+     * @param format
+     */
+    void restore ( std::istream & is, int format = 0 );
+
+    /**
+     * write data to an output stream
+     * @param os outputstream
+     * @param format
+     */
+    void store ( std::ostream & os, int format = 0 ) const;
+
+    /**
+     * clear all informations
+     */
+    void clear ();
+};
+
+} //namespace
+
+#endif

+ 286 - 0
semseg/postsegmentation/PPSuperregion.cpp

@@ -0,0 +1,286 @@
+#include "PPSuperregion.h"
+
+#ifdef NICE_USELIB_ICE
+#include <core/iceconversion/convertice.h>
+#endif
+
+#include "segmentation/RegionGraph.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+PPSuperregion::PPSuperregion()
+{
+  conf = new Config();
+  Init();
+}
+
+PPSuperregion::PPSuperregion(const Config *_conf): conf(_conf)
+{
+  Init();
+}
+
+void PPSuperregion::Init()
+{
+  std::string section = "PostProcessSG";
+  rf = new FPCRandomForests( conf, "ShapeRF" );
+}
+
+PPSuperregion::~PPSuperregion()
+{
+}
+
+
+void PPSuperregion::optimizeShape(Examples &regions, NICE::Matrix &mask, NICE::MultiChannelImageT<double> & probabilities)
+{
+#ifdef NICE_USELIB_ICE
+  vector<ice::Region> superregions;
+  vector<double> probs;
+  vector<int> classes;
+  NICE::Matrix smask;
+  getSuperregions(regions, mask, superregions, classes, smask);
+
+  for (int i = 0; i < (int)superregions.size(); i++)
+  {
+    ice::Moments m;
+    superregions[i].CalcMoments(m);
+
+    NICE::Vector tmp = makeEVector(m.AffineHuInvariants());
+    NICE::Vector *tmp2 = new NICE::Vector(tmp);
+    Example tex(tmp2);
+
+    ClassificationResult r = rf->classify ( tex );
+
+    probs.push_back(r.scores[classes[i]]);
+  }
+
+  vector<ice::Region> orgregions;
+  for (int i = 0; i < (int)regions.size(); i++)
+  {
+    orgregions.push_back(ice::Region());
+  }
+
+  for (int y = 0; y < (int)mask.cols(); y++)
+  {
+    for (int x = 0; x < (int)mask.rows(); x++)
+    {
+      int pos = mask(x, y);
+      orgregions[pos].Add(x, y);
+    }
+  }
+
+  // maps the regions to their superregions
+  vector<int> regmapsreg(regions.size(), 0);
+  for (int y = 0; y < (int)smask.cols(); y++)
+  {
+    for (int x = 0; x < (int)smask.rows(); x++)
+    {
+      int r = mask(x, y);
+      int sr = smask(x, y);
+      regmapsreg[r] = sr;
+    }
+  }
+
+  RegionGraph g;
+  g.computeGraph(regions, mask);
+
+  vector<Node*> nodes;
+  g.get(nodes);
+
+  bool change = true;
+  int k = 0;
+  while (change && k < 100)
+  {
+    k++;
+    change = false;
+    int anders = 0;
+    for (int i = 0; i < (int) nodes.size(); i++)
+    {
+
+      set<int> sr;
+      int regnb = nodes[i]->getRegion();
+      int orgreg = regmapsreg[regnb];
+
+      if (nodes[i]->isAtBorder())
+      {
+        vector<Node*> nbs;
+        nodes[i]->getNeighbors(nbs);
+        for (int j = 0; j < (int)nbs.size(); j++)
+          sr.insert(regmapsreg[nbs[j]->getRegion()]);
+      }
+
+      vector<double> otherprobs;
+
+      ice::Region re = superregions[orgreg];
+      re.Del(orgregions[regnb]);
+
+      ice::Moments m;
+
+      if (re.Area() > 0)
+      {
+        re.CalcMoments(m);
+
+        NICE::Vector tmp = makeEVector( m.AffineHuInvariants());
+        NICE::Vector *tmp2 = new NICE::Vector(tmp);
+        Example tex(tmp2);
+        ClassificationResult r = rf->classify ( tex );
+        tex.vec = NULL;
+        delete tmp2;
+
+        double val = probabilities.get(regions[regnb].second.x, regions[regnb].second.y, classes[orgreg]) * r.scores[classes[orgreg]];
+
+        otherprobs.push_back(val);
+        if (otherprobs[0] < probs[orgreg])
+          continue;
+      }
+
+      for ( set<int>::const_iterator iter = sr.begin();iter != sr.end();++iter )
+      {
+        ice::Moments m2;
+        ice::Region re2 = superregions[regmapsreg[*iter]];
+        re2.Add(orgregions[regnb]);
+        re2.CalcMoments(m2);
+        NICE::Vector tmp = makeEVector(m2.AffineHuInvariants());
+        NICE::Vector *tmp2 = new NICE::Vector(tmp);
+        Example tex(tmp2);
+        ClassificationResult r2 = rf->classify ( tex );
+        tex.vec = NULL;
+        delete tmp2;
+
+        double val = probabilities.get(regions[regnb].second.x, regions[regnb].second.y, classes[*iter]) * r2.scores[classes[*iter]];
+
+        otherprobs.push_back(val);
+      }
+
+      int k = 1;
+      int best = -1;
+      double bestval = -1.0;
+      for ( set<int>::const_iterator iter = sr.begin();iter != sr.end();++iter, k++ )
+      {
+        if (otherprobs[k] > probs[*iter])
+        {
+          if (bestval < otherprobs[k])
+          {
+            bestval = otherprobs[k];
+            best = *iter;
+          }
+        }
+      }
+
+      if (best < 0 || bestval <= 0.0)
+        continue;
+
+      change = true;
+
+      probs[best] = bestval;
+
+      superregions[best].Add(orgregions[regnb]);
+
+      probs[orgreg] = otherprobs[0];
+
+      superregions[orgreg].Del(orgregions[regnb]);
+
+      regmapsreg[regnb] = best;
+
+      nodes[i]->setLabel(classes[best]);
+      anders++;
+    }
+  }
+
+  for (int i = 0; i < (int)regions.size(); i++)
+  {
+    regions[i].first = classes[regmapsreg[i]];
+  }
+#else
+  throw("PPSuperRegion.cpp: please use ice library for this function");
+#endif
+}
+#ifdef NICE_USELIB_ICE
+void PPSuperregion::getSuperregions(const Examples &regions, const NICE::Matrix &mask, vector<ice::Region> &superregions, vector<int> &classes, NICE::Matrix &smask)
+{
+  NICE::Image tmp (mask.rows(), mask.cols());
+  tmp.set(0);
+  NICE::ColorImage m2 (tmp, tmp, tmp);
+  for (int y = 0; y < (int)mask.cols(); y++)
+  {
+    for (int x = 0; x < (int)mask.rows(); x++)
+    {
+      int pos = mask(x, y);
+
+      m2.setPixel(x, y, 0, regions[pos].first);
+      m2.setPixel(x, y, 1, regions[pos].first);
+      m2.setPixel(x, y, 2, regions[pos].first);
+    }
+  }
+
+  RSMeanShift rs(conf);
+  int count = rs.transformSegmentedImg( m2, smask);
+
+  classes.resize(count);
+  for (int i = 0; i < count; i++)
+  {
+    superregions.push_back(ice::Region());
+  }
+
+  for (int y = 0; y < (int)smask.cols(); y++)
+  {
+    for (int x = 0; x < (int)smask.rows(); x++)
+    {
+      int pos = smask(x, y);
+      superregions[pos].Add(x, y);
+      classes[pos] = regions[mask(x,y)].first;
+    }
+  }
+}
+#endif
+
+void PPSuperregion::trainShape(Examples &regions, NICE::Matrix &mask)
+{
+#ifdef NICE_USELIB_ICE
+  // determine the superregions
+  vector<ice::Region> superregions;
+  vector<int> classes;
+  // refactor-nice.pl: check this substitution
+  // old: Image smask;
+  NICE::Matrix smask;
+  getSuperregions(regions, mask, superregions, classes, smask);
+
+  // compute the moments of the superregions and store them as features
+  for (int i = 0; i < (int)superregions.size(); i++)
+  {
+    ice::Moments m;
+    superregions[i].CalcMoments(m);
+    NICE::Vector tmp = makeEVector(m.AffineHuInvariants());
+    NICE::Vector *tmp2 = new NICE::Vector(tmp);
+    shapefeats.push_back(pair<int, Example>(classes[i], Example(tmp2)));
+  }
+#else
+  throw("PPSuperRegion.cpp: please use ice library for this function");
+#endif
+}
+
+void PPSuperregion::finishShape(ClassNames &cn)
+{
+  // train the classifier on the shape features
+  FeaturePool fp;
+  Feature *f = new VectorFeature ( 7 );
+  f->explode ( fp );
+  delete f;
+  rf->train ( fp, shapefeats);
+}
+
+void PPSuperregion::restore (istream & is, int format)
+{
+
+}
+
+void PPSuperregion::store (ostream & os, int format) const
+{
+
+}
+
+void PPSuperregion::clear()
+{
+
+}

+ 121 - 0
semseg/postsegmentation/PPSuperregion.h

@@ -0,0 +1,121 @@
+/**
+ * @file PPSuperregion.h
+ * @brief a post-processing step after semantic segmentation which uses a variant of region growing
+ * @author Björn Fröhlich
+ * @date 08/19/2009
+
+ */
+
+#ifndef PPSUPERREGIONINCLUDE
+#define PPSUPERREGIONINCLUDE
+
+#include "core/image/MultiChannelImageT.h"
+
+#include "vislearning/cbaselib/CachedExample.h"
+#include "vislearning/baselib/Preprocess.h"
+#include "vislearning/baselib/Globals.h"
+
+#include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+#include "vislearning/features/fpfeatures/VectorFeature.h"
+
+#include "vislearning/cbaselib/ClassNames.h"
+
+#include "segmentation/RSMeanShift.h"
+
+#ifdef NICE_USELIB_ICE
+#include <image_nonvis.h>
+#endif
+
+namespace OBJREC
+{
+
+class PPSuperregion : public NICE::Persistent
+{
+
+  protected:
+    //! the configfile
+    const NICE::Config *conf;
+
+    //! count of classes
+    int classno;
+
+    //! Shape features
+    Examples shapefeats;
+
+    //! classifier for shape features
+    FPCRandomForests *rf;
+
+  public:
+
+    /** simple constructor */
+    PPSuperregion();
+
+    /** simple constructor */
+    PPSuperregion ( const NICE::Config *_conf );
+
+    /** simple destructor */
+    ~PPSuperregion();
+
+    /**
+     * set the count of classes
+     * @param _classno count of classes
+     */
+    void setClassNo ( int _classno );
+
+    /** initialize the RelativeLocationPrior Variables*/
+    void Init();
+
+    /**
+     * combines connected regions with the same label to superregions
+     * @param regions the input regions
+     * @param mask the mask for the regions
+     * @param superregions the superregions
+     * @param classes the classlabels of the superregions
+     */
+#ifdef NICE_USELIB_ICE
+    void getSuperregions ( const Examples &regions, const NICE::Matrix &mask, std::vector<ice::Region> &superregions, std::vector<int> &classes, NICE::Matrix &smask );
+#endif
+
+    /**
+     * learn the shape of the regions
+     * @param regions input regions with size and position
+     * @param mask
+     */
+    void trainShape ( Examples &regions, NICE::Matrix &mask );
+
+    /**
+     * finish the priors maps
+     */
+    void finishShape ( ClassNames &cn );
+
+    /**
+     * use shape pp
+     * @param regions
+     * @param mask
+     */
+    void optimizeShape ( Examples &regions, NICE::Matrix &mask, NICE::MultiChannelImageT<double> & probabilities );
+
+    /**
+     * load data from an input stream
+     * @param is input stream
+     * @param format
+     */
+    void restore ( std::istream & is, int format = 0 );
+
+    /**
+     * write data to an output stream
+     * @param os outputstream
+     * @param format
+     */
+    void store ( std::ostream & os, int format = 0 ) const;
+
+    /**
+     * clear all informations
+     */
+    void clear ();
+};
+
+} //namespace
+
+#endif

+ 115 - 0
semseg/postsegmentation/PSSImageLevelPrior.cpp

@@ -0,0 +1,115 @@
+/**
+* @file PSSImageLevelPrior.cpp
+* @brief incorporate prior from image categorization method
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+
+#include <iostream>
+#include <set>
+#include <assert.h>
+#include <algorithm>
+#include "PSSImageLevelPrior.h"
+
+using namespace OBJREC;
+
+using namespace std;
+
+using namespace NICE;
+
+
+
+PSSImageLevelPrior::PSSImageLevelPrior ( int imagePriorMethod, int priorK, double alphaImagePrior )
+{
+  this->imagePriorMethod = imagePriorMethod;
+  this->priorK = priorK;
+  this->alphaImagePrior = alphaImagePrior;
+}
+
+PSSImageLevelPrior::~PSSImageLevelPrior()
+{
+}
+
+void PSSImageLevelPrior::setPrior ( FullVector & prior )
+{
+  this->prior = prior;
+}
+
+void PSSImageLevelPrior::postprocess ( NICE::Image & result, NICE::MultiChannelImageT<double> & probabilities )
+{
+  assert ( prior.size() == ( int ) probabilities.channels() );
+  int xsize = probabilities.width();
+  int ysize = probabilities.height();
+
+  if ( imagePriorMethod == IMAGE_PRIOR_BEST_K )
+  {
+    vector<int> indices;
+    prior.getSortedIndices ( indices );
+
+    reverse ( indices.begin(), indices.end() );
+    set<int> bestComponents;
+    vector<int>::const_iterator j = indices.begin();
+    if ( indices.size() > ( size_t ) priorK )
+      advance ( j, priorK );
+    else
+      j = indices.end();
+
+    for ( vector<int>::const_iterator jj = indices.begin();
+          jj != j ; jj++ )
+      bestComponents.insert ( *jj );
+
+    for ( int ys = 0 ; ys < ysize ; ys ++ )
+      for ( int xs = 0 ; xs < xsize ; xs++)
+      {
+        int maxindex = 0;
+        double maxvalue = - numeric_limits<double>::max();
+        double sum = 0.0;
+        for ( int i = 0 ; i < ( int ) probabilities.channels() ; i++ )
+        {
+          if ( bestComponents.find ( i ) == bestComponents.end() )
+            probabilities[i](xs,ys) = 0.0;
+          sum += probabilities[i](xs,ys);
+
+          if ( probabilities[i](xs,ys) > maxvalue )
+          {
+            maxindex = i;
+            maxvalue = probabilities[i](xs,ys);
+          }
+        }
+
+        if ( sum > 1e-11 )
+          for ( int i = 0 ; i < ( int ) probabilities.channels() ; i++ )
+          {
+            probabilities[i](xs,ys) /= sum;
+          }
+
+        result.setPixel ( xs, ys, maxindex );
+      }
+  } else if ( imagePriorMethod == IMAGE_PRIOR_PSEUDOPROB ) {
+    for ( int ys = 0 ; ys < ysize ; ys ++ )
+      for ( int xs = 0 ; xs < xsize ; xs++)
+      {
+        int maxindex = 0;
+        double maxvalue = - numeric_limits<double>::max();
+        double sum = 0.0;
+
+        for ( int i = 0 ; i < ( int ) probabilities.channels() ; i++ )
+        {
+          probabilities[i](xs,ys) *= pow ( prior[i], alphaImagePrior );
+          sum += probabilities[i](xs,ys);
+          if ( probabilities[i](xs,ys) > maxvalue )
+          {
+            maxindex = i;
+            maxvalue = probabilities[i](xs,ys);
+          }
+        }
+        if ( sum > 1e-11 )
+          for ( int i = 0 ; i < ( int ) probabilities.channels() ; i++ )
+          {
+            probabilities[i](xs,ys) /= sum;
+          }
+        result.setPixel ( xs, ys, maxindex );
+      }
+  }
+}

+ 51 - 0
semseg/postsegmentation/PSSImageLevelPrior.h

@@ -0,0 +1,51 @@
+/**
+* @file PSSImageLevelPrior.h
+* @brief incorporate prior from image categorization method
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef PSSIMAGELEVELPRIORINCLUDE
+#define PSSIMAGELEVELPRIORINCLUDE
+
+#include "PostSemSeg.h"
+#include "vislearning/math/mathbase/FullVector.h"
+
+
+namespace OBJREC
+{
+
+/** incorporate prior from image categorization method */
+class PSSImageLevelPrior : public PostSemSeg
+{
+
+  protected:
+    int imagePriorMethod;
+    FullVector prior;
+
+    int priorK;
+    double alphaImagePrior;
+
+  public:
+
+    enum
+    {
+      IMAGE_PRIOR_BEST_K = 0,
+      IMAGE_PRIOR_PSEUDOPROB
+    };
+
+    /** simple constructor */
+    PSSImageLevelPrior ( int imagePriorMethod, int priorK, double alphaImagePrior );
+
+    /** simple destructor */
+    virtual ~PSSImageLevelPrior();
+
+    void setPrior ( FullVector & prior );
+
+    void postprocess ( NICE::Image & result, NICE::MultiChannelImageT<double> & probabilities );
+};
+
+
+} // namespace
+
+#endif

+ 27 - 0
semseg/postsegmentation/PostSemSeg.cpp

@@ -0,0 +1,27 @@
+/** 
+* @file PostSemSeg.cpp
+* @brief abstract interface for post processing steps concerning semantic segmentation routines
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#include <iostream>
+
+#include "PostSemSeg.h"
+
+using namespace OBJREC;
+
+using namespace std;
+
+using namespace NICE;
+
+
+
+PostSemSeg::PostSemSeg()
+{
+}
+
+PostSemSeg::~PostSemSeg()
+{
+}
+

+ 38 - 0
semseg/postsegmentation/PostSemSeg.h

@@ -0,0 +1,38 @@
+/**
+* @file PostSemSeg.h
+* @brief abstract interface for post processing steps concerning semantic segmentation routines
+* @author Erik Rodner
+* @date 03/19/2009
+
+*/
+#ifndef POSTSEMSEGINCLUDE
+#define POSTSEMSEGINCLUDE
+
+#include "core/image/MultiChannelImageT.h"
+
+
+namespace OBJREC
+{
+
+/** abstract interface for post processing steps concerning semantic segmentation routines */
+class PostSemSeg
+{
+
+  protected:
+
+  public:
+
+    /** simple constructor */
+    PostSemSeg();
+
+    /** simple destructor */
+    virtual ~PostSemSeg();
+
+    virtual void postprocess ( NICE::Image & result, NICE::MultiChannelImageT<double> & probabilities ) = 0;
+
+};
+
+
+} // namespace
+
+#endif

+ 561 - 0
semseg/postsegmentation/RelativeLocationPrior.cpp

@@ -0,0 +1,561 @@
+#include "RelativeLocationPrior.h"
+
+#include "core/image/Filter.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+RelativeLocationPrior::RelativeLocationPrior()
+{
+  conf = new Config();
+  mapsize = 200;
+}
+
+RelativeLocationPrior::RelativeLocationPrior ( const Config *_conf ) : conf ( _conf )
+{
+}
+
+void RelativeLocationPrior::setClassNo ( int _classno )
+{
+  classno = _classno;
+  Init();
+}
+
+void RelativeLocationPrior::Init()
+{
+  std::string section = "PostProcessRLP";
+  mapsize = conf->gI ( section, "mapsize", 200 );
+
+  featdim = classno * 3;
+
+  // create the prior maps
+  for ( int i = 0; i < classno; i++ )
+  {
+    NICE::MultiChannelImageT<double> *tmp  = new NICE::MultiChannelImageT<double> ( mapsize, mapsize, classno);
+    tmp->setAll ( 0.0 );
+    priormaps.push_back ( tmp );
+  }
+}
+
+RelativeLocationPrior::~RelativeLocationPrior()
+{
+  for ( int i = 0; i < classno; i++ )
+  {
+    delete priormaps[i];
+  }
+}
+
+void RelativeLocationPrior::trainPriorsMaps ( Examples &regions, int xsize, int ysize )
+{
+  for ( int j = 0; j < ( int ) regions.size(); j++ )
+  {
+    for ( int i = 0; i < ( int ) regions.size(); i++ )
+    {
+      if ( i == j )
+        continue;
+
+      int x = regions[i].second.x - regions[j].second.x;
+      int y = regions[i].second.y - regions[j].second.y;
+
+      convertCoords ( x, xsize );
+      convertCoords ( y, ysize );
+
+      priormaps[regions[i].first]->set ( x, y, priormaps[regions[i].first]->get ( x, y, regions[j].first ) + 1.0/*regions[j].second.weight*/, regions[j].first );
+    }
+  }
+}
+
+void RelativeLocationPrior::finishPriorsMaps ( ClassNames &cn )
+{
+  // normalize the prior maps
+  double alpha = 5;
+  for ( int i = 0; i < classno; i++ )
+  {
+    for ( int j = 0; j < classno; j++ )
+    {
+      double val = 0.0;
+
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+          val = std::max ( val, priormaps[i]->get ( x, y, j ) );
+        }
+      }
+      if ( val != 0.0 )
+      {
+        for ( int x = 0; x < mapsize; x++ )
+        {
+          for ( int y = 0; y < mapsize; y++ )
+          {
+            double old = priormaps[i]->get ( x, y, j );
+
+#undef DIRICHLET
+#ifdef DIRICHLET
+            old = ( old + alpha ) / ( val + classno * alpha );
+#else
+            old /= val;
+#endif
+            priormaps[i]->set ( x, y, old, j );
+          }
+        }
+      }
+    }
+  }
+
+  double sigma = 0.1 * ( double ) mapsize; // 10% of the width/height of the maps
+
+  // smooth (Gaussian blur) all prior maps
+  for ( int j = 0; j < classno; j++ )
+  {
+    for ( int i = 0; i < classno; i++ )
+    {
+      NICE::FloatImage tmp ( mapsize, mapsize );
+      tmp.set ( 0.0 );
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+          tmp.setPixelQuick ( x, y, priormaps[j]->get ( x, y, i ) );
+        }
+      }
+
+      NICE::FloatImage out;
+      //FourierLibrary::gaussFilterD(tmp, out, sigma);
+      NICE::filterGaussSigmaApproximate<float, float, float> ( tmp, sigma, &out );
+
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+          priormaps[j]->set ( x, y, out.getPixel ( x, y ), i );
+        }
+      }
+    }
+  }
+
+  // normalize so that the sum over all classes at each position equals 1
+  for ( int i = 0; i < classno; i++ )
+  {
+    for ( int x = 0; x < mapsize; x++ )
+    {
+      for ( int y = 0; y < mapsize; y++ )
+      {
+        double val = 0.0;
+        for ( int j = 0; j < classno; j++ )
+        {
+          val += priormaps[i]->get ( x, y, j );
+        }
+        if ( val != 0.0 )
+        {
+          for ( int j = 0; j < classno; j++ )
+          {
+            double old = priormaps[i]->get ( x, y, j );
+            old /= val;
+            priormaps[i]->set ( x, y, old, j );
+          }
+        }
+      }
+    }
+  }
+
+#undef VISDEBUG
+#ifdef VISDEBUG
+#ifndef NOVISUAL
+  NICE::ColorImage rgbim ( ( classno - 1 ) * ( mapsize + 10 ), ( classno - 1 ) * ( mapsize + 10 ) );
+
+  double maxval = -numeric_limits<double>::max();
+  double minval = numeric_limits<double>::max();
+
+  for ( int j = 0; j < classno; j++ )
+  {
+    if ( j == 6 ) continue;
+    for ( int i = 0; i < classno; i++ )
+    {
+      if ( i == 6 ) continue;
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+          double val = priormaps[j]->get ( x, y, i );
+          maxval = std::max ( val, maxval );
+          minval = std::min ( val, minval );
+        }
+      }
+    }
+  }
+
+  int jcounter = 0;
+  for ( int j = 0; j < classno; j++ )
+  {
+    if ( j == 6 ) continue;
+    int icounter = 0;
+    for ( int i = 0; i < classno; i++ )
+    {
+      if ( i == 6 ) continue;
+
+      NICE::FloatImage tmp ( mapsize, mapsize );
+      tmp.set ( 0.0 );
+
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+          tmp.setPixel ( x, y, priormaps[j]->get ( x, y, i ) );
+        }
+      }
+
+      tmp.setPixel ( 0, 0, maxval );
+      tmp.setPixel ( 0, 1, minval );
+      cout << "i: " << cn.text ( i ) << endl;
+      NICE::ColorImage imgrgb2 ( mapsize, mapsize );
+      ICETools::convertToRGB ( tmp, imgrgb2 );
+
+      imgrgb2.setPixel ( 0, 0, 2, imgrgb2.getPixel ( 1, 0, 2 ) );
+      imgrgb2.setPixel ( 0, 1, 2, imgrgb2.getPixel ( 1, 1, 2 ) );
+      imgrgb2.setPixel ( 0, 0, 0, imgrgb2.getPixel ( 1, 0, 0 ) );
+      imgrgb2.setPixel ( 0, 1, 0, imgrgb2.getPixel ( 1, 1, 0 ) );
+      imgrgb2.setPixel ( 0, 0, 1, imgrgb2.getPixel ( 1, 0, 1 ) );
+      imgrgb2.setPixel ( 0, 1, 1, imgrgb2.getPixel ( 1, 1, 1 ) );
+
+      for ( int y = 0; y < mapsize; y++ )
+      {
+        for ( int x = 0; x < mapsize; x++ )
+        {
+          rgbim.setPixel ( x + jcounter* ( mapsize + 10 ), y + icounter* ( mapsize + 10 ), 2, imgrgb2.getPixel ( x, y, 2 ) );
+          rgbim.setPixel ( x + jcounter* ( mapsize + 10 ), y + icounter* ( mapsize + 10 ), 0, imgrgb2.getPixel ( x, y, 0 ) );
+          rgbim.setPixel ( x + jcounter* ( mapsize + 10 ), y + icounter* ( mapsize + 10 ), 1, imgrgb2.getPixel ( x, y, 1 ) );
+        }
+      }
+      icounter++;
+    }
+    jcounter++;
+  }
+  rgbim.write ( "tmp.ppm" );
+#endif
+#endif
+}
+
+void RelativeLocationPrior::trainClassifier ( Examples &regions, NICE::MultiChannelImageT<double> & probabilities )
+{
+  // create a feature vector for every region and add it to the training set
+  getFeature ( regions, probabilities );
+
+  for ( int i = 0; i < ( int ) regions.size(); i++ )
+  {
+    trainingsdata.push_back ( pair<int, Example> ( regions[i].first, regions[i].second ) );
+    regions[i].second.svec = NULL;
+  }
+}
+
+void RelativeLocationPrior::finishClassifier()
+{
+  //////////////////////////////
+  // train the classifiers    //
+  //////////////////////////////
+  FeaturePool fp;
+  Feature *f = new SparseVectorFeature ( featdim );
+  f->explode ( fp );
+  delete f;
+
+  //feature size
+  int s = 3;
+
+  classifiers.resize ( classno );
+  for ( int i = 0; i < classno; i++ )
+  {
+    classifiers[i] = SLR ( conf, "ClassifierSMLR" );
+    Examples ex2;
+    int countex = 0;
+    for ( int j = 0; j < ( int ) trainingsdata.size(); j++ )
+    {
+      Example e;
+      int z = 0;
+      e.svec = new SparseVector ( s + 1 );
+      for ( int k = i * s; k < i*s + s; k++, z++ )
+      {
+        double val = trainingsdata[j].second.svec->get ( k );
+        if ( val != 0.0 )
+          ( *e.svec ) [z] = val;
+      }
+      ( *e.svec ) [s] = 1.0;
+
+      ex2.push_back ( pair<int, Example> ( trainingsdata[j].first, e ) );
+
+      if ( trainingsdata[j].first == i )
+        countex++;
+    }
+
+    if ( ex2.size() <= 2 || countex < 1 )
+      continue;
+
+    classifiers[i].train ( fp, ex2, i );
+
+    for ( int j = 0; j < ( int ) ex2.size(); j++ )
+    {
+      delete ex2[j].second.svec;
+      ex2[j].second.svec = NULL;
+    }
+  }
+
+  trainingsdata.clear();
+}
+
+void RelativeLocationPrior::postprocess ( Examples &regions, NICE::MultiChannelImageT<double> & probabilities )
+{
+  getFeature ( regions, probabilities );
+
+  int s = 3;
+
+  for ( int i = 0; i < ( int ) regions.size(); i++ )
+  {
+    FullVector overall_distribution ( classno + 1 );
+    overall_distribution[classno] = 0.0;
+
+    double maxp = -numeric_limits<double>::max();
+    int bestclass = 0;
+
+    double sum  = 0.0;
+
+    for ( int c = 0; c < classno; c++ )
+    {
+      Example e;
+      int z = 0;
+      e.svec = new SparseVector ( s + 1 );
+      for ( int k = c * s; k < c*s + s; k++, z++ )
+      {
+        double val = regions[i].second.svec->get ( k );
+        if ( val != 0.0 )
+          ( *e.svec ) [z] = val;
+      }
+      ( *e.svec ) [s] = 1.0;
+
+      overall_distribution[c] = classifiers[c].classify ( e );
+
+      sum += overall_distribution[c];
+
+      if ( maxp < overall_distribution[c] )
+      {
+        bestclass = c;
+        maxp = overall_distribution[c];
+      }
+      delete e.svec;
+      e.svec = NULL;
+    }
+
+    for ( int c = 0; c < classno; c++ )
+    {
+      overall_distribution[c] /= sum;
+    }
+
+    ClassificationResult r = ClassificationResult ( bestclass, overall_distribution );
+
+    if ( bestclass < 0 )
+    {
+      regions[i].second.svec->store ( cout );
+      cout << endl;
+      cout << "fehler: besclass=" << bestclass << endl;
+      for ( int j = 0; j < ( int ) probabilities.channels(); j++ )
+      {
+        cout << "j: " << j << " score: " << r.scores[j] << endl;
+      }
+    }
+    regions[i].first = bestclass;
+  }
+}
+
+void RelativeLocationPrior::convertCoords ( int &x, int xsize )
+{
+  x = ( int ) round ( ( double ( x ) + ( double ) xsize ) / ( 2.0 * ( double ) xsize ) * ( ( double ) mapsize - 1.0 ) );
+
+  x = std::min ( x, mapsize - 1 );
+  x = std::max ( x, 0 );
+}
+
+void RelativeLocationPrior::getFeature ( Examples &regions, NICE::MultiChannelImageT<double> & probabilities )
+{
+
+  int xsize, ysize;
+  xsize = probabilities.width();
+  ysize = probabilities.height();
+
+  // get best classes
+  vector<int> bestclasses ( regions.size(), -1 );
+  for ( int r = 0; r < ( int ) regions.size(); r++ )
+  {
+    double maxval = -numeric_limits<double>::max();
+    for ( int c = 0; c < ( int ) probabilities.channels(); c++ )
+    {
+      double val = probabilities.get ( regions[r].second.x, regions[r].second.y, c );
+      if ( maxval < val )
+      {
+        bestclasses[r] = c;
+        maxval = val;
+      }
+    }
+  }
+
+  vector<double> alpha;
+  for ( int r = 0; r < ( int ) regions.size(); r++ )
+  {
+    double tmpalpha = probabilities.get ( regions[r].second.x, regions[r].second.y, bestclasses[r] ) * regions[r].second.weight;
+
+    alpha.push_back ( tmpalpha );
+  }
+
+  // build the relative-location feature f_relloc
+  vector<vector<double> > vother;
+  vector<vector<double> > vself;
+  for ( int i = 0; i < ( int ) regions.size(); i++ )
+  {
+    vector<double> v, w;
+    vother.push_back ( v );
+    vself.push_back ( w );
+    for ( int c = 0; c < classno; c++ )
+    {
+      double tmp_vother = 0.0;
+      double tmp_self = 0.0;
+
+      for ( int j = 0; j < ( int ) regions.size(); j++ )
+      {
+        if ( j == i )
+          continue;
+
+        int x = regions[i].second.x - regions[j].second.x;
+        int y = regions[i].second.y - regions[j].second.y;
+
+        convertCoords ( x, xsize );
+        convertCoords ( y, ysize );
+
+        double val = priormaps[c]->get ( x, y, bestclasses[j] ) * alpha[j]; ;
+
+        if ( bestclasses[j] == bestclasses[i] ) // parts of the same object
+        {
+          tmp_self += val;
+        }
+        else // context information
+        {
+          tmp_vother += val;
+        }
+      }
+
+      if ( fabs ( tmp_self ) < 10e-7 )
+        tmp_self = 10e-7;
+      if ( fabs ( tmp_vother ) < 10e-7 )
+        tmp_vother = 10e-7;
+
+      vother[i].push_back ( tmp_vother );
+      vself[i].push_back ( tmp_self );
+    }
+  }
+
+  for ( int r = 0; r < ( int ) regions.size(); r++ )
+  {
+    if ( regions[r].second.svec != NULL )
+    {
+      delete regions[r].second.svec;
+      regions[r].second.svec = NULL;
+    }
+    if ( regions[r].second.vec != NULL )
+    {
+      delete regions[r].second.vec;
+      regions[r].second.vec = NULL;
+    }
+
+    regions[r].second.svec = new SparseVector ( classno*3 );
+
+    int counter = 0;
+
+    for ( int i = 0; i < classno; i++ )
+    {
+      // appearance feature (old probability for each class)
+      double fapp = log ( probabilities.get ( regions[r].second.x, regions[r].second.y, i ) );
+
+      if ( fabs ( fapp ) > 10e-7 )
+        ( * ( regions[r].second.svec ) ) [counter] = fapp;
+      counter++;
+
+      double val = log ( vother[r][i] );
+
+      if ( fabs ( val ) > 10e-7 )
+        ( * ( regions[r].second.svec ) ) [counter] = val;
+      counter++;
+
+      val = log ( vself[r][i] );
+
+      if ( fabs ( val ) > 10e-7 )
+        ( * ( regions[r].second.svec ) ) [counter] = val;
+      counter++;
+    }
+  }
+}
+
+void RelativeLocationPrior::restore ( istream & is, int format )
+{
+  is >> classno;
+  is >> mapsize;
+  is >> featdim;
+
+  // create the prior maps
+  for ( int i = 0; i < classno; i++ )
+  {
+    NICE::MultiChannelImageT<double> *tmp  = new NICE::MultiChannelImageT<double> ( mapsize, mapsize, classno);
+    tmp->setAll ( 0.0 );
+    priormaps.push_back ( tmp );
+  }
+
+  double val;
+  for ( int i = 0; i < classno; i++ )
+  {
+    for ( int j = 0; j < classno; j++ )
+    {
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+
+          is >> val;
+          priormaps[i]->set ( x, y, val, j );
+        }
+      }
+    }
+  }
+
+  classifiers.resize ( classno );
+  for ( int i = 0; i < classno; i++ )
+  {
+    classifiers[i] = SLR();
+    classifiers[i].restore ( is, format );
+  }
+}
+
+void RelativeLocationPrior::store ( ostream & os, int format ) const
+{
+  os << classno << " ";
+  os << mapsize << " ";
+  os << featdim << endl;
+  for ( int i = 0; i < classno; i++ )
+  {
+    for ( int j = 0; j < classno; j++ )
+    {
+      for ( int x = 0; x < mapsize; x++ )
+      {
+        for ( int y = 0; y < mapsize; y++ )
+        {
+          os << priormaps[i]->get ( x, y, j ) << " ";
+        }
+      }
+    }
+  }
+
+  for ( int i = 0; i < classno; i++ )
+  {
+    classifiers[i].store ( os, format );
+  }
+}
+
+void RelativeLocationPrior::clear ()
+{
+
+}

+ 135 - 0
semseg/postsegmentation/RelativeLocationPrior.h

@@ -0,0 +1,135 @@
+/**
+ * @file RelativeLocationPrior.h
+ * @brief a post-processing step after semantic segmentation which uses relative location priors
+ * @author Björn Fröhlich
+ * @date 06/10/2009
+
+ */
+#ifndef RELATIVELOCATIONPRIORINCLUDE
+#define RELATIVELOCATIONPRIORINCLUDE
+
+#include "core/image/MultiChannelImageT.h"
+
+#include "vislearning/cbaselib/CachedExample.h"
+#include "vislearning/baselib/Preprocess.h"
+#include "vislearning/baselib/Globals.h"
+
+#include "vislearning/classifier/fpclassifier/logisticregression/SLR.h"
+#include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
+
+#include "vislearning/features/fpfeatures/SparseVectorFeature.h"
+
+#include "vislearning/cbaselib/ClassNames.h"
+
+namespace OBJREC
+{
+
+class RelativeLocationPrior : public NICE::Persistent
+{
+
+  protected:
+    //! the priormaps
+    std::vector<NICE::MultiChannelImageT<double> *> priormaps;
+
+    //! the configfile
+    const NICE::Config *conf;
+
+    //! count of classes
+    int classno;
+
+    //! size of the priormaps (mapsize x mapsize)
+    int mapsize;
+
+    //! convert Image coordinates to priormaps coordinates
+    void convertCoords ( int &x, int xsize );
+
+    //! the trainingsdata will be added subsequently to this object
+    Examples trainingsdata;
+
+    //! the one vs all sparse logistic classifiers
+    std::vector<SLR> classifiers;
+
+    //! dimension of the features
+    int featdim;
+
+  public:
+
+    /** simple constructor */
+    RelativeLocationPrior();
+
+    /** simple constructor */
+    RelativeLocationPrior ( const NICE::Config *_conf );
+
+    /** simple destructor */
+    ~RelativeLocationPrior();
+
+    /**
+     * set the count of classes
+     * @param _classno count of classes
+     */
+    void setClassNo ( int _classno );
+
+    /** initialize the RelativeLocationPrior Variables*/
+    void Init();
+
+    /**
+     * estimate the relative location prior maps from the training image
+     * @param regions input regions with size, position and label
+     */
+    void trainPriorsMaps ( Examples &regions, int xsize, int ysize );
+
+    /**
+     * finish the priors maps
+     */
+    void finishPriorsMaps ( ClassNames &cn );
+
+    /**
+     * collect training data for the classifier from the regions and probability maps
+     * @param regions input regions with size and position
+     * @param probabilities the probabiltiy maps
+     */
+    void trainClassifier ( Examples &regions, NICE::MultiChannelImageT<double> & probabilities );
+
+    /**
+     * finish the classfiers
+     */
+    void finishClassifier();
+
+    /**
+     * appends the featurevector to the given example
+     * @param regions input regions with size and position
+     * @param probabilities the probabiltiy maps
+     */
+    void getFeature ( Examples &regions, NICE::MultiChannelImageT<double> & probabilities );
+
+    /**
+     * uses the rlp for reclassification
+     * @param regions
+     * @param result
+     * @param probabilities
+     */
+    void postprocess ( Examples &regions, NICE::MultiChannelImageT<double> & probabilities );
+
+    /**
+     * load data from an input stream
+     * @param is input stream
+     * @param format
+     */
+    void restore ( std::istream & is, int format = 0 );
+
+    /**
+     * write data to an output stream
+     * @param os outputstream
+     * @param format
+     */
+    void store ( std::ostream & os, int format = 0 ) const;
+
+    /**
+     * clear all informations
+     */
+    void clear ();
+};
+
+} //namespace
+
+#endif

+ 3 - 0
semseg/postsegmentation/libdepend.inc

@@ -0,0 +1,3 @@
+$(call PKG_DEPEND_EXT,ICE)
+$(call PKG_DEPEND_INT,core)
+$(call PKG_DEPEND_INT,vislearning/mrf)