Browse Source

Modified SemSegNovelty to compute active-learning scores

Alexander Freytag 12 years ago
parent
commit
3ff94800f5
3 changed files with 290 additions and 29 deletions
  1. 29 20
      semseg/SemSegCsurka.cpp
  2. 256 9
      semseg/SemSegNovelty.cpp
  3. 5 0
      semseg/SemSegNovelty.h

+ 29 - 20
semseg/SemSegCsurka.cpp

@@ -11,7 +11,8 @@ using namespace NICE;
 using namespace OBJREC;
 using namespace OBJREC;
 
 
 #undef DEBUG_CSURK
 #undef DEBUG_CSURK
-// #undef UNCERTAINTY
+#undef UNCERTAINTY
+// #define UNCERTAINTY
 
 
 SemSegCsurka::SemSegCsurka ( const Config *conf,
 SemSegCsurka::SemSegCsurka ( const Config *conf,
                              const MultiDataset *md )
                              const MultiDataset *md )
@@ -1526,6 +1527,10 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   
   
 #endif
 #endif
 
 
+  #ifdef UNCERTAINTY
+  std::cerr << "compute values for uncertainty stuff as well" << std::endl;
+  #endif
+  
   if ( classifier != NULL )
   if ( classifier != NULL )
   {
   {
     clog << "[log] SemSegCsruka::classifyregions: Wahrscheinlichkeitskarten erstellen: classifier != NULL" << endl;
     clog << "[log] SemSegCsruka::classifyregions: Wahrscheinlichkeitskarten erstellen: classifier != NULL" << endl;
@@ -1612,7 +1617,7 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
         std::vector<double> diffToNegative; diffToNegative.clear();
         std::vector<double> diffToNegative; diffToNegative.clear();
         double diffToNegativeSum(0.0);
         double diffToNegativeSum(0.0);
         
         
-        for ( int j = 0 ; j < fV.size(); j++ )
+        for ( int j = 0 ; j < r.scores.size(); j++ )
         {
         {
           if ( useclass[j] == 0 )
           if ( useclass[j] == 0 )
             continue;
             continue;
@@ -1801,6 +1806,10 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
     }
     }
   }
   }
 
 
+  #ifdef UNCERTAINTY
+  std::cerr << "uncertainty values and derived scores successfully computed" << std::endl;
+  #endif
+
 #ifdef UNCERTAINTY
 #ifdef UNCERTAINTY
   cout << "maxvdirect: " << maxu << " minvdirect: " << minu << endl;
   cout << "maxvdirect: " << maxu << " minvdirect: " << minu << endl;
   //pre-allocate the image for filtering lateron
   //pre-allocate the image for filtering lateron
@@ -1808,7 +1817,7 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   
   
   //just store the first scale
   //just store the first scale
   ICETools::convertToRGB ( uncert[0], imgrgb );
   ICETools::convertToRGB ( uncert[0], imgrgb );
-  imgrgb.write ( out.str() + "rough.png" );
+  imgrgb.write ( out.str() + "rough.ppm" );
   
   
   //pre-allocate memory for filtering of scales
   //pre-allocate memory for filtering of scales
   FloatImage gaussGPUncertainty ( xsize, ysize );
   FloatImage gaussGPUncertainty ( xsize, ysize );
@@ -1819,15 +1828,15 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
    
    
   //just store the first scale for every method
   //just store the first scale for every method
   ICETools::convertToRGB ( gpUncertainty[0], imgrgb );
   ICETools::convertToRGB ( gpUncertainty[0], imgrgb );
-  imgrgb.write ( out.str() + "gpUncertainty.png" );
+  imgrgb.write ( out.str() + "gpUncertainty.ppm" );
   ICETools::convertToRGB ( gpMean[0], imgrgb );
   ICETools::convertToRGB ( gpMean[0], imgrgb );
-  imgrgb.write ( out.str() + "gpMean.png" );
+  imgrgb.write ( out.str() + "gpMean.ppm" );
   ICETools::convertToRGB ( gpMeanRatio[0], imgrgb );
   ICETools::convertToRGB ( gpMeanRatio[0], imgrgb );
-  imgrgb.write ( out.str() + "gpMeanRatio.png" );
+  imgrgb.write ( out.str() + "gpMeanRatio.ppm" );
   ICETools::convertToRGB ( gpWeightAll[0], imgrgb );
   ICETools::convertToRGB ( gpWeightAll[0], imgrgb );
-  imgrgb.write ( out.str() + "gpWeightAll.png" );
+  imgrgb.write ( out.str() + "gpWeightAll.ppm" );
   ICETools::convertToRGB ( gpWeightRatio[0], imgrgb );
   ICETools::convertToRGB ( gpWeightRatio[0], imgrgb );
-  imgrgb.write ( out.str() + "gpWeightRatio.png" );  
+  imgrgb.write ( out.str() + "gpWeightRatio.ppm" );  
   
   
 #endif
 #endif
 
 
@@ -2018,18 +2027,18 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   gaussUncert(0,0) = 0.0;
   gaussUncert(0,0) = 0.0;
   gaussUncert(0,1) = 0.04;
   gaussUncert(0,1) = 0.04;
   ICETools::convertToRGB ( gaussUncert, imgrgb );
   ICETools::convertToRGB ( gaussUncert, imgrgb );
-  imgrgb.write ( out.str() + "filtered.png" );
+  imgrgb.write ( out.str() + "filtered.ppm" );
   
   
   ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
   ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
-  imgrgb.write ( out.str() + "gpUncertaintyFiltered.png" );
+  imgrgb.write ( out.str() + "gpUncertaintyFiltered.ppm" );
   ICETools::convertToRGB ( gaussGPMean, imgrgb );
   ICETools::convertToRGB ( gaussGPMean, imgrgb );
-  imgrgb.write ( out.str() + "gpMeanFiltered.png" );
+  imgrgb.write ( out.str() + "gpMeanFiltered.ppm" );
   ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
   ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
-  imgrgb.write ( out.str() + "gpMeanRatioFiltered.png" );
+  imgrgb.write ( out.str() + "gpMeanRatioFiltered.ppm" );
   ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
   ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
-  imgrgb.write ( out.str() + "gpWeightAllFiltered.png" );
+  imgrgb.write ( out.str() + "gpWeightAllFiltered.ppm" );
   ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
   ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
-  imgrgb.write ( out.str() + "gpWeightRatioFiltered.png" );  
+  imgrgb.write ( out.str() + "gpWeightRatioFiltered.ppm" );  
   
   
 #endif
 #endif
 
 
@@ -2238,18 +2247,18 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 //    uncert(0,0) = 1;
 //    uncert(0,0) = 1;
 //    uncert(0,1) = 0;
 //    uncert(0,1) = 0;
     ICETools::convertToRGB ( gaussUncert, imgrgb );
     ICETools::convertToRGB ( gaussUncert, imgrgb );
-    imgrgb.write ( out.str() + "region.png" );
+    imgrgb.write ( out.str() + "region.ppm" );
     
     
   ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
   ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
-  imgrgb.write ( out.str() + "gpUncertaintyRegion.png" );
+  imgrgb.write ( out.str() + "gpUncertaintyRegion.ppm" );
   ICETools::convertToRGB ( gaussGPMean, imgrgb );
   ICETools::convertToRGB ( gaussGPMean, imgrgb );
-  imgrgb.write ( out.str() + "gpMeanRegion.png" );
+  imgrgb.write ( out.str() + "gpMeanRegion.ppm" );
   ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
   ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
-  imgrgb.write ( out.str() + "gpMeanRatioRegion.png" );
+  imgrgb.write ( out.str() + "gpMeanRatioRegion.ppm" );
   ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
   ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
-  imgrgb.write ( out.str() + "gpWeightAllRegion.png" );
+  imgrgb.write ( out.str() + "gpWeightAllRegion.ppm" );
   ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
   ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
-  imgrgb.write ( out.str() + "gpWeightRatioRegion.png" );      
+  imgrgb.write ( out.str() + "gpWeightRatioRegion.ppm" );      
 #endif
 #endif
 
 
 #undef WRITEREGIONS
 #undef WRITEREGIONS

+ 256 - 9
semseg/SemSegNovelty.cpp

@@ -88,7 +88,15 @@ void SemSegNovelty::train ( const MultiDataset *md )
     forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
     forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
   }
   }
   cn.getSelection ( forbidden_classes_s, forbidden_classes );
   cn.getSelection ( forbidden_classes_s, forbidden_classes );
-  cerr << "forbidden: " << forbidden_classes_s << endl;
+  
+  //check the same thing for the training classes - this is very specific to our setup 
+  std::string forbidden_classesTrain_s = conf->gS ( "analysis", "donttrainTrain", "" );
+  if ( forbidden_classesTrain_s == "" )
+  {
+    forbidden_classesTrain_s = conf->gS ( "analysis", "forbidden_classesTrain", "" );
+  }
+  cn.getSelection ( forbidden_classesTrain_s, forbidden_classesTrain );
+
 
 
   ProgressBar pb ( "Local Feature Extraction" );
   ProgressBar pb ( "Local Feature Extraction" );
   pb.show();
   pb.show();
@@ -100,6 +108,8 @@ void SemSegNovelty::train ( const MultiDataset *md )
 
 
   int featdim = -1;
   int featdim = -1;
 
 
+  classesInUse.clear();  
+  
   LOOP_ALL_S ( *trainp )
   LOOP_ALL_S ( *trainp )
   {
   {
     //EACH_S(classno, currentFile);
     //EACH_S(classno, currentFile);
@@ -160,12 +170,19 @@ void SemSegNovelty::train ( const MultiDataset *md )
     {
     {
       for ( int x = 0; x < xsize; x += featdist )
       for ( int x = 0; x < xsize; x += featdist )
       {
       {
-        int classno = labels ( x, y );
 
 
-        if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
+        int classnoTmp = labels.getPixel ( x, y );
+        
+        if ( forbidden_classesTrain.find ( classnoTmp ) != forbidden_classesTrain.end() )
+        {
           continue;
           continue;
-
-
+        }
+        
+        if (classesInUse.find(classnoTmp) == classesInUse.end())
+        {
+          classesInUse.insert(classnoTmp);
+        }
+        
         Example example;
         Example example;
         example.vec = NULL;
         example.vec = NULL;
         example.svec = new SparseVector ( featdim );
         example.svec = new SparseVector ( featdim );
@@ -179,7 +196,7 @@ void SemSegNovelty::train ( const MultiDataset *md )
         example.svec->normalize();
         example.svec->normalize();
 
 
         example.position = imgnb;
         example.position = imgnb;
-        examples.push_back ( pair<int, Example> ( classno, example ) );
+        examples.push_back ( pair<int, Example> ( classnoTmp, example ) );
       }
       }
     }
     }
 
 
@@ -187,6 +204,16 @@ void SemSegNovelty::train ( const MultiDataset *md )
     imgnb++;
     imgnb++;
     pb.update ( trainp->count() );
     pb.update ( trainp->count() );
   }
   }
+  
+    
+  numberOfClasses = classesInUse.size();
+  std::cerr << "numberOfClasses: " << numberOfClasses << std::endl;  
+  std::cerr << "classes in use: " << std::endl;
+  for (std::set<int>::const_iterator it = classesInUse.begin(); it != classesInUse.end(); it++)
+  {
+    std::cerr << *it << " ";
+  }    
+  std::cerr << std::endl;
 
 
   pb.hide();
   pb.hide();
 
 
@@ -282,10 +309,34 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
 
 
   FloatImage uncert ( xsize, ysize );
   FloatImage uncert ( xsize, ysize );
   uncert.set ( 0.0 );
   uncert.set ( 0.0 );
+  
+  FloatImage gpUncertainty ( xsize, ysize );
+  FloatImage gpMean ( xsize, ysize );    
+  FloatImage gpMeanRatio ( xsize, ysize );  
+  FloatImage gpWeightAll ( xsize, ysize );
+  FloatImage gpWeightRatio ( xsize, ysize );  
+  
+  gpUncertainty.set ( 0.0 );
+  gpMean.set ( 0.0 );
+  gpMeanRatio.set ( 0.0 );
+  gpWeightAll.set ( 0.0 );
+  gpWeightRatio.set ( 0.0 );
 
 
   double maxunc = -numeric_limits<double>::max();
   double maxunc = -numeric_limits<double>::max();
+  
+  double maxGPUncertainty = -numeric_limits<double>::max();  
+  double maxGPMean = -numeric_limits<double>::max();  
+  double maxGPMeanRatio = -numeric_limits<double>::max();  
+  double maxGPWeightAll = -numeric_limits<double>::max();  
+  double maxGPWeightRatio = -numeric_limits<double>::max();  
+
+  
   timer.stop();
   timer.stop();
   cout << "first: " << timer.getLastAbsolute() << endl;
   cout << "first: " << timer.getLastAbsolute() << endl;
+  
+  //we need this lateron for active learning stuff
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);
+  
   timer.start();
   timer.start();
 #pragma omp parallel for
 #pragma omp parallel for
   for ( int y = 0; y < ysize; y += testWSize )
   for ( int y = 0; y < ysize; y += testWSize )
@@ -304,6 +355,143 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
       example.svec->normalize();
       example.svec->normalize();
 
 
       ClassificationResult cr = classifier->classify ( example );
       ClassificationResult cr = classifier->classify ( example );
+      
+      //we need this if we want to compute GP-AL-measure lateron
+      double minMeanAbs ( numeric_limits<double>::max() );
+      double maxMeanAbs ( 0.0 );
+      double sndMaxMeanAbs ( 0.0 );       
+      double maxMean ( -numeric_limits<double>::max() );
+      double sndMaxMean ( -numeric_limits<double>::max() );     
+      
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {   
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        
+        //check whether we found a class with higher smaller abs mean than the current minimum
+        if (abs(cr.scores[j]) < minMeanAbs)  
+          minMeanAbs = abs(cr.scores[j]);
+        //check for larger abs mean as well
+        if (abs(cr.scores[j]) > maxMeanAbs)
+        {
+          sndMaxMeanAbs = maxMeanAbs;
+          maxMeanAbs = abs(cr.scores[j]);
+        }
+        // and also for the second highest mean of all classes
+        else if (abs(cr.scores[j]) > sndMaxMeanAbs)
+        {
+          sndMaxMeanAbs = abs(cr.scores[j]);
+        }  
+        //check for larger mean without abs as well
+        if (cr.scores[j] > maxMean)
+        {
+          sndMaxMean = maxMean;
+          maxMean = cr.scores[j];
+        }
+        // and also for the second highest mean of all classes
+        else if (cr.scores[j] > sndMaxMean)
+        {
+          sndMaxMean = cr.scores[j];
+        }          
+      }
+
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+      
+      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
+      double gpUncertaintyVal = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))
+      
+      // compute results when we take the lowest mean value of all classes
+      double gpMeanVal = minMeanAbs;
+      
+      //look at the difference in the absolut mean values for the most plausible class
+      // and the second most plausible class
+      double gpMeanRatioVal= maxMean - sndMaxMean;
+      
+       double gpWeightAllVal ( 0.0 );
+       double gpWeightRatioVal ( 0.0 );
+
+       if ( numberOfClasses > 2)
+       {
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }          
+          
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(cr.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(cr.scores[j] - 1);
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAllVal = thirdTerm*firstTerm;        
+        
+        //now look on the ratio of the resulting weights for the most plausible
+        // against the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatioVal = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, look for this feature how it would affect to whole model (summarized by weight-vector alpha), if we would 
+        //use it as an additional training example
+        //TODO this would be REALLY computational demanding. Do we really want to do this?
+  //         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+  //         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
+       }
+       else //binary scenario
+       {
+         gpWeightAllVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+         gpWeightAllVal *= firstTerm;
+         gpWeightRatioVal = gpWeightAllVal;
+       }
 
 
       int xs = std::max(0, x - testWSize/2);
       int xs = std::max(0, x - testWSize/2);
       int xe = std::min(xsize - 1, x + testWSize/2);
       int xe = std::min(xsize - 1, x + testWSize/2);
@@ -315,15 +503,35 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
         {
         {
           for ( int j = 0 ; j < cr.scores.size(); j++ )
           for ( int j = 0 ; j < cr.scores.size(); j++ )
           {
           {
-            probabilities ( xl, yl, j ) = cr.scores[j];
+            probabilities ( xl, yl, j ) = cr.scores[j];           
           }
           }
           segresult ( xl, yl ) = cr.classno;
           segresult ( xl, yl ) = cr.classno;
           uncert ( xl, yl ) = cr.uncertainty;
           uncert ( xl, yl ) = cr.uncertainty;
+          
+          gpUncertainty ( xl, yl ) = gpUncertaintyVal;
+          gpMean ( xl, yl ) = gpMeanVal;
+          gpMeanRatio ( xl, yl ) = gpMeanRatioVal;
+          gpWeightAll ( xl, yl ) = gpWeightAllVal;
+          gpWeightRatio ( xl, yl ) = gpWeightRatioVal;    
         }
         }
       }
       }
 
 
       if (maxunc < cr.uncertainty)
       if (maxunc < cr.uncertainty)
         maxunc = cr.uncertainty;
         maxunc = cr.uncertainty;
+      
+      if (maxGPUncertainty < gpUncertaintyVal)
+        maxGPUncertainty = gpUncertaintyVal;
+      if (maxGPMean < gpMeanVal)
+        maxGPMean = gpMeanVal;
+      if (maxGPMeanRatio < gpMeanRatioVal)
+        maxGPMeanRatio = gpMeanRatioVal;
+      if (maxGPWeightAll < gpMeanRatioVal)
+        maxGPWeightAll = gpWeightAllVal;
+      if (maxGPWeightRatio < gpWeightRatioVal)
+        maxGPWeightRatio = gpWeightRatioVal;      
+      
+//       std::cerr << "uncertainty: " << gpUncertaintyVal << " minMean: " << gpMeanVal << " gpMeanRatio: " << gpMeanRatioVal << " weightAll: " << gpWeightAllVal << " weightRatio: "<< gpWeightRatioVal << std::endl;
+      
       example.svec->clear();
       example.svec->clear();
     }
     }
     delete example.svec;
     delete example.svec;
@@ -342,13 +550,52 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   std::vector< std::string > list2;
   std::vector< std::string > list2;
   StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
   StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
   out << uncertdir << "/" << list2.back();
   out << uncertdir << "/" << list2.back();
-
+  
   uncert.writeRaw(out.str() + ".rawfloat");
   uncert.writeRaw(out.str() + ".rawfloat");
   uncert(0, 0) = 0.0;
   uncert(0, 0) = 0.0;
-  uncert(0, 1) = 1.0;
+  uncert(0, 1) = 1.0+gpNoise;
   ICETools::convertToRGB ( uncert, imgrgb );
   ICETools::convertToRGB ( uncert, imgrgb );
   imgrgb.write ( out.str() + "rough.png" );
   imgrgb.write ( out.str() + "rough.png" );
 
 
+  //invert images such that large numbers correspond to high impact, high variance, high importance, high novelty, ...
+  for ( int y = 0; y < ysize; y++)
+  {
+    for (int x = 0; x < xsize; x++)
+    {
+      gpUncertainty(x,y) =  maxGPUncertainty - gpUncertainty(x,y);
+      gpMean(x,y) = maxGPMean - gpMean(x,y);
+      gpMeanRatio(x,y) = maxGPMeanRatio - gpMeanRatio(x,y);
+      gpWeightRatio(x,y) = maxGPWeightRatio - gpWeightRatio(x,y);
+    }
+  }
+  
+  
+  //  
+  gpUncertainty(0, 0) = 0.0;
+  gpUncertainty(0, 1) = maxGPUncertainty;
+  ICETools::convertToRGB ( gpUncertainty, imgrgb );
+  imgrgb.write ( out.str() + "gpUncertainty.png" );
+  //
+  gpMean(0, 0) = 0.0;
+  gpMean(0, 1) = maxGPMean;  
+  ICETools::convertToRGB ( gpMean, imgrgb );
+  imgrgb.write ( out.str() + "gpMean.png" );
+  //
+  gpMeanRatio(0, 0) = 0.0;
+  gpMeanRatio(0, 1) = maxGPMeanRatio;   
+  ICETools::convertToRGB ( gpMeanRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatio.png" );
+  //
+  gpWeightAll(0, 0) = 0.0;
+  gpWeightAll(0, 1) = maxGPWeightAll;     
+  ICETools::convertToRGB ( gpWeightAll, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAll.png" );
+  //
+  gpWeightRatio(0, 0) = 0.0;
+  gpWeightRatio(0, 1) = maxGPWeightRatio;     
+  ICETools::convertToRGB ( gpWeightRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatio.png" );    
+
 
 
   timer.stop();
   timer.stop();
   cout << "last: " << timer.getLastAbsolute() << endl;
   cout << "last: " << timer.getLastAbsolute() << endl;

+ 5 - 0
semseg/SemSegNovelty.h

@@ -54,6 +54,11 @@ class SemSegNovelty : public SemanticSegmentation
     
     
     //! set of forbidden/background classes
     //! set of forbidden/background classes
     std::set<int> forbidden_classes;
     std::set<int> forbidden_classes;
+    std::set<int> forbidden_classesTrain;
+    std::set<int> classesInUse;
+    
+    //! obviously, the number of classes used for training
+    int numberOfClasses;
     
     
     //! where to save the uncertainty
     //! where to save the uncertainty
     std::string uncertdir;
     std::string uncertdir;