
SemSegNovelty restructured such that novelty method is selectable

Alexander Freytag 12 years ago
parent
commit
cb2fbb6118
2 changed files with 434 additions and 265 deletions
  1. semseg/SemSegNovelty.cpp (+411, -264)
  2. semseg/SemSegNovelty.h (+23, -1)

+ 411 - 264
semseg/SemSegNovelty.cpp

@@ -86,7 +86,7 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
   }
   
   //define which measure for "novelty" we want to use
-  string noveltyMethodString = conf->gS( section,  "noveltyMethod", "gp-variance");
+  noveltyMethodString = conf->gS( section,  "noveltyMethod", "gp-variance");
   if (noveltyMethodString.compare("gp-variance") == 0)  // novel = large variance
   {
     this->noveltyMethod = GPVARIANCE;
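
The restructuring hinges on mapping the config string to the NoveltyMethod enum once, in the constructor, and dispatching on the enum later. Below is a minimal, self-contained sketch of that pattern, stripped of the NICE::Config machinery; only "gp-variance" (the default) is visible in this hunk, so the remaining method strings are assumptions, not the real config values.

```cpp
#include <iostream>
#include <map>
#include <string>

// enum values as used by the switch further down in SemSegNovelty.cpp
enum NoveltyMethod { GPVARIANCE, GPUNCERTAINTY, GPMINMEAN,
                     GPMEANRATIO, GPWEIGHTALL, GPWEIGHTRATIO };

// map a config string to the enum; "gp-variance" is the documented default,
// the other strings are hypothetical stand-ins for the real config values
NoveltyMethod parseNoveltyMethod ( const std::string & s )
{
  static const std::map<std::string, NoveltyMethod> lookup = {
    { "gp-variance",    GPVARIANCE    },
    { "gp-uncertainty", GPUNCERTAINTY },
    { "gp-mean",        GPMINMEAN     },
    { "gp-meanRatio",   GPMEANRATIO   },
    { "gp-weightAll",   GPWEIGHTALL   },
    { "gp-weightRatio", GPWEIGHTRATIO }
  };
  std::map<std::string, NoveltyMethod>::const_iterator it = lookup.find ( s );
  return ( it != lookup.end() ) ? it->second : GPVARIANCE; // fall back to the default
}

int main ()
{
  // unknown strings fall back to GPVARIANCE, mirroring the constructor's default
  std::cout << parseNoveltyMethod ( "gp-mean" ) << std::endl; // prints 2
  return 0;
}
```

Keeping both the string (reused as a filename suffix in the writeRaw call further down) and the enum (for cheap dispatch in the switch) is presumably why this commit promotes noveltyMethodString to a class member.
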
@@ -406,37 +406,9 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   FloatImage noveltyImage ( xsize, ysize );
   noveltyImage.set ( 0.0 );
   
-  FloatImage uncert ( xsize, ysize );
-  uncert.set ( 0.0 );  
-  
-  FloatImage gpUncertainty ( xsize, ysize );
-  FloatImage gpMean ( xsize, ysize );    
-  FloatImage gpMeanRatio ( xsize, ysize );  
-  FloatImage gpWeightAll ( xsize, ysize );
-  FloatImage gpWeightRatio ( xsize, ysize );  
-  
-  gpUncertainty.set ( 0.0 );
-  gpMean.set ( 0.0 );
-  gpMeanRatio.set ( 0.0 );
-  gpWeightAll.set ( 0.0 );
-  gpWeightRatio.set ( 0.0 );
-
-  double maxNovelty = -numeric_limits<double>::max();
-  
-  double maxunc = -numeric_limits<double>::max();
-  
-  double maxGPUncertainty = -numeric_limits<double>::max();  
-  double maxGPMean = -numeric_limits<double>::max();  
-  double maxGPMeanRatio = -numeric_limits<double>::max();  
-  double maxGPWeightAll = -numeric_limits<double>::max();  
-  double maxGPWeightRatio = -numeric_limits<double>::max();  
-
   timer.stop();
   cout << "first: " << timer.getLastAbsolute() << endl;
-  
-  //we need this later on for active learning stuff
-  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);
-  
+    
   timer.start();
   this->computeClassificationResults( feats, segresult, probabilities, xsize, ysize, featdim);
   timer.stop();
@@ -453,6 +425,26 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
          this->computeNoveltyByGPUncertainty( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
          break;         
     }
+    case GPMINMEAN:
+    {
+         this->computeNoveltyByGPMean( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPMEANRATIO:
+    {
+         this->computeNoveltyByGPMeanRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPWEIGHTALL:
+    {
+         this->computeNoveltyByGPWeightAll( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }
+    case GPWEIGHTRATIO:
+    {
+         this->computeNoveltyByGPWeightRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize,  featdim );
+         break;         
+    }    
     default:
     {
          //do nothing, keep the image constant to 0.0
@@ -461,203 +453,7 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
          
   }
   
-#pragma omp parallel for
-  for ( int y = 0; y < ysize; y += testWSize )
-  {
-    Example example;
-    example.vec = NULL;
-    example.svec = new SparseVector ( featdim );
-    for ( int x = 0; x < xsize; x += testWSize)
-    {
-      for ( int f = 0; f < featdim; f++ )
-      {
-        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
-        if ( val > 1e-10 )
-          ( *example.svec ) [f] = val;
-      }
-      example.svec->normalize();
-
-      ClassificationResult cr = classifier->classify ( example );
-      
-      //we need this if we want to compute the GP-AL measure later on
-      double minMeanAbs ( numeric_limits<double>::max() );
-      double maxMeanAbs ( 0.0 );
-      double sndMaxMeanAbs ( 0.0 );       
-      double maxMean ( -numeric_limits<double>::max() );
-      double sndMaxMean ( -numeric_limits<double>::max() );     
-      
-      for ( int j = 0 ; j < cr.scores.size(); j++ )
-      {
-        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
-        {
-          continue;
-        }
-        
-        //check whether we found a class with a smaller abs mean than the current minimum
-        if (abs(cr.scores[j]) < minMeanAbs)  
-          minMeanAbs = abs(cr.scores[j]);
-        //check for larger abs mean as well
-        if (abs(cr.scores[j]) > maxMeanAbs)
-        {
-          sndMaxMeanAbs = maxMeanAbs;
-          maxMeanAbs = abs(cr.scores[j]);
-        }
-        // and also for the second highest mean of all classes
-        else if (abs(cr.scores[j]) > sndMaxMeanAbs)
-        {
-          sndMaxMeanAbs = abs(cr.scores[j]);
-        }  
-        //check for larger mean without abs as well
-        if (cr.scores[j] > maxMean)
-        {
-          sndMaxMean = maxMean;
-          maxMean = cr.scores[j];
-        }
-        // and also for the second highest mean of all classes
-        else if (cr.scores[j] > sndMaxMean)
-        {
-          sndMaxMean = cr.scores[j];
-        }          
-      }
-
-      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
-      
-      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
-      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
-      double gpUncertaintyVal = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))
-      
-      // compute results when we take the lowest mean value of all classes
-      double gpMeanVal = minMeanAbs;
-      
-      //look at the difference between the mean values of the most plausible class
-      // and the second most plausible class
-      double gpMeanRatioVal= maxMean - sndMaxMean;
-      
-       double gpWeightAllVal ( 0.0 );
-       double gpWeightRatioVal ( 0.0 );
-
-       if ( numberOfClasses > 2)
-       {
-        //compute the weight in the alpha-vector for every sample after assuming it to be 
-        // added to the training set.
-        // Thereby, we measure its "importance" for the current model
-        // 
-        //double firstTerm is already computed
-        //
-        //the second term is only needed when computing impacts
-        //double secondTerm; //this is the nasty guy :/
-        
-        //--- compute the third term
-        // this is the difference between predicted label and GT label 
-        std::vector<double> diffToPositive; diffToPositive.clear();
-        std::vector<double> diffToNegative; diffToNegative.clear();
-        double diffToNegativeSum(0.0);
-        
-        for ( int j = 0 ; j < cr.scores.size(); j++ )
-        {
-          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
-          {
-            continue;
-          }          
-          
-          // look at the difference to plus 1          
-          diffToPositive.push_back(abs(cr.scores[j] - 1));
-          // look at the difference to -1          
-          diffToNegative.push_back(abs(cr.scores[j] + 1));
-          //sum up the difference to -1
-          diffToNegativeSum += abs(cr.scores[j] - 1);
-        }
-
-        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
-        //and use this as the third term for this specific class.
-        //the final value is obtained by minimizing over all classes
-        //
-        // originally, we minimize over all classes after building the final score
-        // however, the first and the second term do not depend on the choice of
-        // y*, therefore we minimize here already
-        double thirdTerm (numeric_limits<double>::max()) ;
-        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
-        {
-          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
-          if (tmpVal < thirdTerm)
-            thirdTerm = tmpVal;
-        }
-        gpWeightAllVal = thirdTerm*firstTerm;        
-        
-        //now look on the ratio of the resulting weights for the most plausible
-        // against the second most plausible class
-        double thirdTermMostPlausible ( 0.0 ) ;
-        double thirdTermSecondMostPlausible ( 0.0 ) ;
-        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
-        {
-          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
-          {
-            thirdTermSecondMostPlausible = thirdTermMostPlausible;
-            thirdTermMostPlausible = diffToPositive[tmpCnt];
-          }
-          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
-          {
-            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
-          }
-        }
-        //compute the resulting score
-        gpWeightRatioVal = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
-
-        //finally, look at how this feature would affect the whole model (summarized
-        //by the weight vector alpha) if we used it as an additional training example
-        //TODO this would be REALLY computationally demanding. Do we really want to do this?
-  //         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
-  //         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
-       }
-       else //binary scenario
-       {
-         gpWeightAllVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
-         gpWeightAllVal *= firstTerm;
-         gpWeightRatioVal = gpWeightAllVal;
-       }
-
-      int xs = std::max(0, x - testWSize/2);
-      int xe = std::min(xsize - 1, x + testWSize/2);
-      int ys = std::max(0, y - testWSize/2);
-      int ye = std::min(ysize - 1, y + testWSize/2);
-      for (int yl = ys; yl <= ye; yl++)
-      {
-        for (int xl = xs; xl <= xe; xl++)
-        {
-          for ( int j = 0 ; j < cr.scores.size(); j++ )
-          {
-            probabilities ( xl, yl, j ) = cr.scores[j];
-          }
-          segresult ( xl, yl ) = cr.classno;
-          uncert ( xl, yl ) = cr.uncertainty;
-          
-          gpUncertainty ( xl, yl ) = gpUncertaintyVal;
-          gpMean ( xl, yl ) = gpMeanVal;
-          gpMeanRatio ( xl, yl ) = gpMeanRatioVal;
-          gpWeightAll ( xl, yl ) = gpWeightAllVal;
-          gpWeightRatio ( xl, yl ) = gpWeightRatioVal;    
-        }
-      }
 
-      if (maxunc < cr.uncertainty)
-        maxunc = cr.uncertainty;
-      
-      if (maxGPUncertainty < gpUncertaintyVal)
-        maxGPUncertainty = gpUncertaintyVal;
-      if (maxGPMean < gpMeanVal)
-        maxGPMean = gpMeanVal;
-      if (maxGPMeanRatio < gpMeanRatioVal)
-        maxGPMeanRatio = gpMeanRatioVal;
-      if (maxGPWeightAll < gpMeanRatioVal)
-        maxGPWeightAll = gpWeightAllVal;
-      if (maxGPWeightRatio < gpWeightRatioVal)
-        maxGPWeightRatio = gpWeightRatioVal;      
-      
-      example.svec->clear();
-    }
-    delete example.svec;
-    example.svec = NULL;
-  }
 
   //       std::cerr << "uncertainty: " << gpUncertaintyVal << " minMean: " << gpMeanVal << " gpMeanRatio: " << gpMeanRatioVal << " weightAll: " << gpWeightAllVal << " weightRatio: "<< gpWeightRatioVal << std::endl;
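
For orientation, the per-pixel scores that this removed block computed side by side, and that the dedicated compute* methods now produce one at a time, can be summarized as follows. The notation is ours: \mu_c is the GP mean score of class c, \sigma_*^2 the predictive variance stored in cr.uncertainty, and \sigma_n^2 the configured gpNoise value.

```latex
\nu_{\mathrm{var}} = \sigma_*^2 , \qquad
\nu_{\mathrm{unc}} = \frac{\max_c \lvert \mu_c \rvert}{\sqrt{\sigma_*^2 + \sigma_n^2}} , \qquad
\nu_{\mathrm{mean}} = \min_c \lvert \mu_c \rvert , \qquad
\nu_{\mathrm{meanRatio}} = \mu_{(1)} - \mu_{(2)}
```

Here \mu_{(1)} and \mu_{(2)} denote the largest and second-largest class means, and \nu_{\mathrm{unc}} is the GP-uncertainty heuristic of Kapoor et al. (IJCV 2010) referenced in the comments above.
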
   
@@ -671,11 +467,7 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
     //compute probs per region
     vector<vector<double> > regionProb(amountRegions,vector<double>(probabilities.channels(),0.0));
     vector<double> regionNoveltyMeasure (amountRegions, 0.0);
-    std::vector<double> regionGPUncertainty (amountRegions, 0.0);
-    std::vector<double> regionGPMean (amountRegions, 0.0);
-    std::vector<double> regionGPMeanRatio (amountRegions, 0.0);
-    std::vector<double> regionGPWeightAll (amountRegions, 0.0);
-    std::vector<double> regionGPWeightRatio (amountRegions, 0.0);
+
     vector<int> regionCounter(amountRegions, 0);
     for ( int y = 0; y < ysize; y++)
     {
@@ -689,20 +481,14 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
         }
         
         //count the amount of "novelty" for the corresponding region
-        regionNoveltyMeasure[r] += uncert(x,y);
-        //
-        regionGPUncertainty[r] += gpUncertainty(x,y);
-        regionGPMean[r] += gpMean(x,y);
-        regionGPMeanRatio[r] += gpMeanRatio(x,y);
-        regionGPWeightAll[r] += gpWeightAll(x,y);        
-        regionGPWeightRatio[r] += gpWeightRatio(x,y);        
+        regionNoveltyMeasure[r] += noveltyImage(x,y);       
       }
     }
        
     //find best class per region
     vector<int> bestClassPerRegion(amountRegions,0);
     
-    double maxuncert = -numeric_limits<double>::max();
+    double maxNoveltyScore = -numeric_limits<double>::max();
     int maxUncertRegion = -1;
     
     for(int r = 0; r < amountRegions; r++)
@@ -719,23 +505,18 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
       }
       //normalize summed novelty scores to region size
       regionNoveltyMeasure[r] /= regionCounter[r];
-      //
-      regionGPUncertainty[r] /= regionCounter[r];
-      regionGPMean[r] /= regionCounter[r];
-      regionGPMeanRatio[r] /= regionCounter[r];
-      regionGPWeightAll[r] /= regionCounter[r]; 
-      regionGPWeightRatio[r] /= regionCounter[r];       
+      //    
       
-      if(maxuncert < regionNoveltyMeasure[r])
+      if(maxNoveltyScore < regionNoveltyMeasure[r])
       {
-        maxuncert = regionNoveltyMeasure[r];
+        maxNoveltyScore = regionNoveltyMeasure[r];
         maxUncertRegion = r;
       }
     }
     
     if(findMaximumUncert)
     {
-      if(maxuncert > globalMaxUncert)
+      if(maxNoveltyScore > globalMaxUncert)
       {
         //save new important features
         Examples examples;
@@ -765,7 +546,7 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
         {
           newTrainExamples.clear();
           newTrainExamples = examples;
-          globalMaxUncert = maxuncert;
+          globalMaxUncert = maxNoveltyScore;
           visualizeRegion(img,mask,maxUncertRegion,maskedImg);
         }
       }
@@ -783,13 +564,7 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
         }
         segresult(x,y) = bestClassPerRegion[r];
         // write novelty scores for every segment into the "final" image
-        uncert(x,y) = regionNoveltyMeasure[r];
-        //
-        gpUncertainty(x,y) = regionGPUncertainty[r];     
-        gpMean(x,y) = regionGPMean[r];
-        gpMeanRatio(x,y) = regionGPMeanRatio[r];
-        gpWeightAll(x,y) = regionGPWeightAll[r];
-        gpWeightRatio(x,y) = regionGPWeightRatio[r];
+        noveltyImage(x,y) = regionNoveltyMeasure[r];
       }
     }
   }
@@ -805,14 +580,8 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
   out << uncertdir << "/" << list2.back();
   
-  uncert.writeRaw(out.str() + ".rawfloat");
-  
-  gpUncertainty.writeRaw(out.str() + "_gpUncertainty.rawfloat");
-  gpMean.writeRaw(out.str() + "_gpMean.rawfloat");
-  gpMeanRatio.writeRaw(out.str() + "_gpMeanRatio.rawfloat");
-  gpWeightAll.writeRaw(out.str() + "_gpWeightAll.rawfloat");
-  gpWeightRatio.writeRaw(out.str() + "_gpWeightRatio.rawfloat");
-  
+  //TODO append a suffix according to the novelty strategy chosen
+  noveltyImage.writeRaw(out.str() + "_" + noveltyMethodString+".rawfloat");
 
   timer.stop();
   cout << "last: " << timer.getLastAbsolute() << endl;
@@ -867,6 +636,8 @@ inline void SemSegNovelty::computeClassificationResults( const NICE::MultiChanne
   }
 }
 
+// compute novelty images depending on the strategy chosen
+
 void SemSegNovelty::computeNoveltyByVariance(       NICE::FloatImage & noveltyImage, 
                                               const NICE::MultiChannelImageT<double> & feats,  
                                                     NICE::Image & segresult,
@@ -988,3 +759,379 @@ void SemSegNovelty::computeNoveltyByGPUncertainty(  NICE::FloatImage & noveltyIm
     example.svec = NULL;
   }  
 }
+
+void SemSegNovelty::computeNoveltyByGPMean(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double minMeanAbs ( numeric_limits<double>::max() );
+      
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        
+        //check whether we found a class with a smaller abs mean than the current minimum
+        if (abs(cr.scores[j]) < minMeanAbs)  
+          minMeanAbs = abs(cr.scores[j]);     
+      }
+
+      // compute results when we take the lowest absolute mean value of all classes
+      double gpMeanVal = minMeanAbs;
+  
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpMeanVal; 
+        }
+      }    
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+void SemSegNovelty::computeNoveltyByGPMeanRatio(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double maxMean ( -numeric_limits<double>::max() );
+      double sndMaxMean ( -numeric_limits<double>::max() );     
+      
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        
+        //check for larger mean without abs as well
+        if (cr.scores[j] > maxMean)
+        {
+          sndMaxMean = maxMean;
+          maxMean = cr.scores[j];
+        }
+        // and also for the second highest mean of all classes
+        else if (cr.scores[j] > sndMaxMean)
+        {
+          sndMaxMean = cr.scores[j];
+        }          
+      }
+      
+      //look at the difference between the mean values of the most plausible class
+      // and the second most plausible class
+      double gpMeanRatioVal= maxMean - sndMaxMean;
+
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpMeanRatioVal;
+        }
+      }    
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+void SemSegNovelty::computeNoveltyByGPWeightAll(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+      
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+      
+      double gpWeightAllVal ( 0.0 );
+
+      if ( numberOfClasses > 2)
+      {
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }          
+          
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+          // look at the difference to -1          
+          diffToNegative.push_back(abs(cr.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(cr.scores[j] + 1);
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt])   );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAllVal = thirdTerm*firstTerm;        
+      }
+      else //binary scenario
+      {
+        gpWeightAllVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+        gpWeightAllVal *= firstTerm;
+      }
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpWeightAllVal;
+        }
+      }
+   
+      
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
+
+void SemSegNovelty::computeNoveltyByGPWeightRatio(  NICE::FloatImage & noveltyImage, 
+                                              const NICE::MultiChannelImageT<double> & feats,  
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                             const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise =  conf->gD("GPHIK", "noise", 0.01);  
+  
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+ 
+
+       double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+       double gpWeightRatioVal ( 0.0 );
+
+       if ( numberOfClasses > 2)
+       {
+        //compute the weight in the alpha-vector for every sample after assuming it to be 
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        // 
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+        
+        //--- compute the third term
+        // this is the difference between predicted label and GT label 
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+        
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }          
+          
+          // look at the difference to plus 1          
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+        }
+
+        //for the ratio-based variant we only need the differences to +1
+        //for every class; the diffToNegative bookkeeping above is unused here.
+        //the final score is the gap between the largest and the
+        //second-largest of these differences.
+        //
+        // the first term does not depend on the choice of y*, so it can
+        // be applied once after the gap is computed
+        
+        //now look at the ratio of the resulting weights for the most plausible
+        // class against the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatioVal = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;      
+
+        //finally, look at how this feature would affect the whole model (summarized
+        //by the weight vector alpha) if we used it as an additional training example
+        //TODO this would be REALLY computationally demanding. Do we really want to do this?
+  //         gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+  //         gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
+       }
+       else //binary scenario
+       {
+         gpWeightRatioVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+         gpWeightRatioVal *= firstTerm;
+       }
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpWeightRatioVal;  
+        }
+      }
+       
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }  
+}
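
The two weight-based scores added above estimate how strongly a sample would enter the model's weight vector \alpha if it were labeled and added to the training set. Assuming diffToNegativeSum accumulates \lvert \mu_c + 1 \rvert as its comment states, the multi-class branches amount to the following (notation as before, ours):

```latex
\nu_{\mathrm{weightAll}} =
  \frac{\min_{y^*} \Big( \lvert \mu_{y^*} - 1 \rvert
        + \sum_{c \neq y^*} \lvert \mu_c + 1 \rvert \Big)}
       {\sqrt{\sigma_*^2 + \sigma_n^2}} , \qquad
\nu_{\mathrm{weightRatio}} =
  \frac{d_{(1)} - d_{(2)}}{\sqrt{\sigma_*^2 + \sigma_n^2}} ,
  \quad d_c = \lvert \mu_c - 1 \rvert
```

where d_{(1)} and d_{(2)} are the largest and second-largest differences to the positive label. In the binary case both scores reduce to \min(\lvert \mu + 1 \rvert, \lvert \mu - 1 \rvert) / \sqrt{\sigma_*^2 + \sigma_n^2}, as the else branches show.
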

+ 23 - 1
semseg/SemSegNovelty.h

@@ -93,6 +93,7 @@ class SemSegNovelty : public SemanticSegmentation
     
     //! specify how "novelty" shall be computed, e.g., using GP-variance, GP-uncertainty, or predicted weight entries
     NoveltyMethod noveltyMethod;
+    std::string noveltyMethodString;
     
     inline void computeClassificationResults( const NICE::MultiChannelImageT<double> & feats, 
                                                     NICE::Image & segresult,
@@ -111,7 +112,28 @@ class SemSegNovelty : public SemanticSegmentation
                                   const NICE::MultiChannelImageT<double> & feats,  
                                         NICE::Image & segresult,
                                         NICE::MultiChannelImageT<double> & probabilities,
-                                  const int & xsize, const int & ysize, const int & featdim );   
+                                  const int & xsize, const int & ysize, const int & featdim );
+   
+   void computeNoveltyByGPMean        ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPMeanRatio   ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPWeightAll   ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );  
+   void computeNoveltyByGPWeightRatio ( NICE::FloatImage & noveltyImage, 
+                                  const NICE::MultiChannelImageT<double> & feats,  
+                                        NICE::Image & segresult,
+                                        NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize, const int & ysize, const int & featdim );     
    
   public: